1 /**************************************************************************
3 * Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above
12 * copyright notice, this list of conditions and the following
13 * disclaimer in the documentation and/or other materials provided
14 * with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * The views and conclusions contained in the software and documentation
30 * are those of the authors and should not be interpreted as representing
31 * official policies, either expressed or implied, of Alacritech, Inc.
33 * Parts developed by LinSysSoft Sahara team
35 **************************************************************************/
40 * The SXG driver for Alacritech's 10GbE products.
42 * NOTE: This is the standard, non-accelerated version of Alacritech's driver.
46 #include <linux/kernel.h>
47 #include <linux/string.h>
48 #include <linux/errno.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/ioport.h>
52 #include <linux/slab.h>
53 #include <linux/interrupt.h>
54 #include <linux/timer.h>
55 #include <linux/pci.h>
56 #include <linux/spinlock.h>
57 #include <linux/init.h>
58 #include <linux/netdevice.h>
59 #include <linux/etherdevice.h>
60 #include <linux/ethtool.h>
61 #include <linux/skbuff.h>
62 #include <linux/delay.h>
63 #include <linux/types.h>
64 #include <linux/dma-mapping.h>
65 #include <linux/mii.h>
68 #include <linux/tcp.h>
69 #include <linux/ipv6.h>
71 #define SLIC_GET_STATS_ENABLED 0
72 #define LINUX_FREES_ADAPTER_RESOURCES 1
73 #define SXG_OFFLOAD_IP_CHECKSUM 0
74 #define SXG_POWER_MANAGEMENT_ENABLED 0
84 #include "sxgphycode.h"
85 #define SXG_UCODE_DBG 0 /* Turn on for debugging */
87 #include "saharadbgdownload.c"
88 #include "saharadbgdownloadB.c"
90 #include "saharadownload.c"
91 #include "saharadownloadB.c"
94 static int sxg_allocate_buffer_memory(struct adapter_t *adapter, u32 Size,
95 enum sxg_buffer_type BufferType);
96 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
98 dma_addr_t PhysicalAddress,
100 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
101 struct sxg_scatter_gather *SxgSgl,
102 dma_addr_t PhysicalAddress,
105 static void sxg_mcast_init_crc32(void);
106 static int sxg_entry_open(struct net_device *dev);
107 static int sxg_second_open(struct net_device * dev);
108 static int sxg_entry_halt(struct net_device *dev);
109 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
110 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev);
111 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb);
112 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
113 struct sxg_scatter_gather *SxgSgl);
115 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
117 static void sxg_interrupt(struct adapter_t *adapter);
118 static int sxg_poll(struct napi_struct *napi, int budget);
119 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId);
120 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
121 int *sxg_napi_continue, int *work_done, int budget);
122 static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context);
123 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
124 struct sxg_event *Event);
125 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus);
126 static bool sxg_mac_filter(struct adapter_t *adapter,
127 struct ether_header *EtherHdr, ushort length);
128 static struct net_device_stats *sxg_get_stats(struct net_device * dev);
129 void sxg_free_resources(struct adapter_t *adapter);
130 void sxg_free_rcvblocks(struct adapter_t *adapter);
131 void sxg_free_sgl_buffers(struct adapter_t *adapter);
132 void sxg_unmap_resources(struct adapter_t *adapter);
133 void sxg_free_mcast_addrs(struct adapter_t *adapter);
134 void sxg_collect_statistics(struct adapter_t *adapter);
139 static int sxg_mac_set_address(struct net_device *dev, void *ptr);
140 static void sxg_unmap_mmio_space(struct adapter_t *adapter);
142 static void sxg_mcast_set_list(struct net_device *dev);
144 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter);
146 static int sxg_initialize_adapter(struct adapter_t *adapter);
147 static void sxg_stock_rcv_buffers(struct adapter_t *adapter);
148 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
149 unsigned char Index);
150 static int sxg_initialize_link(struct adapter_t *adapter);
151 static int sxg_phy_init(struct adapter_t *adapter);
152 static void sxg_link_event(struct adapter_t *adapter);
153 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter);
154 static void sxg_link_state(struct adapter_t *adapter,
155 enum SXG_LINK_STATE LinkState);
156 static int sxg_write_mdio_reg(struct adapter_t *adapter,
157 u32 DevAddr, u32 RegAddr, u32 Value);
158 static int sxg_read_mdio_reg(struct adapter_t *adapter,
159 u32 DevAddr, u32 RegAddr, u32 *pValue);
160 static void sxg_set_mcast_addr(struct adapter_t *adapter);
162 static unsigned int sxg_first_init = 1;
163 static char *sxg_banner =
164 "Alacritech SLIC Technology(tm) Server and Storage \
165 10Gbe Accelerator (Non-Accelerated)\n";
167 static int sxg_debug = 1;
168 static int debug = -1;
169 static struct net_device *head_netdevice = NULL;
171 static struct sxgbase_driver sxg_global = {
174 static int intagg_delay = 100;
175 static u32 dynamic_intagg = 0;
177 char sxg_driver_name[] = "sxg_nic";
178 #define DRV_AUTHOR "Alacritech, Inc. Engineering"
179 #define DRV_DESCRIPTION \
180 "Alacritech SLIC Techonology(tm) Non-Accelerated 10Gbe Driver"
181 #define DRV_COPYRIGHT \
182 "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
184 MODULE_AUTHOR(DRV_AUTHOR);
185 MODULE_DESCRIPTION(DRV_DESCRIPTION);
186 MODULE_LICENSE("GPL");
188 module_param(dynamic_intagg, int, 0);
189 MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
190 module_param(intagg_delay, int, 0);
191 MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
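/*
 * Illustrative usage of the two module parameters above (the module name
 * "sxg" is assumed here; use whatever .ko name the build actually produces):
 *
 *	modprobe sxg dynamic_intagg=1 intagg_delay=100
 *
 * dynamic_intagg selects dynamic interrupt aggregation and intagg_delay
 * gives the aggregation delay in microseconds.
 */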
193 static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
194 {PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
198 MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
200 static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
207 static inline void sxg_reg64_write(struct adapter_t *adapter, void __iomem *reg,
210 u32 value_high = (u32) (value >> 32);
211 u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
214 spin_lock_irqsave(&adapter->Bit64RegLock, flags);
215 writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
216 writel(value_low, reg);
217 spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
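/*
 * The 64-bit write above is split into two 32-bit halves: the high word
 * goes to the per-CPU "Upper" register and the low word to the target
 * register itself, with Bit64RegLock held so concurrent callers cannot
 * interleave the halves. Illustrative call (copied from sxg_read_config
 * below) handing a DMA address to the ucode:
 *
 *	WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
 */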
220 static void sxg_init_driver(void)
222 if (sxg_first_init) {
223 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
226 spin_lock_init(&sxg_global.driver_lock);
230 static void sxg_dbg_macaddrs(struct adapter_t *adapter)
232 DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
233 adapter->netdev->name, adapter->currmacaddr[0],
234 adapter->currmacaddr[1], adapter->currmacaddr[2],
235 adapter->currmacaddr[3], adapter->currmacaddr[4],
236 adapter->currmacaddr[5]);
237 DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
238 adapter->netdev->name, adapter->macaddr[0],
239 adapter->macaddr[1], adapter->macaddr[2],
240 adapter->macaddr[3], adapter->macaddr[4],
241 adapter->macaddr[5]);
246 static struct sxg_driver SxgDriver;
249 static struct sxg_trace_buffer LSxgTraceBuffer;
251 static struct sxg_trace_buffer *SxgTraceBuffer = NULL;
254 * sxg_download_microcode
256 * Download Microcode to Sahara adapter
259 * adapter - A pointer to our adapter structure
260 * UcodeSel - microcode file selection
265 static bool sxg_download_microcode(struct adapter_t *adapter,
266 enum SXG_UCODE_SEL UcodeSel)
268 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
271 u32 *Instruction = NULL;
272 u32 BaseAddress, AddressOffset, Address;
278 u32 sectionStart[16];
280 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
282 DBG_ERROR("sxg: %s ENTER\n", __func__);
285 case SXG_UCODE_SAHARA: /* Sahara operational ucode */
286 numSections = SNumSections;
287 for (i = 0; i < numSections; i++) {
288 sectionSize[i] = SSectionSize[i];
289 sectionStart[i] = SSectionStart[i];
293 printk(KERN_ERR KBUILD_MODNAME
294 ": Woah, big error with the microcode!\n");
298 DBG_ERROR("sxg: RESET THE CARD\n");
299 /* First, reset the card */
300 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
303 * Download each section of the microcode as specified in
304 * its download file. The *download.c file is generated using
305 * the saharaobjtoc facility which converts the metastep .obj
306 * file to a .c file which contains a two-dimensional array.
308 for (Section = 0; Section < numSections; Section++) {
309 DBG_ERROR("sxg: SECTION # %d\n", Section);
311 case SXG_UCODE_SAHARA:
312 Instruction = (u32 *)&SaharaUCode[Section][0];
318 BaseAddress = sectionStart[Section];
319 /* Size in instructions */
320 ThisSectionSize = sectionSize[Section] / 12;
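/*
 * Each microcode instruction is 96 bits wide and is written as three
 * u32 words (DataLow = bits 31-0, DataMiddle = bits 63-32, DataHigh =
 * bits 95-64), i.e. 12 bytes per instruction, which is why the section
 * byte count is divided by 12 above.
 */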
321 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
323 Address = BaseAddress + AddressOffset;
324 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
325 /* Write instruction bits 31 - 0 */
326 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
327 /* Write instruction bits 63-32 */
328 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
330 /* Write instruction bits 95-64 */
331 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
333 /* Write instruction address with the WRITE bit set */
334 WRITE_REG(HwRegs->UcodeAddr,
335 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
337 * Sahara bug in the ucode download logic - the write to DataLow
338 * for the next instruction could get corrupted. To avoid this,
339 * write to DataLow again for this instruction (which may get
340 * corrupted, but it doesn't matter), then increment the address
341 * and write the data for the next instruction to DataLow. That
342 * write should succeed.
344 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
345 /* Advance 3 u32s to the start of the next instruction */
350 * Now repeat the entire operation reading the instruction back and
351 * checking for parity errors
353 for (Section = 0; Section < numSections; Section++) {
354 DBG_ERROR("sxg: check SECTION # %d\n", Section);
356 case SXG_UCODE_SAHARA:
357 Instruction = (u32 *)&SaharaUCode[Section][0];
363 BaseAddress = sectionStart[Section];
364 /* Size in instructions */
365 ThisSectionSize = sectionSize[Section] / 12;
366 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
368 Address = BaseAddress + AddressOffset;
369 /* Write the address with the READ bit set */
370 WRITE_REG(HwRegs->UcodeAddr,
371 (Address | MICROCODE_ADDRESS_READ), FLUSH);
372 /* Read it back and check parity bit. */
373 READ_REG(HwRegs->UcodeAddr, ValueRead);
374 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
375 DBG_ERROR("sxg: %s PARITY ERROR\n",
378 return FALSE; /* Parity error */
380 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
381 /* Read the instruction back and compare */
382 READ_REG(HwRegs->UcodeDataLow, ValueRead);
383 if (ValueRead != *Instruction) {
384 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
386 return FALSE; /* Miscompare */
388 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
389 if (ValueRead != *(Instruction + 1)) {
390 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
392 return FALSE; /* Miscompare */
394 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
395 if (ValueRead != *(Instruction + 2)) {
396 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
398 return FALSE; /* Miscompare */
400 /* Advance 3 u32s to the start of the next instruction */
405 /* Everything OK, Go. */
406 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
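/*
 * Writing the GO bit presumably releases the on-card processors to run
 * the image just downloaded; the CardUp poll below then waits for the
 * ucode to report that its own initialization finished by posting the
 * 0xCAFE magic value.
 */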
409 * Poll the CardUp register to wait for microcode to initialize
410 * Give up after 10,000 attempts (500ms).
412 for (i = 0; i < 10000; i++) {
414 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
415 if (ValueRead == 0xCAFE) {
416 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
421 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
423 return FALSE; /* Timeout */
426 * Now write the LoadSync register. This is used to
427 * synchronize with the card so it can scribble on the memory
428 * that contained 0xCAFE from the "CardUp" step above
430 if (UcodeSel == SXG_UCODE_SAHARA) {
431 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
434 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
436 DBG_ERROR("sxg: %s EXIT\n", __func__);
442 * sxg_allocate_resources - Allocate memory and locks
445 * adapter - A pointer to our adapter structure
449 static int sxg_allocate_resources(struct adapter_t *adapter)
453 u32 RssIds, IsrCount;
454 /* struct sxg_xmt_ring *XmtRing; */
455 /* struct sxg_rcv_ring *RcvRing; */
457 DBG_ERROR("%s ENTER\n", __func__);
459 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
462 /* Windows tells us how many CPUs it plans to use for RSS. */
464 RssIds = SXG_RSS_CPU_COUNT(adapter);
465 IsrCount = adapter->MsiEnabled ? RssIds : 1;
467 DBG_ERROR("%s Setup the spinlocks\n", __func__);
469 /* Allocate spinlocks and initialize listheads first. */
470 spin_lock_init(&adapter->RcvQLock);
471 spin_lock_init(&adapter->SglQLock);
472 spin_lock_init(&adapter->XmtZeroLock);
473 spin_lock_init(&adapter->Bit64RegLock);
474 spin_lock_init(&adapter->AdapterLock);
475 atomic_set(&adapter->pending_allocations, 0);
477 DBG_ERROR("%s Setup the lists\n", __func__);
479 InitializeListHead(&adapter->FreeRcvBuffers);
480 InitializeListHead(&adapter->FreeRcvBlocks);
481 InitializeListHead(&adapter->AllRcvBlocks);
482 InitializeListHead(&adapter->FreeSglBuffers);
483 InitializeListHead(&adapter->AllSglBuffers);
486 * Mark these basic allocations done. This flag essentially
487 * tells the SxgFreeResources routine that it can grab spinlocks
488 * and reference listheads.
490 adapter->BasicAllocations = TRUE;
492 * Main allocation loop. Start with the maximum supported by
493 * the microcode and back off if memory allocation
494 * fails. If we hit a minimum, fail.
498 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
499 (unsigned int)(sizeof(struct sxg_xmt_ring) * 1));
502 * Start with big items first - receive and transmit rings.
503 * At the moment I'm going to keep the ring size fixed and
504 * adjust the TCBs if we fail. Later we might
505 * consider reducing the ring size as well..
507 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
508 sizeof(struct sxg_xmt_ring) *
510 &adapter->PXmtRings);
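/*
 * pci_alloc_consistent() returns a coherent kernel virtual address and
 * stores the matching bus/DMA address in PXmtRings; the driver uses the
 * virtual address while the bus address is what gets handed to the card.
 */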
511 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
513 if (!adapter->XmtRings) {
514 goto per_tcb_allocation_failed;
516 memset(adapter->XmtRings, 0, sizeof(struct sxg_xmt_ring) * 1);
518 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
519 (unsigned int)(sizeof(struct sxg_rcv_ring) * 1));
521 pci_alloc_consistent(adapter->pcidev,
522 sizeof(struct sxg_rcv_ring) * 1,
523 &adapter->PRcvRings);
524 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
525 if (!adapter->RcvRings) {
526 goto per_tcb_allocation_failed;
528 memset(adapter->RcvRings, 0, sizeof(struct sxg_rcv_ring) * 1);
529 adapter->ucode_stats = kzalloc(sizeof(struct sxg_ucode_stats), GFP_ATOMIC);
530 adapter->pucode_stats = pci_map_single(adapter->pcidev,
531 adapter->ucode_stats,
532 sizeof(struct sxg_ucode_stats),
534 // memset(adapter->ucode_stats, 0, sizeof(struct sxg_ucode_stats));
537 per_tcb_allocation_failed:
538 /* an allocation failed. Free any successful allocations. */
539 if (adapter->XmtRings) {
540 pci_free_consistent(adapter->pcidev,
541 sizeof(struct sxg_xmt_ring) * 1,
544 adapter->XmtRings = NULL;
546 if (adapter->RcvRings) {
547 pci_free_consistent(adapter->pcidev,
548 sizeof(struct sxg_rcv_ring) * 1,
551 adapter->RcvRings = NULL;
553 /* Loop around and try again.... */
554 if (adapter->ucode_stats) {
555 pci_unmap_single(adapter->pcidev,
556 adapter->pucode_stats,
557 sizeof(struct sxg_ucode_stats), PCI_DMA_FROMDEVICE);
558 adapter->ucode_stats = NULL;
563 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
564 /* Initialize rcv zero and xmt zero rings */
565 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
566 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
568 /* Sanity check receive data structure format */
569 /* ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
570 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); */
571 ASSERT(sizeof(struct sxg_rcv_descriptor_block) ==
572 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
575 * Allocate receive data buffers. We allocate a block of buffers and
576 * a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
578 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
579 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
580 status = sxg_allocate_buffer_memory(adapter,
581 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
582 SXG_BUFFER_TYPE_RCV);
583 if (status != STATUS_SUCCESS)
587 * NBL resource allocation can fail in the 'AllocateComplete' routine,
588 * which doesn't return status. Make sure we got the number of buffers we asked for.
591 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
592 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
593 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
595 return (STATUS_RESOURCES);
598 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
599 (unsigned int)(sizeof(struct sxg_event_ring) * RssIds));
601 /* Allocate event queues. */
602 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
603 sizeof(struct sxg_event_ring) *
605 &adapter->PEventRings);
607 if (!adapter->EventRings) {
608 /* Caller will call SxgFreeAdapter to clean up the above allocations. */
610 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
611 adapter, SXG_MAX_ENTRIES, 0, 0);
612 status = STATUS_RESOURCES;
613 goto per_tcb_allocation_failed;
615 memset(adapter->EventRings, 0, sizeof(struct sxg_event_ring) * RssIds);
617 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
619 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
620 sizeof(u32) * IsrCount, &adapter->PIsr);
622 /* Caller will call SxgFreeAdapter to clean up the above allocations. */
624 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
625 adapter, SXG_MAX_ENTRIES, 0, 0);
626 status = STATUS_RESOURCES;
627 goto per_tcb_allocation_failed;
629 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
631 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
632 __func__, (unsigned int)sizeof(u32));
634 /* Allocate shared XMT ring zero index location */
635 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
639 if (!adapter->XmtRingZeroIndex) {
640 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
641 adapter, SXG_MAX_ENTRIES, 0, 0);
642 status = STATUS_RESOURCES;
643 goto per_tcb_allocation_failed;
645 memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
647 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
648 adapter, SXG_MAX_ENTRIES, 0, 0);
656 * Set up PCI Configuration space
659 * pcidev - A pointer to our adapter structure
661 static void sxg_config_pci(struct pci_dev *pcidev)
666 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
667 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
668 /* Set the command register */
669 new_command = pci_command | (
670 /* Memory Space Enable */
672 /* Bus master enable */
674 /* Memory write and invalidate */
675 PCI_COMMAND_INVALIDATE |
676 /* Parity error response */
680 /* Fast back-to-back */
681 PCI_COMMAND_FAST_BACK);
682 if (pci_command != new_command) {
683 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
684 __func__, pci_command, new_command);
685 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
691 * @adapter : Pointer to the adapter structure for the card
692 * This function will read the configuration data from EEPROM/FLASH
694 static inline int sxg_read_config(struct adapter_t *adapter)
696 /* struct sxg_config data; */
697 struct sw_cfg_data *data;
699 unsigned long status;
702 data = pci_alloc_consistent(adapter->pcidev,
703 sizeof(struct sw_cfg_data), &p_addr);
706 * We can't even get this much memory. Complain loudly.
709 printk(KERN_ERR "%s : Could not allocate memory for reading "
710 "EEPROM\n", __func__);
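/*
 * Config-read handshake used below: ConfigStat is seeded with
 * SXG_CFG_TIMEOUT, the DMA address of the sw_cfg_data buffer is handed
 * to the ucode through the 64-bit Config register, and ConfigStat is
 * then polled (1000 x 1ms) until the ucode replaces the seed value with
 * a load status such as SXG_CFG_LOAD_EEPROM or SXG_CFG_LOAD_FLASH.
 */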
714 WRITE_REG(adapter->UcodeRegs[0].ConfigStat, SXG_CFG_TIMEOUT, TRUE);
716 WRITE_REG64(adapter, adapter->UcodeRegs[0].Config, p_addr, 0);
717 for (i = 0; i < 1000; i++) {
718 READ_REG(adapter->UcodeRegs[0].ConfigStat, status);
719 if (status != SXG_CFG_TIMEOUT)
721 mdelay(1); /* Do we really need this */
725 /* Config read from EEPROM succeeded */
726 case SXG_CFG_LOAD_EEPROM:
727 /* Config read from Flash succeeded */
728 case SXG_CFG_LOAD_FLASH:
729 /* Copy the MAC address to adapter structure */
730 /* TODO: We are not doing the remaining part : FRU,
733 memcpy(adapter->macaddr, data->MacAddr[0].MacAddr,
734 sizeof(struct sxg_config_mac));
736 case SXG_CFG_TIMEOUT:
737 case SXG_CFG_LOAD_INVALID:
738 case SXG_CFG_LOAD_ERROR:
739 default: /* Fix default handler later */
740 printk(KERN_WARNING "%s : We could not read the config "
741 "word. Status = %ld\n", __func__, status);
744 pci_free_consistent(adapter->pcidev, sizeof(struct sw_cfg_data), data,
746 if (adapter->netdev) {
747 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
748 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
750 sxg_dbg_macaddrs(adapter);
755 static int sxg_entry_probe(struct pci_dev *pcidev,
756 const struct pci_device_id *pci_tbl_entry)
758 static int did_version = 0;
760 struct net_device *netdev;
761 struct adapter_t *adapter;
762 void __iomem *memmapped_ioaddr;
764 ulong mmio_start = 0;
767 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
768 __func__, jiffies, smp_processor_id());
770 /* Initialize trace buffer */
772 SxgTraceBuffer = &LSxgTraceBuffer;
773 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
776 sxg_global.dynamic_intagg = dynamic_intagg;
778 err = pci_enable_device(pcidev);
780 DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
785 if (sxg_debug > 0 && did_version++ == 0) {
786 printk(KERN_INFO "%s\n", sxg_banner);
787 printk(KERN_INFO "%s\n", SXG_DRV_VERSION);
790 if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
791 DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
793 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
795 ("No usable DMA configuration, aborting err[%x]\n",
799 DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
802 DBG_ERROR("Call pci_request_regions\n");
804 err = pci_request_regions(pcidev, sxg_driver_name);
806 DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
810 DBG_ERROR("call pci_set_master\n");
811 pci_set_master(pcidev);
813 DBG_ERROR("call alloc_etherdev\n");
814 netdev = alloc_etherdev(sizeof(struct adapter_t));
817 goto err_out_exit_sxg_probe;
819 DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
821 SET_NETDEV_DEV(netdev, &pcidev->dev);
823 pci_set_drvdata(pcidev, netdev);
824 adapter = netdev_priv(netdev);
825 adapter->netdev = netdev;
826 adapter->pcidev = pcidev;
828 mmio_start = pci_resource_start(pcidev, 0);
829 mmio_len = pci_resource_len(pcidev, 0);
831 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
832 mmio_start, mmio_len);
834 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
835 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
837 if (!memmapped_ioaddr) {
838 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
839 __func__, mmio_len, mmio_start);
840 goto err_out_free_mmio_region_0;
843 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] \
844 len[%lx], IRQ %d.\n", __func__, memmapped_ioaddr, mmio_start,
845 mmio_len, pcidev->irq);
847 adapter->HwRegs = (void *)memmapped_ioaddr;
848 adapter->base_addr = memmapped_ioaddr;
850 mmio_start = pci_resource_start(pcidev, 2);
851 mmio_len = pci_resource_len(pcidev, 2);
853 DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
854 mmio_start, mmio_len);
856 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
857 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
859 if (!memmapped_ioaddr) {
860 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
861 __func__, mmio_len, mmio_start);
862 goto err_out_free_mmio_region_2;
865 DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
866 "start[%lx] len[%lx], IRQ %d.\n", __func__,
867 memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
869 adapter->UcodeRegs = (void *)memmapped_ioaddr;
871 adapter->State = SXG_STATE_INITIALIZING;
873 * Maintain a list of all adapters anchored by
874 * the global SxgDriver structure.
876 adapter->Next = SxgDriver.Adapters;
877 SxgDriver.Adapters = adapter;
878 adapter->AdapterID = ++SxgDriver.AdapterID;
880 /* Initialize CRC table used to determine multicast hash */
881 sxg_mcast_init_crc32();
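/*
 * The CRC table initialized above backs multicast filtering. The usual
 * Ethernet scheme (assumed here; the details live in sxg_mcast_set_list()
 * and sxg_set_mcast_addr()) is to run the 6-byte destination address
 * through CRC-32 and use a handful of the resulting bits as an index
 * into the hardware hash table, e.g. crc >> 26 for a 64-entry table.
 */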
883 adapter->JumboEnabled = FALSE;
884 adapter->RssEnabled = FALSE;
885 if (adapter->JumboEnabled) {
886 adapter->FrameSize = JUMBOMAXFRAME;
887 adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
889 adapter->FrameSize = ETHERMAXFRAME;
890 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
894 * status = SXG_READ_EEPROM(adapter);
900 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
901 sxg_config_pci(pcidev);
902 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
904 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
906 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
908 adapter->vendid = pci_tbl_entry->vendor;
909 adapter->devid = pci_tbl_entry->device;
910 adapter->subsysid = pci_tbl_entry->subdevice;
911 adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
912 adapter->functionnumber = (pcidev->devfn & 0x7);
913 adapter->memorylength = pci_resource_len(pcidev, 0);
914 adapter->irq = pcidev->irq;
915 adapter->next_netdevice = head_netdevice;
916 head_netdevice = netdev;
917 adapter->port = 0; /*adapter->functionnumber; */
919 /* Allocate memory and other resources */
920 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
921 status = sxg_allocate_resources(adapter);
922 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
924 if (status != STATUS_SUCCESS) {
928 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
929 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
930 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
932 sxg_read_config(adapter);
933 status = sxg_adapter_set_hwaddr(adapter);
935 adapter->state = ADAPT_FAIL;
936 adapter->linkstate = LINK_DOWN;
937 DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
940 netdev->base_addr = (unsigned long)adapter->base_addr;
941 netdev->irq = adapter->irq;
942 netdev->open = sxg_entry_open;
943 netdev->stop = sxg_entry_halt;
944 netdev->hard_start_xmit = sxg_send_packets;
945 netdev->do_ioctl = sxg_ioctl;
947 netdev->set_mac_address = sxg_mac_set_address;
949 netdev->get_stats = sxg_get_stats;
950 netdev->set_multicast_list = sxg_mcast_set_list;
951 SET_ETHTOOL_OPS(netdev, &sxg_nic_ethtool_ops);
953 strcpy(netdev->name, "eth%d");
954 /* strcpy(netdev->name, pci_name(pcidev)); */
955 if ((err = register_netdev(netdev))) {
956 DBG_ERROR("Cannot register net device, aborting. %s\n",
961 netif_napi_add(netdev, &adapter->napi,
962 sxg_poll, SXG_NETDEV_WEIGHT);
964 ("sxg: %s addr 0x%lx, irq %d, MAC addr \
965 %02X:%02X:%02X:%02X:%02X:%02X\n",
966 netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
967 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
968 netdev->dev_addr[4], netdev->dev_addr[5]);
971 ASSERT(status == FALSE);
972 /* sxg_free_adapter(adapter); */
974 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
975 status, jiffies, smp_processor_id());
979 sxg_free_resources(adapter);
981 err_out_free_mmio_region_2:
983 mmio_start = pci_resource_start(pcidev, 2);
984 mmio_len = pci_resource_len(pcidev, 2);
985 release_mem_region(mmio_start, mmio_len);
987 err_out_free_mmio_region_0:
989 mmio_start = pci_resource_start(pcidev, 0);
990 mmio_len = pci_resource_len(pcidev, 0);
992 release_mem_region(mmio_start, mmio_len);
994 err_out_exit_sxg_probe:
996 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
999 pci_disable_device(pcidev);
1000 DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
1002 printk("Exit %s, Sxg driver loading failed..\n", __FUNCTION__);
1008 * LINE BASE Interrupt routines..
1010 * sxg_disable_interrupt
1012 * DisableInterrupt Handler
1016 * adapter: Our adapter structure
1021 static void sxg_disable_interrupt(struct adapter_t *adapter)
1023 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
1024 adapter, adapter->InterruptsEnabled, 0, 0);
1025 /* For now, RSS is disabled with line based interrupts */
1026 ASSERT(adapter->RssEnabled == FALSE);
1027 ASSERT(adapter->MsiEnabled == FALSE);
1028 /* Turn off interrupts by writing to the icr register. */
1029 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
1031 adapter->InterruptsEnabled = 0;
1033 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
1034 adapter, adapter->InterruptsEnabled, 0, 0);
1038 * sxg_enable_interrupt
1040 * EnableInterrupt Handler
1044 * adapter: Our adapter structure
1049 static void sxg_enable_interrupt(struct adapter_t *adapter)
1051 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
1052 adapter, adapter->InterruptsEnabled, 0, 0);
1053 /* For now, RSS is disabled with line based interrupts */
1054 ASSERT(adapter->RssEnabled == FALSE);
1055 ASSERT(adapter->MsiEnabled == FALSE);
1056 /* Turn on interrupts by writing to the icr register. */
1057 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
1059 adapter->InterruptsEnabled = 1;
1061 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
1066 * sxg_isr - Process an line-based interrupt
1069 * Context - Our adapter structure
1070 * QueueDefault - Output parameter to queue to default CPU
1071 * TargetCpus - Output bitmap to schedule DPC's
1073 * Return Value: TRUE if our interrupt
1075 static irqreturn_t sxg_isr(int irq, void *dev_id)
1077 struct net_device *dev = (struct net_device *) dev_id;
1078 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1080 if(adapter->state != ADAPT_UP)
1082 adapter->Stats.NumInts++;
1083 if (adapter->Isr[0] == 0) {
1085 * The SLIC driver used to experience a number of spurious
1086 * interrupts due to the delay associated with the masking of
1087 * the interrupt (we'd bounce back in here). If we see that
1088 * again with Sahara,add a READ_REG of the Icr register after
1089 * the WRITE_REG below.
1091 adapter->Stats.FalseInts++;
1095 * Move the Isr contents and clear the value in
1096 * shared memory, and mask interrupts
1098 /* ASSERT(adapter->IsrDpcsPending == 0); */
1099 #if XXXTODO /* RSS Stuff */
1101 * If RSS is enabled and the ISR specifies SXG_ISR_EVENT, then
1102 * schedule DPC's based on event queues.
1104 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
1106 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
1108 struct sxg_event_ring *EventRing =
1109 &adapter->EventRings[i];
1110 struct sxg_event *Event =
1111 &EventRing->Ring[adapter->NextEvent[i]];
1113 adapter->RssSystemInfo->RssIdToCpu[i];
1114 if (Event->Status & EVENT_STATUS_VALID) {
1115 adapter->IsrDpcsPending++;
1116 CpuMask |= (1 << Cpu);
1121 * Now, either schedule the CPUs specified by the CpuMask,
1125 *QueueDefault = FALSE;
1127 adapter->IsrDpcsPending = 1;
1128 *QueueDefault = TRUE;
1130 *TargetCpus = CpuMask;
1132 sxg_interrupt(adapter);
1137 static void sxg_interrupt(struct adapter_t *adapter)
1139 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
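/*
 * Standard NAPI hand-off: the ICR write above masks further interrupts
 * from the card while sxg_poll() drains the event queue in softirq
 * context; when the budget is not exhausted, sxg_poll() completes NAPI
 * and writes the Isr register, which appears to clear and re-arm the
 * interrupt (see sxg_poll() below).
 */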
1141 if (netif_rx_schedule_prep(&adapter->napi)) {
1142 __netif_rx_schedule(&adapter->napi);
1146 static void sxg_handle_interrupt(struct adapter_t *adapter, int *work_done,
1149 /* unsigned char RssId = 0; */
1151 int sxg_napi_continue = 1;
1152 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1153 adapter, adapter->IsrCopy[0], 0, 0);
1154 /* For now, RSS is disabled with line based interrupts */
1155 ASSERT(adapter->RssEnabled == FALSE);
1156 ASSERT(adapter->MsiEnabled == FALSE);
1158 adapter->IsrCopy[0] = adapter->Isr[0];
1159 adapter->Isr[0] = 0;
1161 /* Always process the event queue. */
1162 while (sxg_napi_continue)
1164 sxg_process_event_queue(adapter,
1165 (adapter->RssEnabled ? /*RssId */ 0 : 0),
1166 &sxg_napi_continue, work_done, budget);
1169 #if XXXTODO /* RSS stuff */
1170 if (--adapter->IsrDpcsPending) {
1172 ASSERT(adapter->RssEnabled);
1173 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1178 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1179 NewIsr = sxg_process_isr(adapter, 0);
1180 /* Reenable interrupts */
1181 adapter->IsrCopy[0] = 0;
1182 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1183 adapter, NewIsr, 0, 0);
1185 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
1188 static int sxg_poll(struct napi_struct *napi, int budget)
1190 struct adapter_t *adapter = container_of(napi, struct adapter_t, napi);
1193 sxg_handle_interrupt(adapter, &work_done, budget);
1195 if (work_done < budget) {
1196 netif_rx_complete(napi);
1197 WRITE_REG(adapter->UcodeRegs[0].Isr, 0, TRUE);
1204 * sxg_process_isr - Process an interrupt. Called from the line-based and
1205 * message based interrupt DPC routines
1208 * adapter - Our adapter structure
1209 * Queue - The ISR that needs processing
1214 static int sxg_process_isr(struct adapter_t *adapter, u32 MessageId)
1216 u32 Isr = adapter->IsrCopy[MessageId];
1219 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1220 adapter, Isr, 0, 0);
1223 if (Isr & SXG_ISR_ERR) {
1224 if (Isr & SXG_ISR_PDQF) {
1225 adapter->Stats.PdqFull++;
1226 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1228 /* No host buffer */
1229 if (Isr & SXG_ISR_RMISS) {
1231 * There is a bunch of code in the SLIC driver which
1232 * attempts to process more receive events per DPC
1233 * if we start to fall behind. We'll probably
1234 * need to do something similar here, but hold
1235 * off for now. I don't want to make the code more
1236 * complicated than strictly needed.
1238 adapter->stats.rx_missed_errors++;
1239 if (adapter->stats.rx_missed_errors < 5) {
1240 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1245 if (Isr & SXG_ISR_DEAD) {
1247 * Set aside the crash info and set the adapter state
1250 adapter->CrashCpu = (unsigned char)
1251 ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
1252 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1253 adapter->Dead = TRUE;
1254 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1255 adapter->CrashLocation, adapter->CrashCpu);
1257 /* Event ring full */
1258 if (Isr & SXG_ISR_ERFULL) {
1260 * Same issue as RMISS, really. This means the
1261 * host is falling behind the card. Need to increase
1262 * event ring size, process more events per interrupt,
1263 * and/or reduce/remove interrupt aggregation.
1265 adapter->Stats.EventRingFull++;
1266 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1269 /* Transmit drop - no DRAM buffers or XMT error */
1270 if (Isr & SXG_ISR_XDROP) {
1271 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1274 /* Slowpath send completions */
1275 if (Isr & SXG_ISR_SPSEND) {
1276 sxg_complete_slow_send(adapter, 1);
1279 if (Isr & SXG_ISR_UPC) {
1280 /* Maybe change when debug is added.. */
1281 // ASSERT(adapter->DumpCmdRunning);
1282 adapter->DumpCmdRunning = FALSE;
1285 if (Isr & SXG_ISR_LINK) {
1286 sxg_link_event(adapter);
1288 /* Debug - breakpoint hit */
1289 if (Isr & SXG_ISR_BREAK) {
1291 * At the moment AGDB isn't written to support interactive
1292 * debug sessions. When it is, this interrupt will be used to
1293 * signal AGDB that it has hit a breakpoint. For now, ASSERT.
1297 /* Heartbeat response */
1298 if (Isr & SXG_ISR_PING) {
1299 adapter->PingOutstanding = FALSE;
1301 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
1302 adapter, Isr, NewIsr, 0);
1308 * sxg_process_event_queue - Process our event queue
1311 * - adapter - Adapter structure
1312 * - RssId - The event queue requiring processing
1317 static u32 sxg_process_event_queue(struct adapter_t *adapter, u32 RssId,
1318 int *sxg_napi_continue, int *work_done, int budget)
1320 struct sxg_event_ring *EventRing = &adapter->EventRings[RssId];
1321 struct sxg_event *Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1322 u32 EventsProcessed = 0, Batches = 0;
1323 struct sk_buff *skb;
1324 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1325 struct sk_buff *prev_skb = NULL;
1326 struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
1328 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1330 u32 ReturnStatus = 0;
1332 ASSERT((adapter->State == SXG_STATE_RUNNING) ||
1333 (adapter->State == SXG_STATE_PAUSING) ||
1334 (adapter->State == SXG_STATE_PAUSED) ||
1335 (adapter->State == SXG_STATE_HALTING));
1337 * We may still have unprocessed events on the queue if
1338 * the card crashed. Don't process them.
1340 if (adapter->Dead) {
1344 * In theory there should only be a single processor that
1345 * accesses this queue, and only at interrupt-DPC time. So
1346 * we shouldn't need a lock for any of this.
1348 while (Event->Status & EVENT_STATUS_VALID) {
1349 (*sxg_napi_continue) = 1;
1350 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1351 Event, Event->Code, Event->Status,
1352 adapter->NextEvent);
1353 switch (Event->Code) {
1354 case EVENT_CODE_BUFFERS:
1355 /* struct sxg_ring_info Head & Tail == unsigned char */
1356 ASSERT(!(Event->CommandIndex & 0xFF00));
1357 sxg_complete_descriptor_blocks(adapter,
1358 Event->CommandIndex);
1360 case EVENT_CODE_SLOWRCV:
1362 --adapter->RcvBuffersOnCard;
1363 if ((skb = sxg_slow_receive(adapter, Event))) {
1365 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1366 /* Add it to our indication list */
1367 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1368 IndicationList, num_skbs);
1370 * Linux, we just pass up each skb to the
1371 * protocol above at this point, there is no
1372 * capability of an indication list.
1375 /* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1376 /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1377 rx_bytes = Event->Length;
1378 adapter->stats.rx_packets++;
1379 adapter->stats.rx_bytes += rx_bytes;
1380 #if SXG_OFFLOAD_IP_CHECKSUM
1381 skb->ip_summed = CHECKSUM_UNNECESSARY;
1383 skb->dev = adapter->netdev;
1384 netif_receive_skb(skb);
1389 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1390 __func__, Event->Code);
1394 * See if we need to restock card receive buffers.
1395 * There are two things to note here:
1396 * First - This test is not SMP safe. The
1397 * adapter->BuffersOnCard field is protected via atomic
1398 * interlocked calls, but we do not protect it with respect
1399 * to these tests. The only way to do that is with a lock,
1400 * and I don't want to grab a lock every time we adjust the
1401 * BuffersOnCard count. Instead, we allow the buffer
1402 * replenishment to be off once in a while. The worst that
1403 * can happen is the card is given one more-or-less descriptor
1404 * block than the arbitrary value we've chosen. No big deal
1405 * In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is referenced.
1407 * Second - We expect this test to rarely
1408 * evaluate to true. We attempt to refill descriptor blocks
1409 * as they are returned to us (sxg_complete_descriptor_blocks)
1410 * so the only time this should evaluate to true is when
1411 * sxg_complete_descriptor_blocks failed to allocate
1414 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1415 sxg_stock_rcv_buffers(adapter);
1418 * It's more efficient to just set this to zero.
1419 * But clearing the top bit saves potential debug info...
1421 Event->Status &= ~EVENT_STATUS_VALID;
1422 /* Advance to the next event */
1423 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1424 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1426 if (EventsProcessed == EVENT_RING_BATCH) {
1427 /* Release a batch of events back to the card */
1428 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1429 EVENT_RING_BATCH, FALSE);
1430 EventsProcessed = 0;
1432 * If we've processed our batch limit, break out of the
1433 * loop and return SXG_ISR_EVENT to arrange for us to
1436 if (Batches++ == EVENT_BATCH_LIMIT) {
1437 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1438 TRACE_NOISY, "EvtLimit", Batches,
1439 adapter->NextEvent, 0, 0);
1440 ReturnStatus = SXG_ISR_EVENT;
1444 if (*work_done >= budget) {
1445 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1446 EventsProcessed, FALSE);
1447 EventsProcessed = 0;
1448 (*sxg_napi_continue) = 0;
1452 if (!(Event->Status & EVENT_STATUS_VALID))
1453 (*sxg_napi_continue) = 0;
1455 #ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1456 /* Indicate any received dumb-nic frames */
1457 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1459 /* Release events back to the card. */
1460 if (EventsProcessed) {
1461 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1462 EventsProcessed, FALSE);
1464 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
1465 Batches, EventsProcessed, adapter->NextEvent, num_skbs);
1467 return (ReturnStatus);
1471 * sxg_complete_slow_send - Complete slowpath or dumb-nic sends
1474 * adapter - A pointer to our adapter structure
1475 * irq_context - An integer to denote if we are in interrupt context
1479 static void sxg_complete_slow_send(struct adapter_t *adapter, int irq_context)
1481 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
1482 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
1484 struct sxg_cmd *XmtCmd;
1485 unsigned long flags = 0;
1486 unsigned long sgl_flags = 0;
1487 unsigned int processed_count = 0;
1490 * NOTE - This lock is dropped and regrabbed in this loop.
1491 * This means two different processors can both be running
1492 * through this loop. Be *very* careful.
1495 if(!spin_trylock(&adapter->XmtZeroLock))
1499 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1501 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1502 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1504 while ((XmtRingInfo->Tail != *adapter->XmtRingZeroIndex)
1505 && processed_count++ < SXG_COMPLETE_SLOW_SEND_LIMIT) {
1507 * Locate the current Cmd (ring descriptor entry), and
1508 * associated SGL, and advance the tail
1510 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1511 ASSERT(ContextType);
1512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1513 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1514 /* Clear the SGL field. */
1517 switch (*ContextType) {
1520 struct sk_buff *skb;
1521 struct sxg_scatter_gather *SxgSgl =
1522 (struct sxg_scatter_gather *)ContextType;
1523 dma64_addr_t FirstSgeAddress;
1526 /* Dumb-nic send. Command context is the dumb-nic SGL */
1527 skb = (struct sk_buff *)ContextType;
1528 skb = SxgSgl->DumbPacket;
1529 FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress;
1530 FirstSgeLength = XmtCmd->Buffer.FirstSgeLength;
1531 /* Complete the send */
1532 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1533 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1535 ASSERT(adapter->Stats.XmtQLen);
1537 * Now drop the lock and complete the send
1538 * back to Microsoft. We need to drop the lock
1539 * because Microsoft can come back with a
1540 * chimney send, which results in a double trip
1544 spin_unlock(&adapter->XmtZeroLock);
1546 spin_unlock_irqrestore(
1547 &adapter->XmtZeroLock, flags);
1549 SxgSgl->DumbPacket = NULL;
1550 SXG_COMPLETE_DUMB_SEND(adapter, skb,
1553 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL,
1555 /* and reacquire.. */
1557 if(!spin_trylock(&adapter->XmtZeroLock))
1561 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
1569 spin_unlock(&adapter->XmtZeroLock);
1571 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
1573 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1574 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1581 * adapter - A pointer to our adapter structure
1582 * Event - Receive event
1586 static struct sk_buff *sxg_slow_receive(struct adapter_t *adapter,
1587 struct sxg_event *Event)
1589 u32 BufferSize = adapter->ReceiveBufferSize;
1590 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
1591 struct sk_buff *Packet;
1592 static int read_counter = 0;
1594 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *) Event->HostHandle;
1595 if(read_counter++ & 0x100)
1597 sxg_collect_statistics(adapter);
1600 ASSERT(RcvDataBufferHdr);
1601 ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
1602 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1603 RcvDataBufferHdr, RcvDataBufferHdr->State,
1604 /*RcvDataBufferHdr->VirtualAddress*/ 0);
1605 /* Drop rcv frames in non-running state */
1606 switch (adapter->State) {
1607 case SXG_STATE_RUNNING:
1609 case SXG_STATE_PAUSING:
1610 case SXG_STATE_PAUSED:
1611 case SXG_STATE_HALTING:
1619 * memcpy(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1620 * RcvDataBufferHdr->VirtualAddress, Event->Length);
1623 /* Change buffer state to UPSTREAM */
1624 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1625 if (Event->Status & EVENT_STATUS_RCVERR) {
1626 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1627 Event, Event->Status, Event->HostHandle, 0);
1628 /* XXXTODO - Remove this print later */
1629 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
1630 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1631 sxg_process_rcv_error(adapter, *(u32 *)
1632 SXG_RECEIVE_DATA_LOCATION
1633 (RcvDataBufferHdr));
1636 #if XXXTODO /* VLAN stuff */
1637 /* If there's a VLAN tag, extract it and validate it */
1638 if (((struct ether_header *)
1639 (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->EtherType
1640 == ETHERTYPE_VLAN) {
1641 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
1643 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
1645 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1651 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1653 if (!sxg_mac_filter(adapter,
1654 (struct ether_header *)(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)),
1656 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
1657 Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1662 Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
1663 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1664 Packet->protocol = eth_type_trans(Packet, adapter->netdev);
1666 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1667 RcvDataBufferHdr, Packet, Event->Length, 0);
1668 /* Lastly adjust the receive packet length. */
1669 RcvDataBufferHdr->SxgDumbRcvPacket = NULL;
1670 RcvDataBufferHdr->PhysicalAddress = (dma_addr_t)NULL;
1671 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
1672 if (RcvDataBufferHdr->skb)
1674 spin_lock(&adapter->RcvQLock);
1675 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1676 // adapter->RcvBuffersOnCard ++;
1677 spin_unlock(&adapter->RcvQLock);
1682 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
1683 RcvDataBufferHdr, Event->Length, 0, 0);
1684 adapter->stats.rx_dropped++;
1685 // adapter->Stats.RcvDiscards++;
1686 spin_lock(&adapter->RcvQLock);
1687 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
1688 spin_unlock(&adapter->RcvQLock);
1693 * sxg_process_rcv_error - process receive error and update
1697 * adapter - Adapter structure
1698 * ErrorStatus - 4-byte receive error status
1700 * Return Value : None
1702 static void sxg_process_rcv_error(struct adapter_t *adapter, u32 ErrorStatus)
1706 adapter->stats.rx_errors++;
1708 if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
1709 Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
1711 case SXG_RCV_STATUS_TRANSPORT_CSUM:
1712 adapter->Stats.TransportCsum++;
1714 case SXG_RCV_STATUS_TRANSPORT_UFLOW:
1715 adapter->Stats.TransportUflow++;
1717 case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
1718 adapter->Stats.TransportHdrLen++;
1722 if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
1723 Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
1725 case SXG_RCV_STATUS_NETWORK_CSUM:
1726 adapter->Stats.NetworkCsum++;
1728 case SXG_RCV_STATUS_NETWORK_UFLOW:
1729 adapter->Stats.NetworkUflow++;
1731 case SXG_RCV_STATUS_NETWORK_HDRLEN:
1732 adapter->Stats.NetworkHdrLen++;
1736 if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
1737 adapter->Stats.Parity++;
1739 if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
1740 Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
1742 case SXG_RCV_STATUS_LINK_PARITY:
1743 adapter->Stats.LinkParity++;
1745 case SXG_RCV_STATUS_LINK_EARLY:
1746 adapter->Stats.LinkEarly++;
1748 case SXG_RCV_STATUS_LINK_BUFOFLOW:
1749 adapter->Stats.LinkBufOflow++;
1751 case SXG_RCV_STATUS_LINK_CODE:
1752 adapter->Stats.LinkCode++;
1754 case SXG_RCV_STATUS_LINK_DRIBBLE:
1755 adapter->Stats.LinkDribble++;
1757 case SXG_RCV_STATUS_LINK_CRC:
1758 adapter->Stats.LinkCrc++;
1760 case SXG_RCV_STATUS_LINK_OFLOW:
1761 adapter->Stats.LinkOflow++;
1763 case SXG_RCV_STATUS_LINK_UFLOW:
1764 adapter->Stats.LinkUflow++;
1774 * adapter - Adapter structure
1775 * pether - Ethernet header
1776 * length - Frame length
1778 * Return Value : TRUE if the frame is to be allowed
1780 static bool sxg_mac_filter(struct adapter_t *adapter,
1781 struct ether_header *EtherHdr, ushort length)
1784 struct net_device *dev = adapter->netdev;
1786 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1787 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1789 if (adapter->MacFilter & MAC_BCAST) {
1790 adapter->Stats.DumbRcvBcastPkts++;
1791 adapter->Stats.DumbRcvBcastBytes += length;
1796 if (adapter->MacFilter & MAC_ALLMCAST) {
1797 adapter->Stats.DumbRcvMcastPkts++;
1798 adapter->Stats.DumbRcvMcastBytes += length;
1801 if (adapter->MacFilter & MAC_MCAST) {
1802 struct dev_mc_list *mclist = dev->mc_list;
1804 ETHER_EQ_ADDR(mclist->da_addr,
1805 EtherHdr->ether_dhost,
1811 DumbRcvMcastBytes += length;
1814 mclist = mclist->next;
1818 } else if (adapter->MacFilter & MAC_DIRECTED) {
1820 * Not broadcast or multicast. Must be directed at us or
1821 * the card is in promiscuous mode. Either way, consider it
1822 * ours if MAC_DIRECTED is set
1824 adapter->Stats.DumbRcvUcastPkts++;
1825 adapter->Stats.DumbRcvUcastBytes += length;
1828 if (adapter->MacFilter & MAC_PROMISC) {
1829 /* Whatever it is, keep it. */
1835 static int sxg_register_interrupt(struct adapter_t *adapter)
1837 if (!adapter->intrregistered) {
1841 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1842 __func__, adapter, adapter->netdev->irq, NR_IRQS);
1844 spin_unlock_irqrestore(&sxg_global.driver_lock,
1847 retval = request_irq(adapter->netdev->irq,
1850 adapter->netdev->name, adapter->netdev);
1852 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1855 DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
1856 adapter->netdev->name, retval);
1859 adapter->intrregistered = 1;
1860 adapter->IntRegistered = TRUE;
1861 /* Disable RSS with line-based interrupts */
1862 adapter->MsiEnabled = FALSE;
1863 adapter->RssEnabled = FALSE;
1864 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1865 __func__, adapter, adapter->netdev->irq);
1867 return (STATUS_SUCCESS);
1870 static void sxg_deregister_interrupt(struct adapter_t *adapter)
1872 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1874 slic_init_cleanup(adapter);
1876 memset(&adapter->stats, 0, sizeof(struct net_device_stats));
1877 adapter->error_interrupts = 0;
1878 adapter->rcv_interrupts = 0;
1879 adapter->xmit_interrupts = 0;
1880 adapter->linkevent_interrupts = 0;
1881 adapter->upr_interrupts = 0;
1882 adapter->num_isrs = 0;
1883 adapter->xmit_completes = 0;
1884 adapter->rcv_broadcasts = 0;
1885 adapter->rcv_multicasts = 0;
1886 adapter->rcv_unicasts = 0;
1887 DBG_ERROR("sxg: %s EXIT\n", __func__);
1893 * Perform initialization of our slic interface.
1896 static int sxg_if_init(struct adapter_t *adapter)
1898 struct net_device *dev = adapter->netdev;
1901 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d] flags[%x]\n",
1902 __func__, adapter->netdev->name,
1904 adapter->linkstate, dev->flags);
1906 /* adapter should be down at this point */
1907 if (adapter->state != ADAPT_DOWN) {
1908 DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
1911 ASSERT(adapter->linkstate == LINK_DOWN);
1913 adapter->devflags_prev = dev->flags;
1914 adapter->MacFilter = MAC_DIRECTED;
1916 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
1917 adapter->netdev->name);
1918 if (dev->flags & IFF_BROADCAST) {
1919 adapter->MacFilter |= MAC_BCAST;
1920 DBG_ERROR("BCAST ");
1922 if (dev->flags & IFF_PROMISC) {
1923 adapter->MacFilter |= MAC_PROMISC;
1924 DBG_ERROR("PROMISC ");
1926 if (dev->flags & IFF_ALLMULTI) {
1927 adapter->MacFilter |= MAC_ALLMCAST;
1928 DBG_ERROR("ALL_MCAST ");
1930 if (dev->flags & IFF_MULTICAST) {
1931 adapter->MacFilter |= MAC_MCAST;
1932 DBG_ERROR("MCAST ");
1936 status = sxg_register_interrupt(adapter);
1937 if (status != STATUS_SUCCESS) {
1938 DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
1940 sxg_deregister_interrupt(adapter);
1944 adapter->state = ADAPT_UP;
1946 /* clear any pending events, then enable interrupts */
1947 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
1949 return (STATUS_SUCCESS);
1952 void sxg_set_interrupt_aggregation(struct adapter_t *adapter)
1955 * Top bit disables aggregation on xmt (SXG_AGG_XMT_DISABLE).
1956 * Make sure Max is less than 0x8000.
1958 adapter->max_aggregation = SXG_MAX_AGG_DEFAULT;
1959 adapter->min_aggregation = SXG_MIN_AGG_DEFAULT;
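/*
 * The Aggregation register written below packs both limits into a single
 * word: the maximum shifted up by SXG_MAX_AGG_SHIFT and the minimum in
 * the low bits. Keeping the maximum under 0x8000 matters because the top
 * bit is SXG_AGG_XMT_DISABLE, which would turn off aggregation on
 * transmit (see the comment above).
 */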
1960 WRITE_REG(adapter->UcodeRegs[0].Aggregation,
1961 ((adapter->max_aggregation << SXG_MAX_AGG_SHIFT) |
1962 adapter->min_aggregation),
1966 static int sxg_entry_open(struct net_device *dev)
1968 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
1973 sxg_second_open(adapter->netdev);
1975 return STATUS_SUCCESS;
1981 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
1982 adapter->activated);
1984 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
1985 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
1986 adapter->netdev, adapter, adapter->port);
1988 netif_stop_queue(adapter->netdev);
1990 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1991 if (!adapter->activated) {
1992 sxg_global.num_sxg_ports_active++;
1993 adapter->activated = 1;
1995 /* Initialize the adapter */
1996 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
1997 status = sxg_initialize_adapter(adapter);
1998 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
2001 if (status == STATUS_SUCCESS) {
2002 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
2003 status = sxg_if_init(adapter);
2004 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
2008 if (status != STATUS_SUCCESS) {
2009 if (adapter->activated) {
2010 sxg_global.num_sxg_ports_active--;
2011 adapter->activated = 0;
2013 spin_unlock_irqrestore(&sxg_global.driver_lock,
2017 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
2018 sxg_set_interrupt_aggregation(adapter);
2019 napi_enable(&adapter->napi);
2021 /* Enable interrupts */
2022 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2024 DBG_ERROR("sxg: %s EXIT\n", __func__);
2026 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2027 return STATUS_SUCCESS;
2030 int sxg_second_open(struct net_device * dev)
2032 struct adapter_t *adapter = (struct adapter_t*) netdev_priv(dev);
2035 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2036 netif_start_queue(adapter->netdev);
2037 adapter->state = ADAPT_UP;
2038 adapter->linkstate = LINK_UP;
2040 status = sxg_initialize_adapter(adapter);
2041 sxg_set_interrupt_aggregation(adapter);
2042 napi_enable(&adapter->napi);
2043 /* Re-enable interrupts */
2044 SXG_ENABLE_ALL_INTERRUPTS(adapter);
2046 netif_carrier_on(dev);
2047 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2048 sxg_register_interrupt(adapter);
2049 return (STATUS_SUCCESS);
2053 static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
2058 struct net_device *dev = pci_get_drvdata(pcidev);
2059 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2061 flush_scheduled_work();
2063 /* Deallocate Resources */
2064 unregister_netdev(dev);
2065 sxg_free_resources(adapter);
2069 mmio_start = pci_resource_start(pcidev, 0);
2070 mmio_len = pci_resource_len(pcidev, 0);
2072 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
2073 mmio_start, mmio_len);
2074 release_mem_region(mmio_start, mmio_len);
2076 mmio_start = pci_resource_start(pcidev, 2);
2077 mmio_len = pci_resource_len(pcidev, 2);
2079 DBG_ERROR("sxg: %s rel_region(2) start[%x] len[%x]\n", __FUNCTION__,
2080 mmio_start, mmio_len);
2081 release_mem_region(mmio_start, mmio_len);
2083 pci_disable_device(pcidev);
2085 DBG_ERROR("sxg: %s deallocate device\n", __func__);
2087 DBG_ERROR("sxg: %s EXIT\n", __func__);
2090 static int sxg_entry_halt(struct net_device *dev)
2092 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2094 napi_disable(&adapter->napi);
2095 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
2096 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
2098 netif_stop_queue(adapter->netdev);
2099 adapter->state = ADAPT_DOWN;
2100 adapter->linkstate = LINK_DOWN;
2101 adapter->devflags_prev = 0;
2102 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
2103 __func__, dev->name, adapter, adapter->state);
2105 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
2106 DBG_ERROR("sxg: %s EXIT\n", __func__);
2108 /* Disable interrupts */
2109 SXG_DISABLE_ALL_INTERRUPTS(adapter);
2111 netif_carrier_off(dev);
2112 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
2114 sxg_deregister_interrupt(adapter);
2115 return (STATUS_SUCCESS);
2118 static int sxg_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2121 /* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev);*/
2123 case SIOCSLICSETINTAGG:
2125 /* struct adapter_t *adapter = (struct adapter_t *)
2131 if (copy_from_user(data, rq->ifr_data, 28)) {
2132 DBG_ERROR("copy_from_user FAILED getting \
2138 "%s: set interrupt aggregation to %d\n",
2144 /* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
2150 #define NORMAL_ETHFRAME 0
2153 * sxg_send_packets - Send a skb packet
2156 * skb - The packet to send
2157 * dev - Our linux net device that refs our adapter
 *	0 regardless of outcome. XXXTODO: refer to the e1000 driver for
 *	proper return-value handling.
2162 static int sxg_send_packets(struct sk_buff *skb, struct net_device *dev)
2164 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
2165 u32 status = STATUS_SUCCESS;
2168 * DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
2172 /* Check the adapter state */
2173 switch (adapter->State) {
2174 case SXG_STATE_INITIALIZING:
2175 case SXG_STATE_HALTED:
2176 case SXG_STATE_SHUTDOWN:
2177 ASSERT(0); /* unexpected */
2179 case SXG_STATE_RESETTING:
2180 case SXG_STATE_SLEEP:
2181 case SXG_STATE_BOOTDIAG:
2182 case SXG_STATE_DIAG:
2183 case SXG_STATE_HALTING:
2184 status = STATUS_FAILURE;
2186 case SXG_STATE_RUNNING:
2187 if (adapter->LinkState != SXG_LINK_UP) {
2188 status = STATUS_FAILURE;
2193 status = STATUS_FAILURE;
2195 if (status != STATUS_SUCCESS) {
2199 status = sxg_transmit_packet(adapter, skb);
2200 if (status == STATUS_SUCCESS) {
2205 /* reject & complete all the packets if they cant be sent */
2206 if (status != STATUS_SUCCESS) {
2208 /* sxg_send_packets_fail(adapter, skb, status); */
2210 SXG_DROP_DUMB_SEND(adapter, skb);
2211 adapter->stats.tx_dropped++;
2212 return NETDEV_TX_BUSY;
2215 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
2219 return NETDEV_TX_OK;
2223 * sxg_transmit_packet
2225 * This function transmits a single packet.
2228 * adapter - Pointer to our adapter structure
2229 * skb - The packet to be sent
2231 * Return - STATUS of send
2233 static int sxg_transmit_packet(struct adapter_t *adapter, struct sk_buff *skb)
2235 struct sxg_x64_sgl *pSgl;
2236 struct sxg_scatter_gather *SxgSgl;
2237 unsigned long sgl_flags;
2238 /* void *SglBuffer; */
2239 /* u32 SglBufferLength; */
2242 * The vast majority of work is done in the shared
2243 * sxg_dumb_sgl routine.
2245 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
2246 adapter, skb, 0, 0);
2248 /* Allocate a SGL buffer */
2249 SXG_GET_SGL_BUFFER(adapter, SxgSgl, 0);
2251 adapter->Stats.NoSglBuf++;
2252 adapter->stats.tx_errors++;
2253 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
2254 adapter, skb, 0, 0);
2255 return (STATUS_RESOURCES);
2257 ASSERT(SxgSgl->adapter == adapter);
2258 /*SglBuffer = SXG_SGL_BUFFER(SxgSgl);
2259 SglBufferLength = SXG_SGL_BUF_SIZE; */
2260 SxgSgl->VlanTag.VlanTci = 0;
2261 SxgSgl->VlanTag.VlanTpid = 0;
2262 SxgSgl->Type = SXG_SGL_DUMB;
2263 SxgSgl->DumbPacket = skb;
2266 /* Call the common sxg_dumb_sgl routine to complete the send. */
2267 return (sxg_dumb_sgl(pSgl, SxgSgl));
2275 * SxgSgl - struct sxg_scatter_gather
2278 * Status of send operation.
2280 static int sxg_dumb_sgl(struct sxg_x64_sgl *pSgl,
2281 struct sxg_scatter_gather *SxgSgl)
2283 struct adapter_t *adapter = SxgSgl->adapter;
2284 struct sk_buff *skb = SxgSgl->DumbPacket;
2285 /* For now, all dumb-nic sends go on RSS queue zero */
2286 struct sxg_xmt_ring *XmtRing = &adapter->XmtRings[0];
2287 struct sxg_ring_info *XmtRingInfo = &adapter->XmtRingZeroInfo;
2288 struct sxg_cmd *XmtCmd = NULL;
2289 /* u32 Index = 0; */
2290 u32 DataLength = skb->len;
2291 /* unsigned int BufLen; */
2292 /* u32 SglOffset; */
2294 unsigned long flags;
2295 unsigned long queue_id=0;
2297 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
2298 pSgl, SxgSgl, 0, 0);
2300 /* Set aside a pointer to the sgl */
2301 SxgSgl->pSgl = pSgl;
	/*
	 * Sanity check that our SGL format is as we expect.
	 * XXX: this ASSERT compares the SGE size with itself and so can never
	 * fire; it presumably ought to check against the hardware SGE size.
	 */
	ASSERT(sizeof(struct sxg_x64_sge) == sizeof(struct sxg_x64_sge));
2305 /* Shouldn't be a vlan tag on this frame */
2306 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2307 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
	 * From here below we work with the SGL placed in our
	 * buffer.
2314 SxgSgl->Sgl.NumberOfElements = 1;
2316 * Set ucode Queue ID based on bottom bits of destination TCP port.
2317 * This Queue ID splits slowpath/dumb-nic packet processing across
2318 * multiple threads on the card to improve performance. It is split
2319 * using the TCP port to avoid out-of-order packets that can result
2320 * from multithreaded processing. We use the destination port because
2321 * we expect to be run on a server, so in nearly all cases the local
2322 * port is likely to be constant (well-known server port) and the
2323 * remote port is likely to be random. The exception to this is iSCSI,
 * in which case we use the source port instead. Note
 * that the original attempt at XOR'ing source and dest ports resulted in
2326 * poor balance on NTTTCP/iometer applications since they tend to
2327 * line up (even-even, odd-odd..).
2330 if (skb->protocol == htons(ETH_P_IP)) {
		if ((ip->protocol == IPPROTO_TCP) && (DataLength >= sizeof(
						      struct tcphdr))) {
2336 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2337 (ntohs (tcp_hdr(skb)->source) &
2338 SXG_LARGE_SEND_QUEUE_MASK):
2339 (ntohs(tcp_hdr(skb)->dest) &
2340 SXG_LARGE_SEND_QUEUE_MASK));
2342 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2343 if ( (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && (DataLength >=
2344 sizeof(struct tcphdr)) ) {
2345 queue_id = ((ntohs(tcp_hdr(skb)->dest) == ISCSI_PORT) ?
2346 (ntohs (tcp_hdr(skb)->source) &
2347 SXG_LARGE_SEND_QUEUE_MASK):
2348 (ntohs(tcp_hdr(skb)->dest) &
				      SXG_LARGE_SEND_QUEUE_MASK));
		}
	}
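	/*
	 * Illustrative example (the mask value is an assumption, not taken
	 * from the headers): with four queues SXG_LARGE_SEND_QUEUE_MASK would
	 * be 0x3, so a non-iSCSI flow to destination port 8080 (0x1F90) maps
	 * to queue 0x1F90 & 0x3 = 0, while its neighbour on port 8081 maps to
	 * queue 1.  Packets of the same flow therefore always hit the same
	 * ucode thread, which preserves ordering.
	 */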
2353 /* Grab the spinlock and acquire a command */
2354 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2355 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2356 if (XmtCmd == NULL) {
2358 * Call sxg_complete_slow_send to see if we can
2359 * free up any XmtRingZero entries and then try again
2362 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2363 sxg_complete_slow_send(adapter, 0);
2364 spin_lock_irqsave(&adapter->XmtZeroLock, flags);
2365 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2366 if (XmtCmd == NULL) {
2367 adapter->Stats.XmtZeroFull++;
2371 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2372 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2374 adapter->stats.tx_packets++;
2375 adapter->stats.tx_bytes += DataLength;
2376 #if XXXTODO /* Stats stuff */
2377 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2378 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2379 adapter->Stats.DumbXmtBcastPkts++;
2380 adapter->Stats.DumbXmtBcastBytes += DataLength;
2382 adapter->Stats.DumbXmtMcastPkts++;
2383 adapter->Stats.DumbXmtMcastBytes += DataLength;
2386 adapter->Stats.DumbXmtUcastPkts++;
2387 adapter->Stats.DumbXmtUcastBytes += DataLength;
2391 * Fill in the command
2392 * Copy out the first SGE to the command and adjust for offset
2394 phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
2396 memset(XmtCmd, '\0', sizeof(*XmtCmd));
2397 XmtCmd->Buffer.FirstSgeAddress = phys_addr;
2398 XmtCmd->Buffer.FirstSgeLength = DataLength;
2399 XmtCmd->Buffer.SgeOffset = 0;
2400 XmtCmd->Buffer.TotalLength = DataLength;
2401 XmtCmd->SgEntries = 1;
	 * Advance transmit cmd descriptor by 1.
2405 * NOTE - See comments in SxgTcpOutput where we write
2406 * to the XmtCmd register regarding CPU ID values and/or
2407 * multiple commands.
2408 * Top 16 bits specify queue_id. See comments about queue_id above
2410 /* Four queues at the moment */
2411 ASSERT((queue_id & ~SXG_LARGE_SEND_QUEUE_MASK) == 0);
2412 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, ((queue_id << 16) | 1), TRUE);
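	/*
	 * Illustrative value (not driver code): for queue_id 2 the register
	 * write above is (2 << 16) | 1 = 0x00020001, i.e. one new command
	 * posted to ucode queue 2.
	 */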
2413 adapter->Stats.XmtQLen++; /* Stats within lock */
2414 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2415 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2416 XmtCmd, pSgl, SxgSgl, 0);
2417 return STATUS_SUCCESS;
2421 * NOTE - Only jump to this label AFTER grabbing the
2422 * XmtZeroLock, and DO NOT DROP IT between the
2423 * command allocation and the following abort.
2426 SXG_ABORT_CMD(XmtRingInfo);
2428 spin_unlock_irqrestore(&adapter->XmtZeroLock, flags);
2432 * Jump to this label if failure occurs before the
2433 * XmtZeroLock is grabbed
2435 adapter->stats.tx_errors++;
2436 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2437 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2438 /* SxgSgl->DumbPacket is the skb */
2439 // SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket);
2441 return STATUS_FAILURE;
2445 * Link management functions
 * sxg_initialize_link - Initialize the link hardware (MAC, XGXS and PHY)
2450 * adapter - A pointer to our adapter structure
2455 static int sxg_initialize_link(struct adapter_t *adapter)
2457 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2463 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2466 /* Reset PHY and XGXS module */
2467 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2469 /* Reset transmit configuration register */
2470 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2472 /* Reset receive configuration register */
2473 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2475 /* Reset all MAC modules */
2476 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2480 * XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
	 * is stored with the first byte (0a) in byte 0
	 * of the MAC address. Possibly reverse?
2484 Value = *(u32 *) adapter->macaddr;
2485 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2486 /* also write the MAC address to the MAC. Endian is reversed. */
2487 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2488 Value = (*(u16 *) & adapter->macaddr[4] & 0x0000FFFF);
2489 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2490 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2491 Value = ntohl(Value);
2492 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
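	/*
	 * Worked example (illustrative, assumes a little-endian host): for the
	 * address 0a:0b:0c:0d:0e:0f, macaddr[] holds {0a,0b,0c,0d,0e,0f}, so
	 * *(u32 *)macaddr reads 0x0d0c0b0a.  LinkAddress0Low therefore gets
	 * 0x0d0c0b0a while MacAddressLow gets ntohl(0x0d0c0b0a) = 0x0a0b0c0d.
	 * The high half reads 0x0f0e, and ntohl(0x00000f0e) = 0x0e0f0000,
	 * which places the last two bytes in bits [31:16] of MacAddressHigh.
	 */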
2493 /* Link address 1 */
2494 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2495 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2496 /* Link address 2 */
2497 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2498 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2499 /* Link address 3 */
2500 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2501 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2503 /* Enable MAC modules */
2504 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2507 WRITE_REG(HwRegs->MacConfig1, (
2508 /* Allow sending of pause */
2509 AXGMAC_CFG1_XMT_PAUSE |
2511 AXGMAC_CFG1_XMT_EN |
2512 /* Enable detection of pause */
2513 AXGMAC_CFG1_RCV_PAUSE |
2514 /* Enable receive */
2515 AXGMAC_CFG1_RCV_EN |
2516 /* short frame detection */
2517 AXGMAC_CFG1_SHORT_ASSERT |
2518 /* Verify frame length */
2519 AXGMAC_CFG1_CHECK_LEN |
2521 AXGMAC_CFG1_GEN_FCS |
2522 /* Pad frames to 64 bytes */
2523 AXGMAC_CFG1_PAD_64),
2526 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2527 if (adapter->JumboEnabled) {
2528 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2531 * AMIIM Configuration Register -
2532 * The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
2533 * (bottom bits) of this register is used to determine the MDC frequency
2534 * as specified in the A-XGMAC Design Document. This value must not be
2535 * zero. The following value (62 or 0x3E) is based on our MAC transmit
2536 * clock frequency (MTCLK) of 312.5 MHz. Given a maximum MDIO clock
2537 * frequency of 2.5 MHz (see the PHY spec), we get:
2538 * 312.5/(2*(X+1)) < 2.5 ==> X = 62.
2539 * This value happens to be the default value for this register, so we
2540 * really don't have to do this.
2542 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
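	/*
	 * Worked check of the formula above (illustrative only): with
	 * X = 0x3E = 62, MDC = 312.5 MHz / (2 * (62 + 1)) = 312.5 / 126
	 * ~= 2.48 MHz, which is just under the 2.5 MHz PHY limit.  A value
	 * of 61 would give 312.5 / 124 ~= 2.52 MHz and violate the spec.
	 */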
2544 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2545 WRITE_REG(HwRegs->LinkStatus,
2548 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2549 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2552 * Per information given by Aeluros, wait 100 ms after removing reset.
	 * It's not enough to wait for the self-clearing reset bit in reg 0 to
	 * clear.
	/* Verify the PHY has come up by checking that the Reset bit has
	 * cleared. */
2561 status = sxg_read_mdio_reg(adapter,
2562 MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2563 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2565 DBG_ERROR("After sxg_read_mdio_reg Value[%x] fail=%x\n", Value,
2566 (Value & PMA_CONTROL1_RESET));
2567 if (status != STATUS_SUCCESS)
2568 return (STATUS_FAILURE);
2569 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2570 return (STATUS_FAILURE);
2572 /* The SERDES should be initialized by now - confirm */
2573 READ_REG(HwRegs->LinkStatus, Value);
2574 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2575 return (STATUS_FAILURE);
2577 /* The XAUI link should also be up - confirm */
2578 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2579 return (STATUS_FAILURE);
2581 /* Initialize the PHY */
2582 status = sxg_phy_init(adapter);
2583 if (status != STATUS_SUCCESS)
2584 return (STATUS_FAILURE);
2586 /* Enable the Link Alarm */
2588 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2589 * LASI_CONTROL - LASI control register
2590 * LASI_CTL_LS_ALARM_ENABLE - enable link alarm bit
2592 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2594 LASI_CTL_LS_ALARM_ENABLE);
2595 if (status != STATUS_SUCCESS)
2596 return (STATUS_FAILURE);
2598 /* XXXTODO - temporary - verify bit is set */
2600 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2601 * LASI_CONTROL - LASI control register
2603 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2607 if (status != STATUS_SUCCESS)
2608 return (STATUS_FAILURE);
2609 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2610 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2612 /* Enable receive */
2613 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2614 ConfigData = (RCV_CONFIG_ENABLE |
2615 RCV_CONFIG_ENPARSE |
2617 RCV_CONFIG_RCVPAUSE |
2620 RCV_CONFIG_HASH_16 |
2621 RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
2622 WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
2624 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2626 /* Mark the link as down. We'll get a link event when it comes up. */
2627 sxg_link_state(adapter, SXG_LINK_DOWN);
2629 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
2631 return (STATUS_SUCCESS);
2635 * sxg_phy_init - Initialize the PHY
2638 * adapter - A pointer to our adapter structure
2643 static int sxg_phy_init(struct adapter_t *adapter)
2646 struct phy_ucode *p;
2649 DBG_ERROR("ENTER %s\n", __func__);
2651 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module
2652 * 0xC205 - PHY ID register (?)
2653 * &Value - XXXTODO - add def
2655 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2658 if (status != STATUS_SUCCESS)
2659 return (STATUS_FAILURE);
2661 if (Value == 0x0012) {
2662 /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2663 DBG_ERROR("AEL2005C PHY detected. Downloading PHY \
2666 /* Initialize AEL2005C PHY and download PHY microcode */
2667 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2669 /* if address == 0, data == sleep time in ms */
2672 /* write the given data to the specified address */
2673 status = sxg_write_mdio_reg(adapter,
2679 if (status != STATUS_SUCCESS)
2680 return (STATUS_FAILURE);
2684 DBG_ERROR("EXIT %s\n", __func__);
2686 return (STATUS_SUCCESS);
2690 * sxg_link_event - Process a link event notification from the card
2693 * adapter - A pointer to our adapter structure
2698 static void sxg_link_event(struct adapter_t *adapter)
2700 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2701 struct net_device *netdev = adapter->netdev;
2702 enum SXG_LINK_STATE LinkState;
2706 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2708 DBG_ERROR("ENTER %s\n", __func__);
2710 /* Check the Link Status register. We should have a Link Alarm. */
2711 READ_REG(HwRegs->LinkStatus, Value);
2712 if (Value & LS_LINK_ALARM) {
2714 * We got a Link Status alarm. First, pause to let the
2715 * link state settle (it can bounce a number of times)
2719 /* Now clear the alarm by reading the LASI status register. */
2720 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2721 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2722 /* LASI status register */
2725 if (status != STATUS_SUCCESS) {
2726 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2727 sxg_link_state(adapter, SXG_LINK_DOWN);
2730 ASSERT(Value & LASI_STATUS_LS_ALARM);
2732 /* Now get and set the link state */
2733 LinkState = sxg_get_link_state(adapter);
2734 sxg_link_state(adapter, LinkState);
2735 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2736 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2737 if (LinkState == SXG_LINK_UP)
2738 netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
2743 * XXXTODO - Assuming Link Attention is only being generated
2744 * for the Link Alarm pin (and not for a XAUI Link Status change)
2745 * , then it's impossible to get here. Yet we've gotten here
2746 * twice (under extreme conditions - bouncing the link up and
2747 * down many times a second). Needs further investigation.
2749 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2750 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2753 DBG_ERROR("EXIT %s\n", __func__);
2758 * sxg_get_link_state - Determine if the link is up or down
2761 * adapter - A pointer to our adapter structure
2766 static enum SXG_LINK_STATE sxg_get_link_state(struct adapter_t *adapter)
2771 DBG_ERROR("ENTER %s\n", __func__);
2773 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2777 * Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
2778 * the following 3 bits (from 3 different MDIO registers) are all true.
2781 /* MIIM_DEV_PHY_PMA - PHY PMA/PMD module */
2782 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
2783 /* PMA/PMD Receive Signal Detect register */
2786 if (status != STATUS_SUCCESS)
2789 /* If PMA/PMD receive signal detect is 0, then the link is down */
2790 if (!(Value & PMA_RCV_DETECT))
2791 return (SXG_LINK_DOWN);
2793 /* MIIM_DEV_PHY_PCS - PHY PCS module */
2794 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS,
2795 /* PCS 10GBASE-R Status 1 register */
2796 PHY_PCS_10G_STATUS1,
2798 if (status != STATUS_SUCCESS)
2801 /* If PCS is not locked to receive blocks, then the link is down */
2802 if (!(Value & PCS_10B_BLOCK_LOCK))
2803 return (SXG_LINK_DOWN);
2805 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS,/* PHY XS module */
2806 /* XS Lane Status register */
2809 if (status != STATUS_SUCCESS)
2812 /* If XS transmit lanes are not aligned, then the link is down */
2813 if (!(Value & XS_LANE_ALIGN))
2814 return (SXG_LINK_DOWN);
2816 /* All 3 bits are true, so the link is up */
2817 DBG_ERROR("EXIT %s\n", __func__);
2819 return (SXG_LINK_UP);
2822 /* An error occurred reading an MDIO register. This shouldn't happen. */
2823 DBG_ERROR("Error reading an MDIO register!\n");
2825 return (SXG_LINK_DOWN);
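/*
 * Summary of the checks in sxg_get_link_state() above: the link is reported
 * UP only when all three bits are set -- PMA_RCV_DETECT (receive signal
 * present), PCS_10B_BLOCK_LOCK (PCS locked to received blocks) and
 * XS_LANE_ALIGN (XAUI transmit lanes aligned).
 */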
2828 static void sxg_indicate_link_state(struct adapter_t *adapter,
2829 enum SXG_LINK_STATE LinkState)
2831 if (adapter->LinkState == SXG_LINK_UP) {
2832 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2834 netif_start_queue(adapter->netdev);
2836 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2838 netif_stop_queue(adapter->netdev);
2843 * sxg_link_state - Set the link state and if necessary, indicate.
 * This routine is the central point of processing for all link state changes.
2845 * Nothing else in the driver should alter the link state or perform
2846 * link state indications
2849 * adapter - A pointer to our adapter structure
2850 * LinkState - The link state
2855 static void sxg_link_state(struct adapter_t *adapter,
2856 enum SXG_LINK_STATE LinkState)
2858 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2859 adapter, LinkState, adapter->LinkState, adapter->State);
2861 DBG_ERROR("ENTER %s\n", __func__);
2864 * Hold the adapter lock during this routine. Maybe move
2865 * the lock to the caller.
2867 /* IMP TODO : Check if we can survive without taking this lock */
2868 // spin_lock(&adapter->AdapterLock);
2869 if (LinkState == adapter->LinkState) {
2870 /* Nothing changed.. */
2871 // spin_unlock(&adapter->AdapterLock);
2872 DBG_ERROR("EXIT #0 %s. Link status = %d\n",
2873 __func__, LinkState);
2876 /* Save the adapter state */
2877 adapter->LinkState = LinkState;
2879 /* Drop the lock and indicate link state */
2880 // spin_unlock(&adapter->AdapterLock);
2881 DBG_ERROR("EXIT #1 %s\n", __func__);
2883 sxg_indicate_link_state(adapter, LinkState);
2887 * sxg_write_mdio_reg - Write to a register on the MDIO bus
2890 * adapter - A pointer to our adapter structure
2891 * DevAddr - MDIO device number being addressed
2892 * RegAddr - register address for the specified MDIO device
2893 * Value - value to write to the MDIO register
2898 static int sxg_write_mdio_reg(struct adapter_t *adapter,
2899 u32 DevAddr, u32 RegAddr, u32 Value)
2901 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2902 /* Address operation (written to MIIM field reg) */
2904 /* Write operation (written to MIIM field reg) */
2906 u32 Cmd;/* Command (written to MIIM command reg) */
2910 /* DBG_ERROR("ENTER %s\n", __func__); */
2912 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2915 /* Ensure values don't exceed field width */
2916 DevAddr &= 0x001F; /* 5-bit field */
2917 RegAddr &= 0xFFFF; /* 16-bit field */
2918 Value &= 0xFFFF; /* 16-bit field */
2920 /* Set MIIM field register bits for an MIIM address operation */
2921 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2922 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2923 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2924 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2926 /* Set MIIM field register bits for an MIIM write operation */
2927 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2928 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2929 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2930 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2932 /* Set MIIM command register bits to execute an MIIM command */
2933 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2935 /* Reset the command register command bit (in case it's not 0) */
2936 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2938 /* MIIM write to set the address of the specified MDIO register */
2939 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2941 /* Write to MIIM Command Register to execute to address operation */
2942 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2944 /* Poll AMIIM Indicator register to wait for completion */
2945 Timeout = SXG_LINK_TIMEOUT;
2947 udelay(100); /* Timeout in 100us units */
2948 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2949 if (--Timeout == 0) {
2950 return (STATUS_FAILURE);
2952 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2954 /* Reset the command register command bit */
2955 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2957 /* MIIM write to set up an MDIO write operation */
2958 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2960 /* Write to MIIM Command Register to execute the write operation */
2961 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2963 /* Poll AMIIM Indicator register to wait for completion */
2964 Timeout = SXG_LINK_TIMEOUT;
2966 udelay(100); /* Timeout in 100us units */
2967 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2968 if (--Timeout == 0) {
2969 return (STATUS_FAILURE);
2971 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2973 /* DBG_ERROR("EXIT %s\n", __func__); */
2975 return (STATUS_SUCCESS);
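/*
 * Usage sketch (illustrative only): this mirrors how sxg_initialize_link()
 * above uses the routine to enable the link alarm on the PHY's PMA/PMD
 * device.
 */
#if 0
	status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,	/* PMA/PMD module */
				    LASI_CONTROL,	/* LASI control register */
				    LASI_CTL_LS_ALARM_ENABLE);
	if (status != STATUS_SUCCESS)
		return (STATUS_FAILURE);
#endif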
2979 * sxg_read_mdio_reg - Read a register on the MDIO bus
2982 * adapter - A pointer to our adapter structure
2983 * DevAddr - MDIO device number being addressed
2984 * RegAddr - register address for the specified MDIO device
2985 * pValue - pointer to where to put data read from the MDIO register
2990 static int sxg_read_mdio_reg(struct adapter_t *adapter,
2991 u32 DevAddr, u32 RegAddr, u32 *pValue)
2993 struct sxg_hw_regs *HwRegs = adapter->HwRegs;
2994 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2995 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2996 u32 Cmd; /* Command (written to MIIM command reg) */
3000 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
3002 DBG_ERROR("ENTER %s\n", __FUNCTION__);
3004 /* Ensure values don't exceed field width */
3005 DevAddr &= 0x001F; /* 5-bit field */
3006 RegAddr &= 0xFFFF; /* 16-bit field */
3008 /* Set MIIM field register bits for an MIIM address operation */
3009 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3010 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3011 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3012 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
3014 /* Set MIIM field register bits for an MIIM read operation */
3015 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
3016 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
3017 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
3018 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
3020 /* Set MIIM command register bits to execute an MIIM command */
3021 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
3023 /* Reset the command register command bit (in case it's not 0) */
3024 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3026 /* MIIM write to set the address of the specified MDIO register */
3027 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
3029 /* Write to MIIM Command Register to execute to address operation */
3030 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3032 /* Poll AMIIM Indicator register to wait for completion */
3033 Timeout = SXG_LINK_TIMEOUT;
3035 udelay(100); /* Timeout in 100us units */
3036 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3037 if (--Timeout == 0) {
3038 DBG_ERROR("EXIT %s with STATUS_FAILURE 1\n", __FUNCTION__);
3040 return (STATUS_FAILURE);
3042 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3044 /* Reset the command register command bit */
3045 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
3047 /* MIIM write to set up an MDIO register read operation */
3048 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
3050 /* Write to MIIM Command Register to execute the read operation */
3051 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
3053 /* Poll AMIIM Indicator register to wait for completion */
3054 Timeout = SXG_LINK_TIMEOUT;
3056 udelay(100); /* Timeout in 100us units */
3057 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
3058 if (--Timeout == 0) {
3059 DBG_ERROR("EXIT %s with STATUS_FAILURE 2\n", __FUNCTION__);
3061 return (STATUS_FAILURE);
3063 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
3065 /* Read the MDIO register data back from the field register */
3066 READ_REG(HwRegs->MacAmiimField, *pValue);
3067 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
3069 DBG_ERROR("EXIT %s\n", __FUNCTION__);
3071 return (STATUS_SUCCESS);
3075 * Functions to obtain the CRC corresponding to the destination mac address.
3076 * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
3078 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5
3079 * + x^4 + x^2 + x^1.
3081 * After the CRC for the 6 bytes is generated (but before the value is
3082 * complemented), we must then transpose the value and return bits 30-23.
3084 static u32 sxg_crc_table[256];/* Table of CRC's for all possible byte values */
3085 static u32 sxg_crc_init; /* Is table initialized */
/* Construct the CRC32 table */
3088 static void sxg_mcast_init_crc32(void)
	u32 c;			/* CRC shift register */
	u32 e = 0;		/* Poly X-or pattern */
	int i;			/* counter */
	int k;			/* bit counter for the byte being shifted in */
3095 static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
3097 for (i = 0; i < sizeof(p) / sizeof(int); i++) {
3098 e |= 1L << (31 - p[i]);
3101 for (i = 1; i < 256; i++) {
3103 for (k = 8; k; k--) {
3104 c = c & 1 ? (c >> 1) ^ e : c >> 1;
3106 sxg_crc_table[i] = c;
 * Return the MAC hash as described above.
3113 static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
3118 unsigned char machash = 0;
3120 if (!sxg_crc_init) {
3121 sxg_mcast_init_crc32();
3125 crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
3126 for (i = 0, p = macaddr; i < 6; ++p, ++i) {
3127 crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
3130 /* Return bits 1-8, transposed */
3131 for (i = 1; i < 9; i++) {
		machash |= (((crc >> i) & 1) << (8 - i));
	}

	return (machash);
}
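/*
 * Usage sketch (illustrative only, not driver code): the 8-bit hash returned
 * above is truncated to 6 bits and used as a bit index into the 64-bit
 * multicast mask, exactly as sxg_mcast_set_bit() does below.
 */
#if 0
	unsigned char hash = sxg_mcast_get_mac_hash(mc_addr); /* mc_addr: hypothetical 6-byte multicast MAC */
	u64 mask_bit = (u64) 1 << (hash & 0x3F);	/* keep the low 6 bits: 64 filter bits */
#endif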
3138 static void sxg_mcast_set_mask(struct adapter_t *adapter)
3140 struct sxg_ucode_regs *sxg_regs = adapter->UcodeRegs;
3142 DBG_ERROR("%s ENTER (%s) MacFilter[%x] mask[%llx]\n", __FUNCTION__,
3143 adapter->netdev->name, (unsigned int)adapter->MacFilter,
3144 adapter->MulticastMask);
3146 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
3148 * Turn on all multicast addresses. We have to do this for
3149 * promiscuous mode as well as ALLMCAST mode. It saves the
	 * Microcode from having to keep state about the MAC configuration.
3152 /* DBG_ERROR("sxg: %s MacFilter = MAC_ALLMCAST | MAC_PROMISC\n \
3153 * SLUT MODE!!!\n",__func__);
3155 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
3156 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
3157 /* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high \
3158 * 0xFFFFFFFF\n",__func__, adapter->netdev->name);
	 * Commit our multicast mask to the SLIC by writing to the
3164 * multicast address mask registers
3166 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
3167 __func__, adapter->netdev->name,
3168 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
3170 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
3172 WRITE_REG(sxg_regs->McastLow,
3173 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
		WRITE_REG(sxg_regs->McastHigh,
			  (u32) ((adapter->MulticastMask >> 32) & 0xFFFFFFFF),
			  FLUSH);
3180 static void sxg_mcast_set_bit(struct adapter_t *adapter, char *address)
3182 unsigned char crcpoly;
3184 /* Get the CRC polynomial for the mac address */
3185 crcpoly = sxg_mcast_get_mac_hash(address);
3188 * We only have space on the SLIC for 64 entries. Lop
3189 * off the top two bits. (2^6 = 64)
3193 /* OR in the new bit into our 64 bit mask. */
3194 adapter->MulticastMask |= (u64) 1 << crcpoly;
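/*
 * Illustrative example: a hash of 37 (0x25) sets bit 37 of MulticastMask;
 * sxg_mcast_set_mask() above then writes that out as bit 5 of McastHigh
 * (37 - 32 = 5), while hashes 0-31 land in McastLow.
 */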
3198 * Function takes MAC addresses from dev_mc_list and generates the Mask
3201 static void sxg_set_mcast_addr(struct adapter_t *adapter)
3203 struct dev_mc_list *mclist;
3204 struct net_device *dev = adapter->netdev;
3207 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_MCAST)) {
3208 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3209 i++, mclist = mclist->next) {
3210 sxg_mcast_set_bit(adapter,mclist->da_addr);
3213 sxg_mcast_set_mask(adapter);
3216 static void sxg_mcast_set_list(struct net_device *dev)
3218 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3221 if (dev->flags & IFF_PROMISC) {
3222 adapter->MacFilter |= MAC_PROMISC;
3225 if (dev->flags & IFF_MULTICAST)
3226 adapter->MacFilter |= MAC_MCAST;
3228 if (dev->flags & IFF_ALLMULTI) {
3229 adapter->MacFilter |= MAC_ALLMCAST;
3232 //XXX handle other flags as well
3233 sxg_set_mcast_addr(adapter);
3237 static void sxg_unmap_mmio_space(struct adapter_t *adapter)
3239 #if LINUX_FREES_ADAPTER_RESOURCES
3241 * if (adapter->Regs) {
3242 * iounmap(adapter->Regs);
3244 * adapter->slic_regs = NULL;
3250 void sxg_free_sgl_buffers(struct adapter_t *adapter)
3252 struct list_entry *ple;
3253 struct sxg_scatter_gather *Sgl;
3255 while(!(IsListEmpty(&adapter->AllSglBuffers))) {
3256 ple = RemoveHeadList(&adapter->AllSglBuffers);
3257 Sgl = container_of(ple, struct sxg_scatter_gather, AllList);
3259 adapter->AllSglBufferCount--;
3263 void sxg_free_rcvblocks(struct adapter_t *adapter)
3266 void *temp_RcvBlock;
3267 struct list_entry *ple;
3268 struct sxg_rcv_block_hdr *RcvBlockHdr;
3269 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3270 ASSERT((adapter->state == SXG_STATE_INITIALIZING) ||
3271 (adapter->state == SXG_STATE_HALTING));
3272 while(!(IsListEmpty(&adapter->AllRcvBlocks))) {
3274 ple = RemoveHeadList(&adapter->AllRcvBlocks);
3275 RcvBlockHdr = container_of(ple, struct sxg_rcv_block_hdr, AllList);
3277 if(RcvBlockHdr->VirtualAddress) {
3278 temp_RcvBlock = RcvBlockHdr->VirtualAddress;
3280 for(i=0; i< SXG_RCV_DESCRIPTORS_PER_BLOCK;
3281 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3283 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3284 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3288 pci_free_consistent(adapter->pcidev,
3289 SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE),
3290 RcvBlockHdr->VirtualAddress,
3291 RcvBlockHdr->PhysicalAddress);
3292 adapter->AllRcvBlockCount--;
3294 ASSERT(adapter->AllRcvBlockCount == 0);
3295 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3298 void sxg_free_mcast_addrs(struct adapter_t *adapter)
3300 struct sxg_multicast_address *address;
3301 while(adapter->MulticastAddrs) {
3302 address = adapter->MulticastAddrs;
3303 adapter->MulticastAddrs = address->Next;
3307 adapter->MulticastMask= 0;
3310 void sxg_unmap_resources(struct adapter_t *adapter)
3312 if(adapter->HwRegs) {
3313 iounmap((void *)adapter->HwRegs);
3315 if(adapter->UcodeRegs) {
3316 iounmap((void *)adapter->UcodeRegs);
3319 ASSERT(adapter->AllRcvBlockCount == 0);
3320 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFrRBlk",
3327 * sxg_free_resources - Free everything allocated in SxgAllocateResources
3330 * adapter - A pointer to our adapter structure
3335 void sxg_free_resources(struct adapter_t *adapter)
3337 u32 RssIds, IsrCount;
3338 struct net_device *netdev = adapter->netdev;
3339 RssIds = SXG_RSS_CPU_COUNT(adapter);
3340 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3342 if (adapter->BasicAllocations == FALSE) {
3344 * No allocations have been made, including spinlocks,
3345 * or listhead initializations. Return.
3351 free_irq(adapter->netdev->irq, netdev);
3353 if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
3354 sxg_free_rcvblocks(adapter);
3356 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
3357 sxg_free_sgl_buffers(adapter);
3360 if (adapter->XmtRingZeroIndex) {
3361 pci_free_consistent(adapter->pcidev,
3363 adapter->XmtRingZeroIndex,
3364 adapter->PXmtRingZeroIndex);
3367 pci_free_consistent(adapter->pcidev,
3368 sizeof(u32) * IsrCount,
3369 adapter->Isr, adapter->PIsr);
3372 if (adapter->EventRings) {
3373 pci_free_consistent(adapter->pcidev,
3374 sizeof(struct sxg_event_ring) * RssIds,
3375 adapter->EventRings, adapter->PEventRings);
3377 if (adapter->RcvRings) {
3378 pci_free_consistent(adapter->pcidev,
3379 sizeof(struct sxg_rcv_ring) * 1,
3381 adapter->PRcvRings);
3382 adapter->RcvRings = NULL;
3385 if(adapter->XmtRings) {
3386 pci_free_consistent(adapter->pcidev,
3387 sizeof(struct sxg_xmt_ring) * 1,
3389 adapter->PXmtRings);
3390 adapter->XmtRings = NULL;
3393 if (adapter->ucode_stats) {
		pci_unmap_single(adapter->pcidev, adapter->pucode_stats,
				 sizeof(struct sxg_ucode_stats),
				 PCI_DMA_FROMDEVICE);
3397 adapter->ucode_stats = NULL;
3401 /* Unmap register spaces */
3402 sxg_unmap_resources(adapter);
3404 sxg_free_mcast_addrs(adapter);
3406 adapter->BasicAllocations = FALSE;
3411 * sxg_allocate_complete -
3413 * This routine is called when a memory allocation has completed.
3416 * struct adapter_t * - Our adapter structure
3417 * VirtualAddress - Memory virtual address
3418 * PhysicalAddress - Memory physical address
3419 * Length - Length of memory allocated (or 0)
3420 * Context - The type of buffer allocated
3425 static int sxg_allocate_complete(struct adapter_t *adapter,
3426 void *VirtualAddress,
3427 dma_addr_t PhysicalAddress,
3428 u32 Length, enum sxg_buffer_type Context)
3431 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
3432 adapter, VirtualAddress, Length, Context);
3433 ASSERT(atomic_read(&adapter->pending_allocations));
3434 atomic_dec(&adapter->pending_allocations);
3438 case SXG_BUFFER_TYPE_RCV:
3439 status = sxg_allocate_rcvblock_complete(adapter,
3441 PhysicalAddress, Length);
3443 case SXG_BUFFER_TYPE_SGL:
3444 sxg_allocate_sgl_buffer_complete(adapter, (struct sxg_scatter_gather *)
3446 PhysicalAddress, Length);
3449 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
3450 adapter, VirtualAddress, Length, Context);
3456 * sxg_allocate_buffer_memory - Shared memory allocation routine used for
3457 * synchronous and asynchronous buffer allocations
3460 * adapter - A pointer to our adapter structure
3461 * Size - block size to allocate
3462 * BufferType - Type of buffer to allocate
3467 static int sxg_allocate_buffer_memory(struct adapter_t *adapter,
3468 u32 Size, enum sxg_buffer_type BufferType)
3474 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3475 adapter, Size, BufferType, 0);
3477 * Grab the adapter lock and check the state. If we're in anything other
3478 * than INITIALIZING or RUNNING state, fail. This is to prevent
3479 * allocations in an improper driver state
3482 atomic_inc(&adapter->pending_allocations);
	if (BufferType != SXG_BUFFER_TYPE_SGL)
		Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
	else {
		Buffer = kzalloc(Size, GFP_ATOMIC);
		pBuffer = (dma_addr_t)NULL;
	}
3490 if (Buffer == NULL) {
3492 * Decrement the AllocationsPending count while holding
3493 * the lock. Pause processing relies on this
3495 atomic_dec(&adapter->pending_allocations);
3496 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
3497 adapter, Size, BufferType, 0);
3498 return (STATUS_RESOURCES);
3500 status = sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
3502 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
3503 adapter, Size, BufferType, status);
3508 * sxg_allocate_rcvblock_complete - Complete a receive descriptor
3512 * adapter - A pointer to our adapter structure
3513 * RcvBlock - receive block virtual address
3514 * PhysicalAddress - Physical address
3515 * Length - Memory length
3519 static int sxg_allocate_rcvblock_complete(struct adapter_t *adapter,
3521 dma_addr_t PhysicalAddress,
3525 u32 BufferSize = adapter->ReceiveBufferSize;
3527 void *temp_RcvBlock;
3528 struct sxg_rcv_block_hdr *RcvBlockHdr;
3529 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3530 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3531 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3533 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
3534 adapter, RcvBlock, Length, 0);
3535 if (RcvBlock == NULL) {
3538 memset(RcvBlock, 0, Length);
3539 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3540 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3541 ASSERT(Length == SXG_RCV_BLOCK_SIZE(SXG_RCV_DATA_HDR_SIZE));
3543 * First, initialize the contained pool of receive data buffers.
	 * This initialization requires NBL/NB/MDL allocations; if any of them
	 * fail, free the block and return without queueing the shared memory.
3547 //RcvDataBuffer = RcvBlock;
3548 temp_RcvBlock = RcvBlock;
3549 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3550 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3551 RcvDataBufferHdr = (struct sxg_rcv_data_buffer_hdr *)
3553 /* For FREE macro assertion */
3554 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
3555 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr, BufferSize);
3556 if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
	 * Place this entire block of memory on the AllRcvBlocks queue so it
	 * can be freed later.
3566 RcvBlockHdr = (struct sxg_rcv_block_hdr *) ((unsigned char *)RcvBlock +
3567 SXG_RCV_BLOCK_HDR_OFFSET(SXG_RCV_DATA_HDR_SIZE));
3568 RcvBlockHdr->VirtualAddress = RcvBlock;
3569 RcvBlockHdr->PhysicalAddress = PhysicalAddress;
3570 spin_lock(&adapter->RcvQLock);
3571 adapter->AllRcvBlockCount++;
3572 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3573 spin_unlock(&adapter->RcvQLock);
3575 /* Now free the contained receive data buffers that we
3576 * initialized above */
3577 temp_RcvBlock = RcvBlock;
3578 for (i = 0, Paddr = PhysicalAddress;
3579 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3580 i++, Paddr += SXG_RCV_DATA_HDR_SIZE,
3581 temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3583 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3584 spin_lock(&adapter->RcvQLock);
3585 SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3586 spin_unlock(&adapter->RcvQLock);
3589 /* Locate the descriptor block and put it on a separate free queue */
3590 RcvDescriptorBlock =
3591 (struct sxg_rcv_descriptor_block *) ((unsigned char *)RcvBlock +
3592 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
3593 (SXG_RCV_DATA_HDR_SIZE));
3594 RcvDescriptorBlockHdr =
3595 (struct sxg_rcv_descriptor_block_hdr *) ((unsigned char *)RcvBlock +
3596 SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
3597 (SXG_RCV_DATA_HDR_SIZE));
3598 RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
3599 RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
3600 spin_lock(&adapter->RcvQLock);
3601 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
3602 spin_unlock(&adapter->RcvQLock);
3603 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
3604 adapter, RcvBlock, Length, 0);
3605 return STATUS_SUCCESS;
3607 /* Free any allocated resources */
3609 temp_RcvBlock = RcvBlock;
3610 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3611 i++, temp_RcvBlock += SXG_RCV_DATA_HDR_SIZE) {
3613 (struct sxg_rcv_data_buffer_hdr *)temp_RcvBlock;
3614 SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
3616 pci_free_consistent(adapter->pcidev,
3617 Length, RcvBlock, PhysicalAddress);
3619 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3620 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3621 adapter, adapter->FreeRcvBufferCount,
3622 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
3623 adapter->Stats.NoMem++;
3624 /* As allocation failed, free all previously allocated blocks..*/
3625 //sxg_free_rcvblocks(adapter);
3627 return STATUS_RESOURCES;
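/*
 * Layout sketch of a receive block as assembled above (illustrative; the
 * exact offsets come from the SXG_RCV_*_OFFSET macros in the headers, and the
 * ordering of the trailing headers is assumed):
 *
 *   +--------------------------------------------------------------+
 *   | SXG_RCV_DESCRIPTORS_PER_BLOCK receive data buffer headers,    |
 *   | SXG_RCV_DATA_HDR_SIZE bytes each                              |
 *   +--------------------------------------------------------------+
 *   | struct sxg_rcv_descriptor_block (handed to the card)          |
 *   +--------------------------------------------------------------+
 *   | struct sxg_rcv_descriptor_block_hdr                           |
 *   +--------------------------------------------------------------+
 *   | struct sxg_rcv_block_hdr (queued on adapter->AllRcvBlocks)    |
 *   +--------------------------------------------------------------+
 */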
3631 * sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
3634 * adapter - A pointer to our adapter structure
3635 * SxgSgl - struct sxg_scatter_gather buffer
3636 * PhysicalAddress - Physical address
3637 * Length - Memory length
3641 static void sxg_allocate_sgl_buffer_complete(struct adapter_t *adapter,
3642 struct sxg_scatter_gather *SxgSgl,
3643 dma_addr_t PhysicalAddress,
3646 unsigned long sgl_flags;
3647 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
3648 adapter, SxgSgl, Length, 0);
3650 spin_lock_irqsave(&adapter->SglQLock, sgl_flags);
3652 spin_lock(&adapter->SglQLock);
3653 adapter->AllSglBufferCount++;
3654 /* PhysicalAddress; */
3655 SxgSgl->PhysicalAddress = PhysicalAddress;
3656 /* Initialize backpointer once */
3657 SxgSgl->adapter = adapter;
3658 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3660 spin_unlock_irqrestore(&adapter->SglQLock, sgl_flags);
3662 spin_unlock(&adapter->SglQLock);
3663 SxgSgl->State = SXG_BUFFER_BUSY;
3664 SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL, in_irq());
3665 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
3666 adapter, SxgSgl, Length, 0);
3670 static int sxg_adapter_set_hwaddr(struct adapter_t *adapter)
3673 * DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] \
3674 * funct#[%d]\n", __func__, card->config_set,
3675 * adapter->port, adapter->physport, adapter->functionnumber);
3677 * sxg_dbg_macaddrs(adapter);
3679 /* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n",
3683 /* sxg_dbg_macaddrs(adapter); */
3685 struct net_device * dev = adapter->netdev;
3688 printk("sxg: Dev is Null\n");
3691 DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
3693 if (netif_running(dev)) {
3700 if (!(adapter->currmacaddr[0] ||
3701 adapter->currmacaddr[1] ||
3702 adapter->currmacaddr[2] ||
3703 adapter->currmacaddr[3] ||
3704 adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
3705 memcpy(adapter->currmacaddr, adapter->macaddr, 6);
3707 if (adapter->netdev) {
3708 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3709 memcpy(adapter->netdev->perm_addr, adapter->currmacaddr, 6);
3711 /* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3712 sxg_dbg_macaddrs(adapter);
3718 static int sxg_mac_set_address(struct net_device *dev, void *ptr)
3720 struct adapter_t *adapter = (struct adapter_t *) netdev_priv(dev);
3721 struct sockaddr *addr = ptr;
3723 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
3725 if (netif_running(dev)) {
3731 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3732 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3733 adapter->currmacaddr[1], adapter->currmacaddr[2],
3734 adapter->currmacaddr[3], adapter->currmacaddr[4],
3735 adapter->currmacaddr[5]);
3736 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3737 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3738 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3739 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3740 adapter->currmacaddr[1], adapter->currmacaddr[2],
3741 adapter->currmacaddr[3], adapter->currmacaddr[4],
3742 adapter->currmacaddr[5]);
3744 sxg_config_set(adapter, TRUE);
3750 * SXG DRIVER FUNCTIONS (below)
3752 * sxg_initialize_adapter - Initialize adapter
3755 * adapter - A pointer to our adapter structure
3759 static int sxg_initialize_adapter(struct adapter_t *adapter)
3761 u32 RssIds, IsrCount;
3765 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3768 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
3769 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3772 * Sanity check SXG_UCODE_REGS structure definition to
3773 * make sure the length is correct
3775 ASSERT(sizeof(struct sxg_ucode_regs) == SXG_REGISTER_SIZE_PER_CPU);
3777 /* Disable interrupts */
3778 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3781 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3782 (adapter->FrameSize == JUMBOMAXFRAME));
3783 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3785 /* Set event ring base address and size */
3786 WRITE_REG64(adapter,
3787 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3788 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3790 /* Per-ISR initialization */
3791 for (i = 0; i < IsrCount; i++) {
3793 /* Set interrupt status pointer */
3794 Addr = adapter->PIsr + (i * sizeof(u32));
3795 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3798 /* XMT ring zero index */
3799 WRITE_REG64(adapter,
3800 adapter->UcodeRegs[0].SPSendIndex,
3801 adapter->PXmtRingZeroIndex, 0);
3803 /* Per-RSS initialization */
3804 for (i = 0; i < RssIds; i++) {
3805 /* Release all event ring entries to the Microcode */
3806 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3810 /* Transmit ring base and size */
3811 WRITE_REG64(adapter,
3812 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3813 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3815 /* Receive ring base and size */
3816 WRITE_REG64(adapter,
3817 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3818 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3820 /* Populate the card with receive buffers */
3821 sxg_stock_rcv_buffers(adapter);
3824 * Initialize checksum offload capabilities. At the moment we always
3825 * enable IP and TCP receive checksums on the card. Depending on the
3826 * checksum configuration specified by the user, we can choose to
3827 * report or ignore the checksum information provided by the card.
3829 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3830 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3832 /* Initialize the MAC, XAUI */
3833 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
3834 status = sxg_initialize_link(adapter);
3835 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
3837 if (status != STATUS_SUCCESS) {
3841 * Initialize Dead to FALSE.
3842 * SlicCheckForHang or SlicDumpThread will take it from here.
3844 adapter->Dead = FALSE;
3845 adapter->PingOutstanding = FALSE;
3846 adapter->State = SXG_STATE_RUNNING;
3848 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
3850 return (STATUS_SUCCESS);
3854 * sxg_fill_descriptor_block - Populate a descriptor block and give it to
3855 * the card. The caller should hold the RcvQLock
3858 * adapter - A pointer to our adapter structure
3859 * RcvDescriptorBlockHdr - Descriptor block to fill
3864 static int sxg_fill_descriptor_block(struct adapter_t *adapter,
3865 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr)
3868 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
3869 struct sxg_rcv_data_buffer_hdr *RcvDataBufferHdr;
3870 struct sxg_rcv_descriptor_block *RcvDescriptorBlock;
3871 struct sxg_cmd *RingDescriptorCmd;
3872 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
3874 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
3875 adapter, adapter->RcvBuffersOnCard,
3876 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3878 ASSERT(RcvDescriptorBlockHdr);
3881 * If we don't have the resources to fill the descriptor block,
3884 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3885 SXG_RING_FULL(RcvRingInfo)) {
3886 adapter->Stats.NoMem++;
3887 return (STATUS_FAILURE);
3889 /* Get a ring descriptor command */
3890 SXG_GET_CMD(RingZero,
3891 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3892 ASSERT(RingDescriptorCmd);
3893 RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
3894 RcvDescriptorBlock = (struct sxg_rcv_descriptor_block *)
3895 RcvDescriptorBlockHdr->VirtualAddress;
3897 /* Fill in the descriptor block */
3898 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3899 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3900 ASSERT(RcvDataBufferHdr);
3901 // ASSERT(RcvDataBufferHdr->SxgDumbRcvPacket);
3902 if (!RcvDataBufferHdr->SxgDumbRcvPacket) {
3903 SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr,
3904 adapter->ReceiveBufferSize);
3905 if(RcvDataBufferHdr->skb)
3906 RcvDataBufferHdr->SxgDumbRcvPacket =
3907 RcvDataBufferHdr->skb;
3911 SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
3912 RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
3913 RcvDescriptorBlock->Descriptors[i].VirtualAddress =
3914 (void *)RcvDataBufferHdr;
3916 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3917 RcvDataBufferHdr->PhysicalAddress;
3919 /* Add the descriptor block to receive descriptor ring 0 */
3920 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3923 * RcvBuffersOnCard is not protected via the receive lock (see
	 * sxg_process_event_queue). We don't want to grab a lock every time a
3925 * buffer is returned to us, so we use atomic interlocked functions
3928 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3930 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
3931 RcvDescriptorBlockHdr,
3932 RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
3934 WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
3935 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
3936 adapter, adapter->RcvBuffersOnCard,
3937 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3938 return (STATUS_SUCCESS);
3944 * sxg_stock_rcv_buffers - Stock the card with receive buffers
3947 * adapter - A pointer to our adapter structure
3952 static void sxg_stock_rcv_buffers(struct adapter_t *adapter)
3954 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
3956 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3957 adapter, adapter->RcvBuffersOnCard,
3958 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
	 * First, see if we've got less than our minimum threshold of
	 * receive buffers, there isn't an allocation in progress, and
	 * we haven't exceeded our maximum... if so, get another block of
	 * buffers. None of this needs to be SMP safe. It's round numbers.
3965 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3966 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3967 (atomic_read(&adapter->pending_allocations) == 0)) {
3968 sxg_allocate_buffer_memory(adapter,
3970 (SXG_RCV_DATA_HDR_SIZE),
3971 SXG_BUFFER_TYPE_RCV);
3973 /* Now grab the RcvQLock lock and proceed */
3974 spin_lock(&adapter->RcvQLock);
3975 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3976 struct list_entry *_ple;
3978 /* Get a descriptor block */
3979 RcvDescriptorBlockHdr = NULL;
3980 if (adapter->FreeRcvBlockCount) {
3981 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
3982 RcvDescriptorBlockHdr =
3983 container_of(_ple, struct sxg_rcv_descriptor_block_hdr,
3985 adapter->FreeRcvBlockCount--;
3986 RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
3989 if (RcvDescriptorBlockHdr == NULL) {
3991 adapter->Stats.NoMem++;
3994 /* Fill in the descriptor block and give it to the card */
3995 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3997 /* Free the descriptor block */
3998 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3999 RcvDescriptorBlockHdr);
4003 spin_unlock(&adapter->RcvQLock);
4004 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
4005 adapter, adapter->RcvBuffersOnCard,
4006 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
4010 * sxg_complete_descriptor_blocks - Return descriptor blocks that have been
4011 * completed by the microcode
4014 * adapter - A pointer to our adapter structure
4015 * Index - Where the microcode is up to
4020 static void sxg_complete_descriptor_blocks(struct adapter_t *adapter,
4021 unsigned char Index)
4023 struct sxg_rcv_ring *RingZero = &adapter->RcvRings[0];
4024 struct sxg_ring_info *RcvRingInfo = &adapter->RcvRingZeroInfo;
4025 struct sxg_rcv_descriptor_block_hdr *RcvDescriptorBlockHdr;
4026 struct sxg_cmd *RingDescriptorCmd;
4028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
4029 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4031 /* Now grab the RcvQLock lock and proceed */
4032 spin_lock(&adapter->RcvQLock);
4033 ASSERT(Index != RcvRingInfo->Tail);
4034 while (sxg_ring_get_forward_diff(RcvRingInfo, Index,
4035 RcvRingInfo->Tail) > 3) {
4037 * Locate the current Cmd (ring descriptor entry), and
4038 * associated receive descriptor block, and advance
4041 SXG_RETURN_CMD(RingZero,
4043 RingDescriptorCmd, RcvDescriptorBlockHdr);
4044 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
4045 RcvRingInfo->Head, RcvRingInfo->Tail,
4046 RingDescriptorCmd, RcvDescriptorBlockHdr);
4048 /* Clear the SGL field */
4049 RingDescriptorCmd->Sgl = 0;
4051 * Attempt to refill it and hand it right back to the
4052 * card. If we fail to refill it, free the descriptor block
4053 * header. The card will be restocked later via the
4054 * RcvBuffersOnCard test
4056 if (sxg_fill_descriptor_block(adapter,
4057 RcvDescriptorBlockHdr) == STATUS_FAILURE)
4058 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
4059 RcvDescriptorBlockHdr);
4061 spin_unlock(&adapter->RcvQLock);
4062 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
4063 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
4067 * Read the statistics which the card has been maintaining.
4069 void sxg_collect_statistics(struct adapter_t *adapter)
	if (adapter->ucode_stats) {
		WRITE_REG64(adapter, adapter->UcodeRegs[0].GetUcodeStats,
			    adapter->pucode_stats, 0);
		adapter->stats.rx_fifo_errors = adapter->ucode_stats->ERDrops;
		adapter->stats.rx_over_errors = adapter->ucode_stats->NBDrops;
		adapter->stats.tx_fifo_errors = adapter->ucode_stats->XDrops;
	}
4079 static struct net_device_stats *sxg_get_stats(struct net_device * dev)
4081 struct adapter_t *adapter = netdev_priv(dev);
4083 sxg_collect_statistics(adapter);
4084 return (&adapter->stats);
4087 static struct pci_driver sxg_driver = {
4088 .name = sxg_driver_name,
4089 .id_table = sxg_pci_tbl,
4090 .probe = sxg_entry_probe,
4091 .remove = sxg_entry_remove,
4092 #if SXG_POWER_MANAGEMENT_ENABLED
4093 .suspend = sxgpm_suspend,
4094 .resume = sxgpm_resume,
4096 /* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
4099 static int __init sxg_module_init(void)
4106 return pci_register_driver(&sxg_driver);
4109 static void __exit sxg_module_cleanup(void)
4111 pci_unregister_driver(&sxg_driver);
4114 module_init(sxg_module_init);
4115 module_exit(sxg_module_cleanup);