1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2008 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/notifier.h>
18 #include <linux/tcp.h>
20 #include <linux/crc32.h>
21 #include <linux/ethtool.h>
22 #include <linux/topology.h>
23 #include "net_driver.h"
32 #define EFX_MAX_MTU (9 * 1024)
34 /* RX slow fill workqueue. If memory allocation fails in the fast path,
35 * a work item is pushed onto this work queue to retry the allocation later,
36 * to avoid the NIC being starved of RX buffers. Since this is a per-CPU
37 * workqueue, there is nothing to be gained in making it per NIC.
38 */
39 static struct workqueue_struct *refill_workqueue;
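/* Illustrative sketch of the slow-fill pattern described above, using
 * hypothetical names (the driver's real handler is efx_rx_work() in rx.c):
 * a delayed work item retries the allocation and requeues itself while
 * memory remains tight.
 *
 *	static void example_rx_refill_work(struct work_struct *data)
 *	{
 *		struct example_rx_queue *rxq =
 *			container_of(data, struct example_rx_queue, work.work);
 *
 *		if (!example_try_refill(rxq))
 *			queue_delayed_work(refill_workqueue, &rxq->work, HZ / 10);
 *	}
 */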
41 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
42 * queued onto this work queue. This is not a per-nic work queue, because
43 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
44 */
45 static struct workqueue_struct *reset_workqueue;
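/* A minimal sketch of why one queue suffices (hypothetical names): the
 * work function itself takes the rtnl lock, so two queued resets can
 * never run concurrently, whichever workqueue they were placed on.
 *
 *	static void example_reset_work(struct work_struct *data)
 *	{
 *		rtnl_lock();
 *		// reset the NIC here; concurrent resets block on the lock
 *		rtnl_unlock();
 *	}
 */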
47 /**************************************************************************
48 *
49 * Configurable values
50 *
51 *************************************************************************/
54 * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
56 * This sets the default for new devices. It can be controlled later
59 static int lro = true;
60 module_param(lro, int, 0644);
61 MODULE_PARM_DESC(lro, "Large receive offload acceleration");
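/* For example, "modprobe sfc lro=0" disables LRO at load time; the 0644
 * permission also exposes it at /sys/module/sfc/parameters/lro. */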
64 * Use separate channels for TX and RX events
66 * Set this to 1 to use separate channels for TX and RX. It allows us
67 * to control interrupt affinity separately for TX and RX.
69 * This is only used in MSI-X interrupt mode
71 static unsigned int separate_tx_channels;
72 module_param(separate_tx_channels, uint, 0644);
73 MODULE_PARM_DESC(separate_tx_channels,
74 "Use separate channels for TX and RX");
76 /* This is the weight assigned to each of the (per-channel) virtual
77 * NAPI devices.
78 */
79 static int napi_weight = 64;
81 /* This is the time (in jiffies) between invocations of the hardware
82 * monitor, which checks for known hardware bugs and resets the
83 * hardware and driver as necessary.
85 unsigned int efx_monitor_interval = 1 * HZ;
87 /* This controls whether or not the driver will initialise devices
88 * with invalid MAC addresses stored in the EEPROM or flash. If true,
89 * such devices will be initialised with a random locally-generated
90 * MAC address. This allows for loading the sfc_mtd driver to
91 * reprogram the flash, even if the flash contents (including the MAC
92 * address) have previously been erased.
94 static unsigned int allow_bad_hwaddr;
96 /* Initial interrupt moderation settings. They can be modified after
97 * module load with ethtool.
99 * The default for RX should strike a balance between increasing the
100 * round-trip latency and reducing overhead.
102 static unsigned int rx_irq_mod_usec = 60;
104 /* Initial interrupt moderation settings. They can be modified after
105 * module load with ethtool.
107 * This default is chosen to ensure that a 10G link does not go idle
108 * while a TX queue is stopped after it has become full. A queue is
109 * restarted when it drops below half full. The time this takes (assuming
110 * worst case 3 descriptors per packet and 1024 descriptors) is
111 * 512 / 3 * 1.2 = 205 usec.
113 static unsigned int tx_irq_mod_usec = 150;
115 /* This is the first interrupt mode to try out of:
116 * 0 => MSI-X
117 * 1 => MSI
118 * 2 => legacy
119 */
120 static unsigned int interrupt_mode;
122 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
123 * i.e. the number of CPUs among which we may distribute simultaneous
124 * interrupt handling.
126 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
127 * The default (0) means to assign an interrupt to each package (level II cache)
129 static unsigned int rss_cpus;
130 module_param(rss_cpus, uint, 0444);
131 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
133 static int phy_flash_cfg;
134 module_param(phy_flash_cfg, int, 0644);
135 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
137 /**************************************************************************
139 * Utility functions and prototypes
141 *************************************************************************/
142 static void efx_remove_channel(struct efx_channel *channel);
143 static void efx_remove_port(struct efx_nic *efx);
144 static void efx_fini_napi(struct efx_nic *efx);
145 static void efx_fini_channels(struct efx_nic *efx);
147 #define EFX_ASSERT_RESET_SERIALISED(efx) \
148 do { \
149 if (efx->state == STATE_RUNNING) \
150 ASSERT_RTNL(); \
151 } while (0)
153 /**************************************************************************
155 * Event queue processing
157 *************************************************************************/
159 /* Process channel's event queue
161 * This function is responsible for processing the event queue of a
162 * single channel. The caller must guarantee that this function will
163 * never be concurrently called more than once on the same channel,
164 * though different channels may be being processed concurrently.
166 static int efx_process_channel(struct efx_channel *channel, int rx_quota)
168 struct efx_nic *efx = channel->efx;
171 if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
175 rx_packets = falcon_process_eventq(channel, rx_quota);
179 /* Deliver last RX packet. */
180 if (channel->rx_pkt) {
181 __efx_rx_packet(channel, channel->rx_pkt,
182 channel->rx_pkt_csummed);
183 channel->rx_pkt = NULL;
186 efx_flush_lro(channel);
187 efx_rx_strategy(channel);
189 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
194 /* Mark channel as finished processing
196 * Note that since we will not receive further interrupts for this
197 * channel before we finish processing and call the eventq_read_ack()
198 * method, there is no need to use the interrupt hold-off timers.
200 static inline void efx_channel_processed(struct efx_channel *channel)
202 /* The interrupt handler for this channel may set work_pending
203 * as soon as we acknowledge the events we've seen. Make sure
204 * it's cleared before then. */
205 channel->work_pending = false;
208 falcon_eventq_read_ack(channel);
213 * NAPI guarantees serialisation of polls of the same device, which
214 * provides the guarantee required by efx_process_channel().
216 static int efx_poll(struct napi_struct *napi, int budget)
218 struct efx_channel *channel =
219 container_of(napi, struct efx_channel, napi_str);
220 struct net_device *napi_dev = channel->napi_dev;
223 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
224 channel->channel, raw_smp_processor_id());
226 rx_packets = efx_process_channel(channel, budget);
228 if (rx_packets < budget) {
229 /* There is no race here; although napi_disable() will
230 * only wait for netif_rx_complete(), this isn't a problem
231 * since efx_channel_processed() will have no effect if
232 * interrupts have already been disabled.
234 netif_rx_complete(napi_dev, napi);
235 efx_channel_processed(channel);
241 /* Process the eventq of the specified channel immediately on this CPU
243 * Disable hardware generated interrupts, wait for any existing
244 * processing to finish, then directly poll (and ack) the eventq.
245 * Finally reenable NAPI and interrupts.
247 * Since we are touching interrupts the caller should hold the suspend lock
249 void efx_process_channel_now(struct efx_channel *channel)
251 struct efx_nic *efx = channel->efx;
253 BUG_ON(!channel->used_flags);
254 BUG_ON(!channel->enabled);
256 /* Disable interrupts and wait for ISRs to complete */
257 falcon_disable_interrupts(efx);
258 if (efx->legacy_irq)
259 synchronize_irq(efx->legacy_irq);
260 if (channel->irq)
261 synchronize_irq(channel->irq);
263 /* Wait for any NAPI processing to complete */
264 napi_disable(&channel->napi_str);
266 /* Poll the channel */
267 efx_process_channel(channel, efx->type->evq_size);
269 /* Ack the eventq. This may cause an interrupt to be generated
270 * when interrupts are re-enabled */
271 efx_channel_processed(channel);
273 napi_enable(&channel->napi_str);
274 falcon_enable_interrupts(efx);
277 /* Create event queue
278 * Event queue memory allocations are done only once. If the channel
279 * is reset, the memory buffer will be reused; this guards against
280 * errors during channel reset and also simplifies interrupt handling.
282 static int efx_probe_eventq(struct efx_channel *channel)
284 EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
286 return falcon_probe_eventq(channel);
289 /* Prepare channel's event queue */
290 static void efx_init_eventq(struct efx_channel *channel)
292 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
294 channel->eventq_read_ptr = 0;
296 falcon_init_eventq(channel);
299 static void efx_fini_eventq(struct efx_channel *channel)
301 EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
303 falcon_fini_eventq(channel);
306 static void efx_remove_eventq(struct efx_channel *channel)
308 EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
310 falcon_remove_eventq(channel);
313 /**************************************************************************
314 *
315 * Channel handling
316 *
317 *************************************************************************/
319 static int efx_probe_channel(struct efx_channel *channel)
321 struct efx_tx_queue *tx_queue;
322 struct efx_rx_queue *rx_queue;
325 EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
327 rc = efx_probe_eventq(channel);
331 efx_for_each_channel_tx_queue(tx_queue, channel) {
332 rc = efx_probe_tx_queue(tx_queue);
337 efx_for_each_channel_rx_queue(rx_queue, channel) {
338 rc = efx_probe_rx_queue(rx_queue);
343 channel->n_rx_frm_trunc = 0;
348 efx_for_each_channel_rx_queue(rx_queue, channel)
349 efx_remove_rx_queue(rx_queue);
351 efx_for_each_channel_tx_queue(tx_queue, channel)
352 efx_remove_tx_queue(tx_queue);
358 static void efx_set_channel_names(struct efx_nic *efx)
360 struct efx_channel *channel;
361 const char *type = "";
364 efx_for_each_channel(channel, efx) {
365 number = channel->channel;
366 if (efx->n_channels > efx->n_rx_queues) {
367 if (channel->channel < efx->n_rx_queues) {
371 number -= efx->n_rx_queues;
374 snprintf(channel->name, sizeof(channel->name),
375 "%s%s-%d", efx->name, type, number);
379 /* Channels are shut down and reinitialised whilst the NIC is running
380 * to propagate configuration changes (mtu, checksum offload), or
381 * to clear hardware error conditions
383 static void efx_init_channels(struct efx_nic *efx)
385 struct efx_tx_queue *tx_queue;
386 struct efx_rx_queue *rx_queue;
387 struct efx_channel *channel;
389 /* Calculate the rx buffer allocation parameters required to
390 * support the current MTU, including padding for header
391 * alignment and overruns.
393 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
394 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
395 efx->type->rx_buffer_padding);
396 efx->rx_buffer_order = get_order(efx->rx_buffer_len);
398 /* Initialise the channels */
399 efx_for_each_channel(channel, efx) {
400 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
402 efx_init_eventq(channel);
404 efx_for_each_channel_tx_queue(tx_queue, channel)
405 efx_init_tx_queue(tx_queue);
407 /* The rx buffer allocation strategy is MTU dependent */
408 efx_rx_strategy(channel);
410 efx_for_each_channel_rx_queue(rx_queue, channel)
411 efx_init_rx_queue(rx_queue);
413 WARN_ON(channel->rx_pkt != NULL);
414 efx_rx_strategy(channel);
418 /* This enables event queue processing and packet transmission.
420 * Note that this function is not allowed to fail, since that would
421 * introduce too much complexity into the suspend/resume path.
423 static void efx_start_channel(struct efx_channel *channel)
425 struct efx_rx_queue *rx_queue;
427 EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
429 if (!(channel->efx->net_dev->flags & IFF_UP))
430 netif_napi_add(channel->napi_dev, &channel->napi_str,
431 efx_poll, napi_weight);
433 /* The interrupt handler for this channel may set work_pending
434 * as soon as we enable it. Make sure it's cleared before
435 * then. Similarly, make sure it sees the enabled flag set. */
436 channel->work_pending = false;
437 channel->enabled = true;
440 napi_enable(&channel->napi_str);
442 /* Load up RX descriptors */
443 efx_for_each_channel_rx_queue(rx_queue, channel)
444 efx_fast_push_rx_descriptors(rx_queue);
447 /* This disables event queue processing and packet transmission.
448 * This function does not guarantee that all queue processing
449 * (e.g. RX refill) is complete.
451 static void efx_stop_channel(struct efx_channel *channel)
453 struct efx_rx_queue *rx_queue;
455 if (!channel->enabled)
458 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
460 channel->enabled = false;
461 napi_disable(&channel->napi_str);
463 /* Ensure that any worker threads have exited or will be no-ops */
464 efx_for_each_channel_rx_queue(rx_queue, channel) {
465 spin_lock_bh(&rx_queue->add_lock);
466 spin_unlock_bh(&rx_queue->add_lock);
470 static void efx_fini_channels(struct efx_nic *efx)
472 struct efx_channel *channel;
473 struct efx_tx_queue *tx_queue;
474 struct efx_rx_queue *rx_queue;
477 EFX_ASSERT_RESET_SERIALISED(efx);
478 BUG_ON(efx->port_enabled);
480 rc = falcon_flush_queues(efx);
482 EFX_ERR(efx, "failed to flush queues\n");
484 EFX_LOG(efx, "successfully flushed all queues\n");
486 efx_for_each_channel(channel, efx) {
487 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
489 efx_for_each_channel_rx_queue(rx_queue, channel)
490 efx_fini_rx_queue(rx_queue);
491 efx_for_each_channel_tx_queue(tx_queue, channel)
492 efx_fini_tx_queue(tx_queue);
493 efx_fini_eventq(channel);
497 static void efx_remove_channel(struct efx_channel *channel)
499 struct efx_tx_queue *tx_queue;
500 struct efx_rx_queue *rx_queue;
502 EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
504 efx_for_each_channel_rx_queue(rx_queue, channel)
505 efx_remove_rx_queue(rx_queue);
506 efx_for_each_channel_tx_queue(tx_queue, channel)
507 efx_remove_tx_queue(tx_queue);
508 efx_remove_eventq(channel);
510 channel->used_flags = 0;
513 void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
515 queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
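/* For example, an RX refill path may fall back to the slow-fill queue when
 * an atomic allocation fails (hypothetical call site; the real callers are
 * in rx.c):
 *
 *	if (!example_try_fast_refill(rx_queue))
 *		efx_schedule_slow_fill(rx_queue, msecs_to_jiffies(100));
 */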
518 /**************************************************************************
519 *
520 * Port handling
521 *
522 **************************************************************************/
524 /* This ensures that the kernel is kept informed (via
525 * netif_carrier_on/off) of the link status, and also keeps the
526 * port's TX queue stopped while the link is down.
528 static void efx_link_status_changed(struct efx_nic *efx)
530 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
531 * that no events are triggered between unregister_netdev() and the
532 * driver unloading. A more general condition is that NETDEV_CHANGE
533 * can only be generated between NETDEV_UP and NETDEV_DOWN */
534 if (!netif_running(efx->net_dev))
537 if (efx->port_inhibited) {
538 netif_carrier_off(efx->net_dev);
542 if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
543 efx->n_link_state_changes++;
546 netif_carrier_on(efx->net_dev);
548 netif_carrier_off(efx->net_dev);
551 /* Status message for kernel log */
553 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
554 efx->link_speed, efx->link_fd ? "full" : "half",
556 (efx->promiscuous ? " [PROMISC]" : ""));
558 EFX_INFO(efx, "link down\n");
563 /* This call reinitialises the MAC to pick up new PHY settings. The
564 * caller must hold the mac_lock */
565 void __efx_reconfigure_port(struct efx_nic *efx)
567 WARN_ON(!mutex_is_locked(&efx->mac_lock));
569 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
570 raw_smp_processor_id());
572 /* Serialise the promiscuous flag with efx_set_multicast_list. */
573 if (efx_dev_registered(efx)) {
574 netif_addr_lock_bh(efx->net_dev);
575 netif_addr_unlock_bh(efx->net_dev);
578 falcon_reconfigure_xmac(efx);
580 /* Inform kernel of loss/gain of carrier */
581 efx_link_status_changed(efx);
584 /* Reinitialise the MAC to pick up new PHY settings, even if the port is
586 void efx_reconfigure_port(struct efx_nic *efx)
588 EFX_ASSERT_RESET_SERIALISED(efx);
590 mutex_lock(&efx->mac_lock);
591 __efx_reconfigure_port(efx);
592 mutex_unlock(&efx->mac_lock);
595 /* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
596 * we skip efx_reconfigure_port() if the port is disabled. Care is taken
597 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
598 static void efx_reconfigure_work(struct work_struct *data)
600 struct efx_nic *efx = container_of(data, struct efx_nic,
603 mutex_lock(&efx->mac_lock);
604 if (efx->port_enabled)
605 __efx_reconfigure_port(efx);
606 mutex_unlock(&efx->mac_lock);
609 static int efx_probe_port(struct efx_nic *efx)
613 EFX_LOG(efx, "create port\n");
615 /* Connect up MAC/PHY operations table and read MAC address */
616 rc = falcon_probe_port(efx);
621 efx->phy_mode = PHY_MODE_SPECIAL;
623 /* Sanity check MAC address */
624 if (is_valid_ether_addr(efx->mac_address)) {
625 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
627 EFX_ERR(efx, "invalid MAC address %pM\n",
629 if (!allow_bad_hwaddr) {
633 random_ether_addr(efx->net_dev->dev_addr);
634 EFX_INFO(efx, "using locally-generated MAC %pM\n",
635 efx->net_dev->dev_addr);
641 efx_remove_port(efx);
645 static int efx_init_port(struct efx_nic *efx)
649 EFX_LOG(efx, "init port\n");
651 /* Initialise the MAC and PHY */
652 rc = falcon_init_xmac(efx);
656 efx->port_initialized = true;
657 efx->stats_enabled = true;
659 /* Reconfigure port to program MAC registers */
660 falcon_reconfigure_xmac(efx);
665 /* Allow efx_reconfigure_port() to be scheduled, and close the window
666 * between efx_stop_port and efx_flush_all whereby a previously scheduled
667 * efx_reconfigure_port() may have been cancelled */
668 static void efx_start_port(struct efx_nic *efx)
670 EFX_LOG(efx, "start port\n");
671 BUG_ON(efx->port_enabled);
673 mutex_lock(&efx->mac_lock);
674 efx->port_enabled = true;
675 __efx_reconfigure_port(efx);
676 mutex_unlock(&efx->mac_lock);
679 /* Prevent efx_reconfigure_work and efx_monitor() from executing, and
680 * efx_set_multicast_list() from scheduling efx_reconfigure_work.
681 * efx_reconfigure_work can still be scheduled via NAPI processing
682 * until efx_flush_all() is called */
683 static void efx_stop_port(struct efx_nic *efx)
685 EFX_LOG(efx, "stop port\n");
687 mutex_lock(&efx->mac_lock);
688 efx->port_enabled = false;
689 mutex_unlock(&efx->mac_lock);
691 /* Serialise against efx_set_multicast_list() */
692 if (efx_dev_registered(efx)) {
693 netif_addr_lock_bh(efx->net_dev);
694 netif_addr_unlock_bh(efx->net_dev);
698 static void efx_fini_port(struct efx_nic *efx)
700 EFX_LOG(efx, "shut down port\n");
702 if (!efx->port_initialized)
705 falcon_fini_xmac(efx);
706 efx->port_initialized = false;
708 efx->link_up = false;
709 efx_link_status_changed(efx);
712 static void efx_remove_port(struct efx_nic *efx)
714 EFX_LOG(efx, "destroying port\n");
716 falcon_remove_port(efx);
719 /**************************************************************************
723 **************************************************************************/
725 /* This configures the PCI device to enable I/O and DMA. */
726 static int efx_init_io(struct efx_nic *efx)
728 struct pci_dev *pci_dev = efx->pci_dev;
729 dma_addr_t dma_mask = efx->type->max_dma_mask;
732 EFX_LOG(efx, "initialising I/O\n");
734 rc = pci_enable_device(pci_dev);
736 EFX_ERR(efx, "failed to enable PCI device\n");
740 pci_set_master(pci_dev);
742 /* Set the PCI DMA mask. Try all possibilities from our
743 * genuine mask down to 32 bits, because some architectures
744 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
745 * masks even though they reject 46 bit masks.
747 while (dma_mask > 0x7fffffffUL) {
748 if (pci_dma_supported(pci_dev, dma_mask) &&
749 ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
754 EFX_ERR(efx, "could not find a suitable DMA mask\n");
757 EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
758 rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
760 /* pci_set_consistent_dma_mask() is not *allowed* to
761 * fail with a mask that pci_set_dma_mask() accepted,
762 * but just in case...
764 EFX_ERR(efx, "failed to set consistent DMA mask\n");
768 efx->membase_phys = pci_resource_start(efx->pci_dev,
769 efx->type->mem_bar);
770 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
772 EFX_ERR(efx, "request for memory BAR failed\n");
776 efx->membase = ioremap_nocache(efx->membase_phys,
777 efx->type->mem_map_size);
779 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
781 (unsigned long long)efx->membase_phys,
782 efx->type->mem_map_size);
786 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
787 efx->type->mem_bar, (unsigned long long)efx->membase_phys,
788 efx->type->mem_map_size, efx->membase);
793 pci_release_region(efx->pci_dev, efx->type->mem_bar);
795 efx->membase_phys = 0;
797 pci_disable_device(efx->pci_dev);
802 static void efx_fini_io(struct efx_nic *efx)
804 EFX_LOG(efx, "shutting down I/O\n");
807 iounmap(efx->membase);
811 if (efx->membase_phys) {
812 pci_release_region(efx->pci_dev, efx->type->mem_bar);
813 efx->membase_phys = 0;
816 pci_disable_device(efx->pci_dev);
819 /* Get number of RX queues wanted. Return number of online CPU
820 * packages in the expectation that an IRQ balancer will spread
821 * interrupts across them. */
822 static int efx_wanted_rx_queues(void)
828 cpus_clear(core_mask);
830 for_each_online_cpu(cpu) {
831 if (!cpu_isset(cpu, core_mask)) {
833 cpus_or(core_mask, core_mask,
834 topology_core_siblings(cpu));
841 /* Probe the number and type of interrupts we are able to obtain, and
842 * the resulting numbers of channels and RX queues.
844 static void efx_probe_interrupts(struct efx_nic *efx)
846 int max_channels =
847 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
850 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
851 struct msix_entry xentries[EFX_MAX_CHANNELS];
855 /* We want one RX queue and interrupt per CPU package
856 * (or as specified by the rss_cpus module parameter).
857 * We will need one channel per interrupt.
859 rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
860 wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
861 wanted_ints = min(wanted_ints, max_channels);
863 for (i = 0; i < wanted_ints; i++)
864 xentries[i].entry = i;
865 rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
867 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
868 " available (%d < %d).\n", rc, wanted_ints);
869 EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
870 EFX_BUG_ON_PARANOID(rc >= wanted_ints);
872 rc = pci_enable_msix(efx->pci_dev, xentries,
877 efx->n_rx_queues = min(rx_queues, wanted_ints);
878 efx->n_channels = wanted_ints;
879 for (i = 0; i < wanted_ints; i++)
880 efx->channel[i].irq = xentries[i].vector;
882 /* Fall back to single channel MSI */
883 efx->interrupt_mode = EFX_INT_MODE_MSI;
884 EFX_ERR(efx, "could not enable MSI-X\n");
888 /* Try single interrupt MSI */
889 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
890 efx->n_rx_queues = 1;
892 rc = pci_enable_msi(efx->pci_dev);
894 efx->channel[0].irq = efx->pci_dev->irq;
896 EFX_ERR(efx, "could not enable MSI\n");
897 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
901 /* Assume legacy interrupts */
902 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
903 efx->n_rx_queues = 1;
904 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
905 efx->legacy_irq = efx->pci_dev->irq;
909 static void efx_remove_interrupts(struct efx_nic *efx)
911 struct efx_channel *channel;
913 /* Remove MSI/MSI-X interrupts */
914 efx_for_each_channel(channel, efx)
916 pci_disable_msi(efx->pci_dev);
917 pci_disable_msix(efx->pci_dev);
919 /* Remove legacy interrupt */
923 static void efx_set_channels(struct efx_nic *efx)
925 struct efx_tx_queue *tx_queue;
926 struct efx_rx_queue *rx_queue;
928 efx_for_each_tx_queue(tx_queue, efx) {
929 if (separate_tx_channels)
930 tx_queue->channel = &efx->channel[efx->n_channels-1];
931 else
932 tx_queue->channel = &efx->channel[0];
933 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
936 efx_for_each_rx_queue(rx_queue, efx) {
937 rx_queue->channel = &efx->channel[rx_queue->queue];
938 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
942 static int efx_probe_nic(struct efx_nic *efx)
946 EFX_LOG(efx, "creating NIC\n");
948 /* Carry out hardware-type specific initialisation */
949 rc = falcon_probe_nic(efx);
953 /* Determine the number of channels and RX queues by trying to hook
954 * in MSI-X interrupts. */
955 efx_probe_interrupts(efx);
957 efx_set_channels(efx);
959 /* Initialise the interrupt moderation settings */
960 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
965 static void efx_remove_nic(struct efx_nic *efx)
967 EFX_LOG(efx, "destroying NIC\n");
969 efx_remove_interrupts(efx);
970 falcon_remove_nic(efx);
973 /**************************************************************************
975 * NIC startup/shutdown
977 *************************************************************************/
979 static int efx_probe_all(struct efx_nic *efx)
981 struct efx_channel *channel;
985 rc = efx_probe_nic(efx);
987 EFX_ERR(efx, "failed to create NIC\n");
992 rc = efx_probe_port(efx);
994 EFX_ERR(efx, "failed to create port\n");
998 /* Create channels */
999 efx_for_each_channel(channel, efx) {
1000 rc = efx_probe_channel(channel);
1002 EFX_ERR(efx, "failed to create channel %d\n",
1007 efx_set_channel_names(efx);
1012 efx_for_each_channel(channel, efx)
1013 efx_remove_channel(channel);
1014 efx_remove_port(efx);
1016 efx_remove_nic(efx);
1021 /* Called after previous invocation(s) of efx_stop_all, restarts the
1022 * port, kernel transmit queue, NAPI processing and hardware interrupts,
1023 * and ensures that the port is scheduled to be reconfigured.
1024 * This function is safe to call multiple times when the NIC is in any
1025 * state. */
1026 static void efx_start_all(struct efx_nic *efx)
1028 struct efx_channel *channel;
1030 EFX_ASSERT_RESET_SERIALISED(efx);
1032 /* Check that it is appropriate to restart the interface. All
1033 * of these flags are safe to read under just the rtnl lock */
1034 if (efx->port_enabled)
1036 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
1038 if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
1041 /* Mark the port as enabled so port reconfigurations can start, then
1042 * restart the transmit interface early so the watchdog timer stops */
1043 efx_start_port(efx);
1044 if (efx_dev_registered(efx))
1045 efx_wake_queue(efx);
1047 efx_for_each_channel(channel, efx)
1048 efx_start_channel(channel);
1050 falcon_enable_interrupts(efx);
1052 /* Start hardware monitor if we're in RUNNING */
1053 if (efx->state == STATE_RUNNING)
1054 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1055 efx_monitor_interval);
1058 /* Flush all delayed work. Should only be called when no more delayed work
1059 * will be scheduled. This doesn't flush pending online resets (efx_reset),
1060 * since we're holding the rtnl_lock at this point. */
1061 static void efx_flush_all(struct efx_nic *efx)
1063 struct efx_rx_queue *rx_queue;
1065 /* Make sure the hardware monitor is stopped */
1066 cancel_delayed_work_sync(&efx->monitor_work);
1068 /* Ensure that all RX slow refills are complete. */
1069 efx_for_each_rx_queue(rx_queue, efx)
1070 cancel_delayed_work_sync(&rx_queue->work);
1072 /* Stop scheduled port reconfigurations */
1073 cancel_work_sync(&efx->reconfigure_work);
1077 /* Quiesce hardware and software without bringing the link down.
1078 * Safe to call multiple times, when the NIC and interface are in any
1079 * state. The caller is guaranteed to subsequently be in a position
1080 * to modify any hardware and software state they see fit without
1082 static void efx_stop_all(struct efx_nic *efx)
1084 struct efx_channel *channel;
1086 EFX_ASSERT_RESET_SERIALISED(efx);
1088 /* port_enabled can be read safely under the rtnl lock */
1089 if (!efx->port_enabled)
1092 /* Disable interrupts and wait for ISR to complete */
1093 falcon_disable_interrupts(efx);
1094 if (efx->legacy_irq)
1095 synchronize_irq(efx->legacy_irq);
1096 efx_for_each_channel(channel, efx) {
1097 if (channel->irq)
1098 synchronize_irq(channel->irq);
1101 /* Stop all NAPI processing and synchronous rx refills */
1102 efx_for_each_channel(channel, efx)
1103 efx_stop_channel(channel);
1105 /* Stop all asynchronous port reconfigurations. Since all
1106 * event processing has already been stopped, there is no
1107 * window to lose PHY events */
1110 /* Flush reconfigure_work, refill_workqueue, monitor_work */
1113 /* Isolate the MAC from the TX and RX engines, so that queue
1114 * flushes will complete in a timely fashion. */
1115 falcon_drain_tx_fifo(efx);
1117 /* Stop the kernel transmit interface late, so the watchdog
1118 * timer isn't ticking over the flush */
1119 if (efx_dev_registered(efx)) {
1120 efx_stop_queue(efx);
1121 netif_tx_lock_bh(efx->net_dev);
1122 netif_tx_unlock_bh(efx->net_dev);
1126 static void efx_remove_all(struct efx_nic *efx)
1128 struct efx_channel *channel;
1130 efx_for_each_channel(channel, efx)
1131 efx_remove_channel(channel);
1132 efx_remove_port(efx);
1133 efx_remove_nic(efx);
1136 /* A convenience function to safely flush all the queues */
1137 void efx_flush_queues(struct efx_nic *efx)
1139 EFX_ASSERT_RESET_SERIALISED(efx);
1143 efx_fini_channels(efx);
1144 efx_init_channels(efx);
1149 /**************************************************************************
1151 * Interrupt moderation
1153 **************************************************************************/
1155 /* Set interrupt moderation parameters */
1156 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
1158 struct efx_tx_queue *tx_queue;
1159 struct efx_rx_queue *rx_queue;
1161 EFX_ASSERT_RESET_SERIALISED(efx);
1163 efx_for_each_tx_queue(tx_queue, efx)
1164 tx_queue->channel->irq_moderation = tx_usecs;
1166 efx_for_each_rx_queue(rx_queue, efx)
1167 rx_queue->channel->irq_moderation = rx_usecs;
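/* Sketch of how these values are typically driven from ethtool -C
 * (simplified; the driver's real handler is efx_ethtool_set_coalesce()
 * in ethtool.c, which also programs the hardware per channel):
 *
 *	static int example_set_coalesce(struct net_device *net_dev,
 *					struct ethtool_coalesce *coalesce)
 *	{
 *		struct efx_nic *efx = netdev_priv(net_dev);
 *
 *		efx_init_irq_moderation(efx, coalesce->tx_coalesce_usecs_irq,
 *					coalesce->rx_coalesce_usecs_irq);
 *		return 0;
 *	}
 */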
1170 /**************************************************************************
1171 *
1172 * Hardware monitor
1173 *
1174 **************************************************************************/
1176 /* Run periodically off the general workqueue. Serialised against
1177 * efx_reconfigure_port via the mac_lock */
1178 static void efx_monitor(struct work_struct *data)
1180 struct efx_nic *efx = container_of(data, struct efx_nic,
1184 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1185 raw_smp_processor_id());
1188 /* If the mac_lock is already held then it is likely a port
1189 * reconfiguration is already in place, which will likely do
1190 * most of the work of check_hw() anyway. */
1191 if (!mutex_trylock(&efx->mac_lock)) {
1192 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1193 efx_monitor_interval);
1197 if (efx->port_enabled)
1198 rc = falcon_check_xmac(efx);
1199 mutex_unlock(&efx->mac_lock);
1201 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1202 efx_monitor_interval);
1205 /**************************************************************************
1209 *************************************************************************/
1212 * Context: process, rtnl_lock() held.
1214 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1216 struct efx_nic *efx = netdev_priv(net_dev);
1218 EFX_ASSERT_RESET_SERIALISED(efx);
1220 return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
1223 /**************************************************************************
1227 **************************************************************************/
1229 static int efx_init_napi(struct efx_nic *efx)
1231 struct efx_channel *channel;
1234 efx_for_each_channel(channel, efx) {
1235 channel->napi_dev = efx->net_dev;
1236 rc = efx_lro_init(&channel->lro_mgr, efx);
1246 static void efx_fini_napi(struct efx_nic *efx)
1248 struct efx_channel *channel;
1250 efx_for_each_channel(channel, efx) {
1251 efx_lro_fini(&channel->lro_mgr);
1252 channel->napi_dev = NULL;
1256 /**************************************************************************
1258 * Kernel netpoll interface
1260 *************************************************************************/
1262 #ifdef CONFIG_NET_POLL_CONTROLLER
1264 /* Although in the common case interrupts will be disabled, this is not
1265 * guaranteed. However, all our work happens inside the NAPI callback,
1266 * so no locking is required.
1268 static void efx_netpoll(struct net_device *net_dev)
1270 struct efx_nic *efx = netdev_priv(net_dev);
1271 struct efx_channel *channel;
1273 efx_for_each_channel(channel, efx)
1274 efx_schedule_channel(channel);
1279 /**************************************************************************
1281 * Kernel net device interface
1283 *************************************************************************/
1285 /* Context: process, rtnl_lock() held. */
1286 static int efx_net_open(struct net_device *net_dev)
1288 struct efx_nic *efx = netdev_priv(net_dev);
1289 EFX_ASSERT_RESET_SERIALISED(efx);
1291 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1292 raw_smp_processor_id());
1294 if (efx->phy_mode & PHY_MODE_SPECIAL)
1301 /* Context: process, rtnl_lock() held.
1302 * Note that the kernel will ignore our return code; this method
1303 * should really be a void.
1305 static int efx_net_stop(struct net_device *net_dev)
1307 struct efx_nic *efx = netdev_priv(net_dev);
1309 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1310 raw_smp_processor_id());
1312 /* Stop the device and flush all the channels */
1314 efx_fini_channels(efx);
1315 efx_init_channels(efx);
1320 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
1321 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1323 struct efx_nic *efx = netdev_priv(net_dev);
1324 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1325 struct net_device_stats *stats = &net_dev->stats;
1327 /* Update stats if possible, but do not wait if another thread
1328 * is updating them (or resetting the NIC); slightly stale
1329 * stats are acceptable.
1331 if (!spin_trylock(&efx->stats_lock))
1333 if (efx->stats_enabled) {
1334 falcon_update_stats_xmac(efx);
1335 falcon_update_nic_stats(efx);
1337 spin_unlock(&efx->stats_lock);
1339 stats->rx_packets = mac_stats->rx_packets;
1340 stats->tx_packets = mac_stats->tx_packets;
1341 stats->rx_bytes = mac_stats->rx_bytes;
1342 stats->tx_bytes = mac_stats->tx_bytes;
1343 stats->multicast = mac_stats->rx_multicast;
1344 stats->collisions = mac_stats->tx_collision;
1345 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1346 mac_stats->rx_length_error);
1347 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1348 stats->rx_crc_errors = mac_stats->rx_bad;
1349 stats->rx_frame_errors = mac_stats->rx_align_error;
1350 stats->rx_fifo_errors = mac_stats->rx_overflow;
1351 stats->rx_missed_errors = mac_stats->rx_missed;
1352 stats->tx_window_errors = mac_stats->tx_late_collision;
1354 stats->rx_errors = (stats->rx_length_errors +
1355 stats->rx_over_errors +
1356 stats->rx_crc_errors +
1357 stats->rx_frame_errors +
1358 stats->rx_fifo_errors +
1359 stats->rx_missed_errors +
1360 mac_stats->rx_symbol_error);
1361 stats->tx_errors = (stats->tx_window_errors +
1367 /* Context: netif_tx_lock held, BHs disabled. */
1368 static void efx_watchdog(struct net_device *net_dev)
1370 struct efx_nic *efx = netdev_priv(net_dev);
1372 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
1373 " resetting channels\n",
1374 atomic_read(&efx->netif_stop_count), efx->port_enabled);
1376 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1380 /* Context: process, rtnl_lock() held. */
1381 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1383 struct efx_nic *efx = netdev_priv(net_dev);
1386 EFX_ASSERT_RESET_SERIALISED(efx);
1388 if (new_mtu > EFX_MAX_MTU)
1393 EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
1395 efx_fini_channels(efx);
1396 net_dev->mtu = new_mtu;
1397 efx_init_channels(efx);
1403 static int efx_set_mac_address(struct net_device *net_dev, void *data)
1405 struct efx_nic *efx = netdev_priv(net_dev);
1406 struct sockaddr *addr = data;
1407 char *new_addr = addr->sa_data;
1409 EFX_ASSERT_RESET_SERIALISED(efx);
1411 if (!is_valid_ether_addr(new_addr)) {
1412 EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
1417 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
1419 /* Reconfigure the MAC */
1420 efx_reconfigure_port(efx);
1425 /* Context: netif_addr_lock held, BHs disabled. */
1426 static void efx_set_multicast_list(struct net_device *net_dev)
1428 struct efx_nic *efx = netdev_priv(net_dev);
1429 struct dev_mc_list *mc_list = net_dev->mc_list;
1430 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1431 bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
1432 bool changed = (efx->promiscuous != promiscuous);
1437 efx->promiscuous = promiscuous;
1439 /* Build multicast hash table */
1440 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1441 memset(mc_hash, 0xff, sizeof(*mc_hash));
1443 memset(mc_hash, 0x00, sizeof(*mc_hash));
1444 for (i = 0; i < net_dev->mc_count; i++) {
1445 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
1446 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1447 set_bit_le(bit, mc_hash->byte);
1448 mc_list = mc_list->next;
1452 if (!efx->port_enabled)
1453 /* Delay pushing settings until efx_start_port() */
1457 queue_work(efx->workqueue, &efx->reconfigure_work);
1459 /* Create and activate new global multicast hash table */
1460 falcon_set_multicast_hash(efx);
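/* Worked example of the hash above as a hypothetical helper: the bit set
 * for one multicast address is the low-order bits of its little-endian CRC.
 *
 *	static unsigned int example_mcast_hash_bit(const u8 *addr)
 *	{
 *		u32 crc = ether_crc_le(ETH_ALEN, addr);
 *
 *		return crc & (EFX_MCAST_HASH_ENTRIES - 1);
 *	}
 */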
1463 static const struct net_device_ops efx_netdev_ops = {
1464 .ndo_open = efx_net_open,
1465 .ndo_stop = efx_net_stop,
1466 .ndo_get_stats = efx_net_stats,
1467 .ndo_tx_timeout = efx_watchdog,
1468 .ndo_start_xmit = efx_hard_start_xmit,
1469 .ndo_validate_addr = eth_validate_addr,
1470 .ndo_do_ioctl = efx_ioctl,
1471 .ndo_change_mtu = efx_change_mtu,
1472 .ndo_set_mac_address = efx_set_mac_address,
1473 .ndo_set_multicast_list = efx_set_multicast_list,
1474 #ifdef CONFIG_NET_POLL_CONTROLLER
1475 .ndo_poll_controller = efx_netpoll,
1479 static int efx_netdev_event(struct notifier_block *this,
1480 unsigned long event, void *ptr)
1482 struct net_device *net_dev = ptr;
1484 if (net_dev->netdev_ops == &efx_netdev_ops && event == NETDEV_CHANGENAME) {
1485 struct efx_nic *efx = netdev_priv(net_dev);
1487 strcpy(efx->name, net_dev->name);
1488 efx_mtd_rename(efx);
1489 efx_set_channel_names(efx);
1495 static struct notifier_block efx_netdev_notifier = {
1496 .notifier_call = efx_netdev_event,
1500 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
1502 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
1503 return sprintf(buf, "%d\n", efx->phy_type);
1505 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
1507 static int efx_register_netdev(struct efx_nic *efx)
1509 struct net_device *net_dev = efx->net_dev;
1512 net_dev->watchdog_timeo = 5 * HZ;
1513 net_dev->irq = efx->pci_dev->irq;
1514 net_dev->netdev_ops = &efx_netdev_ops;
1515 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1516 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1518 /* Always start with carrier off; PHY events will detect the link */
1519 netif_carrier_off(efx->net_dev);
1521 /* Clear MAC statistics */
1522 falcon_update_stats_xmac(efx);
1523 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1525 rc = register_netdev(net_dev);
1527 EFX_ERR(efx, "could not register net dev\n");
1530 strcpy(efx->name, net_dev->name);
1531 efx_set_channel_names(efx);
1533 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
1535 EFX_ERR(efx, "failed to init net dev attributes\n");
1536 goto fail_registered;
1542 unregister_netdev(net_dev);
1546 static void efx_unregister_netdev(struct efx_nic *efx)
1548 struct efx_tx_queue *tx_queue;
1553 BUG_ON(netdev_priv(efx->net_dev) != efx);
1555 /* Free up any skbs still remaining. This has to happen before
1556 * we try to unregister the netdev as running their destructors
1557 * may be needed to get the device ref. count to 0. */
1558 efx_for_each_tx_queue(tx_queue, efx)
1559 efx_release_tx_buffers(tx_queue);
1561 if (efx_dev_registered(efx)) {
1562 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1563 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
1564 unregister_netdev(efx->net_dev);
1568 /**************************************************************************
1570 * Device reset and suspend
1572 **************************************************************************/
1574 /* Tears down the entire software state and most of the hardware state
1576 void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1580 EFX_ASSERT_RESET_SERIALISED(efx);
1582 /* The net_dev->get_stats handler is quite slow, and will fail
1583 * if a fetch is pending over reset. Serialise against it. */
1584 spin_lock(&efx->stats_lock);
1585 efx->stats_enabled = false;
1586 spin_unlock(&efx->stats_lock);
1589 mutex_lock(&efx->mac_lock);
1590 mutex_lock(&efx->spi_lock);
1592 rc = falcon_xmac_get_settings(efx, ecmd);
1594 EFX_ERR(efx, "could not back up PHY settings\n");
1596 efx_fini_channels(efx);
1599 /* This function will always ensure that the locks acquired in
1600 * efx_reset_down() are released. A failure return code indicates
1601 * that we were unable to reinitialise the hardware, and the
1602 * driver should be disabled. If ok is false, then the rx and tx
1603 * engines are not restarted, pending a RESET_DISABLE. */
1604 int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1608 EFX_ASSERT_RESET_SERIALISED(efx);
1610 rc = falcon_init_nic(efx);
1612 EFX_ERR(efx, "failed to initialise NIC\n");
1617 efx_init_channels(efx);
1619 if (falcon_xmac_set_settings(efx, ecmd))
1620 EFX_ERR(efx, "could not restore PHY settings\n");
1623 mutex_unlock(&efx->spi_lock);
1624 mutex_unlock(&efx->mac_lock);
1628 efx->stats_enabled = true;
1633 /* Reset the NIC as transparently as possible. Do not reset the PHY.
1634 * Note that the reset may fail, in which case the card will be left
1635 * in a most-probably-unusable state.
1637 * This function will sleep. You cannot reset from within an atomic
1638 * state; use efx_schedule_reset() instead.
1640 * Grabs the rtnl_lock.
1642 static int efx_reset(struct efx_nic *efx)
1644 struct ethtool_cmd ecmd;
1645 enum reset_type method = efx->reset_pending;
1648 /* Serialise with kernel interfaces */
1651 /* If we're not RUNNING then don't reset. Leave the reset_pending
1652 * flag set so that efx_pci_probe_main will be retried */
1653 if (efx->state != STATE_RUNNING) {
1654 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1658 EFX_INFO(efx, "resetting (%d)\n", method);
1660 efx_reset_down(efx, &ecmd);
1662 rc = falcon_reset_hw(efx, method);
1664 EFX_ERR(efx, "failed to reset hardware\n");
1668 /* Allow resets to be rescheduled. */
1669 efx->reset_pending = RESET_TYPE_NONE;
1671 /* Reinitialise bus-mastering, which may have been turned off before
1672 * the reset was scheduled. This is still appropriate, even in the
1673 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
1674 * can respond to requests. */
1675 pci_set_master(efx->pci_dev);
1677 /* Leave device stopped if necessary */
1678 if (method == RESET_TYPE_DISABLE) {
1683 rc = efx_reset_up(efx, &ecmd, true);
1687 EFX_LOG(efx, "reset complete\n");
1693 efx_reset_up(efx, &ecmd, false);
1695 EFX_ERR(efx, "has been disabled\n");
1696 efx->state = STATE_DISABLED;
1699 efx_unregister_netdev(efx);
1704 /* The worker thread exists so that code that cannot sleep can
1705 * schedule a reset for later.
1707 static void efx_reset_work(struct work_struct *data)
1709 struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
1714 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1716 enum reset_type method;
1718 if (efx->reset_pending != RESET_TYPE_NONE) {
1719 EFX_INFO(efx, "quenching already scheduled reset\n");
1724 case RESET_TYPE_INVISIBLE:
1725 case RESET_TYPE_ALL:
1726 case RESET_TYPE_WORLD:
1727 case RESET_TYPE_DISABLE:
1730 case RESET_TYPE_RX_RECOVERY:
1731 case RESET_TYPE_RX_DESC_FETCH:
1732 case RESET_TYPE_TX_DESC_FETCH:
1733 case RESET_TYPE_TX_SKIP:
1734 method = RESET_TYPE_INVISIBLE;
1737 method = RESET_TYPE_ALL;
1742 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
1744 EFX_LOG(efx, "scheduling reset (%d)\n", method);
1746 efx->reset_pending = method;
1748 queue_work(reset_workqueue, &efx->reset_work);
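/* For example, the TX watchdog above calls
 * efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); that type falls through
 * to the default case here and is escalated to RESET_TYPE_ALL before the
 * work item is queued. */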
1751 /**************************************************************************
1753 * List of NICs we support
1755 **************************************************************************/
1757 /* PCI device ID table */
1758 static struct pci_device_id efx_pci_table[] __devinitdata = {
1759 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1760 .driver_data = (unsigned long) &falcon_a_nic_type},
1761 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1762 .driver_data = (unsigned long) &falcon_b_nic_type},
1763 {0} /* end of list */
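/* The .driver_data cookie stored above is cast back to the matching
 * struct efx_nic_type in efx_pci_probe() below. */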
1766 /**************************************************************************
1768 * Dummy PHY/MAC/Board operations
1770 * Can be used for some unimplemented operations
1771 * Needed so all function pointers are valid and do not have to be tested
1772 * before use.
1774 **************************************************************************/
1775 int efx_port_dummy_op_int(struct efx_nic *efx)
1779 void efx_port_dummy_op_void(struct efx_nic *efx) {}
1780 void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
1782 static struct efx_phy_operations efx_dummy_phy_operations = {
1783 .init = efx_port_dummy_op_int,
1784 .reconfigure = efx_port_dummy_op_void,
1785 .check_hw = efx_port_dummy_op_int,
1786 .fini = efx_port_dummy_op_void,
1787 .clear_interrupt = efx_port_dummy_op_void,
1790 static struct efx_board efx_dummy_board_info = {
1791 .init = efx_port_dummy_op_int,
1792 .init_leds = efx_port_dummy_op_int,
1793 .set_fault_led = efx_port_dummy_op_blink,
1794 .monitor = efx_port_dummy_op_int,
1795 .blink = efx_port_dummy_op_blink,
1796 .fini = efx_port_dummy_op_void,
1799 /**************************************************************************
1800 *
1801 * Data housekeeping
1802 *
1803 **************************************************************************/
1805 /* This zeroes out and then fills in the invariants in a struct
1806 * efx_nic (including all sub-structures).
1808 static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1809 struct pci_dev *pci_dev, struct net_device *net_dev)
1811 struct efx_channel *channel;
1812 struct efx_tx_queue *tx_queue;
1813 struct efx_rx_queue *rx_queue;
1816 /* Initialise common structures */
1817 memset(efx, 0, sizeof(*efx));
1818 spin_lock_init(&efx->biu_lock);
1819 spin_lock_init(&efx->phy_lock);
1820 mutex_init(&efx->spi_lock);
1821 INIT_WORK(&efx->reset_work, efx_reset_work);
1822 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1823 efx->pci_dev = pci_dev;
1824 efx->state = STATE_INIT;
1825 efx->reset_pending = RESET_TYPE_NONE;
1826 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1827 efx->board_info = efx_dummy_board_info;
1829 efx->net_dev = net_dev;
1830 efx->rx_checksum_enabled = true;
1831 spin_lock_init(&efx->netif_stop_lock);
1832 spin_lock_init(&efx->stats_lock);
1833 mutex_init(&efx->mac_lock);
1834 efx->phy_op = &efx_dummy_phy_operations;
1835 efx->mii.dev = net_dev;
1836 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
1837 atomic_set(&efx->netif_stop_count, 1);
1839 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
1840 channel = &efx->channel[i];
1842 channel->channel = i;
1843 channel->work_pending = false;
1845 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
1846 tx_queue = &efx->tx_queue[i];
1847 tx_queue->efx = efx;
1848 tx_queue->queue = i;
1849 tx_queue->buffer = NULL;
1850 tx_queue->channel = &efx->channel[0]; /* for safety */
1851 tx_queue->tso_headers_free = NULL;
1853 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
1854 rx_queue = &efx->rx_queue[i];
1855 rx_queue->efx = efx;
1856 rx_queue->queue = i;
1857 rx_queue->channel = &efx->channel[0]; /* for safety */
1858 rx_queue->buffer = NULL;
1859 spin_lock_init(&rx_queue->add_lock);
1860 INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
1865 /* Sanity-check NIC type: ring masks must be 2^n - 1; evq size a power of 2 */
1866 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1867 (efx->type->txd_ring_mask + 1));
1868 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1869 (efx->type->rxd_ring_mask + 1));
1870 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1871 (efx->type->evq_size - 1));
1872 /* As close as we can get to guaranteeing that we don't overflow */
1873 EFX_BUG_ON_PARANOID(efx->type->evq_size <
1874 (efx->type->txd_ring_mask + 1 +
1875 efx->type->rxd_ring_mask + 1));
1876 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1878 /* Higher numbered interrupt modes are less capable! */
1879 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1880 interrupt_mode);
1882 efx->workqueue = create_singlethread_workqueue("sfc_work");
1883 if (!efx->workqueue)
1889 static void efx_fini_struct(struct efx_nic *efx)
1891 if (efx->workqueue) {
1892 destroy_workqueue(efx->workqueue);
1893 efx->workqueue = NULL;
1897 /**************************************************************************
1898 *
1899 * PCI interface
1900 *
1901 **************************************************************************/
1903 /* Main body of final NIC shutdown code
1904 * This is called only at module unload (or hotplug removal).
1906 static void efx_pci_remove_main(struct efx_nic *efx)
1908 EFX_ASSERT_RESET_SERIALISED(efx);
1910 /* Skip everything if we never obtained a valid membase */
1914 efx_fini_channels(efx);
1917 /* Shutdown the board, then the NIC and board state */
1918 efx->board_info.fini(efx);
1919 falcon_fini_interrupt(efx);
1922 efx_remove_all(efx);
1925 /* Final NIC shutdown
1926 * This is called only at module unload (or hotplug removal).
1928 static void efx_pci_remove(struct pci_dev *pci_dev)
1930 struct efx_nic *efx;
1932 efx = pci_get_drvdata(pci_dev);
1936 efx_mtd_remove(efx);
1938 /* Mark the NIC as fini, then stop the interface */
1940 efx->state = STATE_FINI;
1941 dev_close(efx->net_dev);
1943 /* Allow any queued efx_resets() to complete */
1946 if (efx->membase == NULL)
1949 efx_unregister_netdev(efx);
1951 /* Wait for any scheduled resets to complete. No more will be
1952 * scheduled from this point because efx_stop_all() has been
1953 * called, we are no longer registered with driverlink, and
1954 * the net_devices have been removed. */
1955 cancel_work_sync(&efx->reset_work);
1957 efx_pci_remove_main(efx);
1961 EFX_LOG(efx, "shutdown successful\n");
1963 pci_set_drvdata(pci_dev, NULL);
1964 efx_fini_struct(efx);
1965 free_netdev(efx->net_dev);
1968 /* Main body of NIC initialisation
1969 * This is called at module load (or hotplug insertion, theoretically).
1971 static int efx_pci_probe_main(struct efx_nic *efx)
1975 /* Do start-of-day initialisation */
1976 rc = efx_probe_all(efx);
1980 rc = efx_init_napi(efx);
1984 /* Initialise the board */
1985 rc = efx->board_info.init(efx);
1987 EFX_ERR(efx, "failed to initialise board\n");
1991 rc = falcon_init_nic(efx);
1993 EFX_ERR(efx, "failed to initialise NIC\n");
1997 rc = efx_init_port(efx);
1999 EFX_ERR(efx, "failed to initialise port\n");
2003 efx_init_channels(efx);
2005 rc = falcon_init_interrupt(efx);
2012 efx_fini_channels(efx);
2016 efx->board_info.fini(efx);
2020 efx_remove_all(efx);
2025 /* NIC initialisation
2027 * This is called at module load (or hotplug insertion,
2028 * theoretically). It sets up PCI mappings, tests and resets the NIC,
2029 * sets up and registers the network devices with the kernel and hooks
2030 * the interrupt service routine. It does not prepare the device for
2031 * transmission; this is left to the first time one of the network
2032 * interfaces is brought up (i.e. efx_net_open).
2034 static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2035 const struct pci_device_id *entry)
2037 struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
2038 struct net_device *net_dev;
2039 struct efx_nic *efx;
2042 /* Allocate and initialise a struct net_device and struct efx_nic */
2043 net_dev = alloc_etherdev(sizeof(*efx));
2046 net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
2047 NETIF_F_HIGHDMA | NETIF_F_TSO);
2048 if (lro)
2049 net_dev->features |= NETIF_F_LRO;
2050 /* Mask for features that also apply to VLAN devices */
2051 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2052 NETIF_F_HIGHDMA | NETIF_F_TSO);
2053 efx = netdev_priv(net_dev);
2054 pci_set_drvdata(pci_dev, efx);
2055 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2059 EFX_INFO(efx, "Solarflare Communications NIC detected\n");
2061 /* Set up basic I/O (BAR mappings etc) */
2062 rc = efx_init_io(efx);
2066 /* No serialisation is required with the reset path because
2067 * we're in STATE_INIT. */
2068 for (i = 0; i < 5; i++) {
2069 rc = efx_pci_probe_main(efx);
2073 /* Serialise against efx_reset(). No more resets will be
2074 * scheduled since efx_stop_all() has been called, and we
2075 * have not and never have been registered with either
2076 * the rtnetlink or driverlink layers. */
2077 cancel_work_sync(&efx->reset_work);
2079 /* Retry if a recoverable reset event has been scheduled */
2080 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
2081 (efx->reset_pending != RESET_TYPE_ALL))
2084 efx->reset_pending = RESET_TYPE_NONE;
2088 EFX_ERR(efx, "Could not reset NIC\n");
2092 /* Switch to the running state before we expose the device to
2093 * the OS. This is to ensure that the initial gathering of
2094 * MAC stats succeeds. */
2096 efx->state = STATE_RUNNING;
2099 rc = efx_register_netdev(efx);
2103 EFX_LOG(efx, "initialisation successful\n");
2105 efx_mtd_probe(efx); /* allowed to fail */
2109 efx_pci_remove_main(efx);
2114 efx_fini_struct(efx);
2116 EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
2117 free_netdev(net_dev);
2121 static struct pci_driver efx_pci_driver = {
2122 .name = EFX_DRIVER_NAME,
2123 .id_table = efx_pci_table,
2124 .probe = efx_pci_probe,
2125 .remove = efx_pci_remove,
2128 /**************************************************************************
2130 * Kernel module interface
2132 *************************************************************************/
2134 module_param(interrupt_mode, uint, 0444);
2135 MODULE_PARM_DESC(interrupt_mode,
2136 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
2138 static int __init efx_init_module(void)
2142 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
2144 rc = register_netdevice_notifier(&efx_netdev_notifier);
2148 refill_workqueue = create_workqueue("sfc_refill");
2149 if (!refill_workqueue) {
2153 reset_workqueue = create_singlethread_workqueue("sfc_reset");
2154 if (!reset_workqueue) {
2159 rc = pci_register_driver(&efx_pci_driver);
2166 destroy_workqueue(reset_workqueue);
2168 destroy_workqueue(refill_workqueue);
2170 unregister_netdevice_notifier(&efx_netdev_notifier);
2175 static void __exit efx_exit_module(void)
2177 printk(KERN_INFO "Solarflare NET driver unloading\n");
2179 pci_unregister_driver(&efx_pci_driver);
2180 destroy_workqueue(reset_workqueue);
2181 destroy_workqueue(refill_workqueue);
2182 unregister_netdevice_notifier(&efx_netdev_notifier);
2186 module_init(efx_init_module);
2187 module_exit(efx_exit_module);
2189 MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
2190 "Solarflare Communications");
2191 MODULE_DESCRIPTION("Solarflare Communications network driver");
2192 MODULE_LICENSE("GPL");
2193 MODULE_DEVICE_TABLE(pci, efx_pci_table);