1 /**********************************************************************
2 * Author: Cavium Networks
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
7 * Copyright (c) 2003-2007 Cavium Networks
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 **********************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/delay.h>
33 #include <linux/mii.h>
37 #include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include "ethernet-proc.h"
#include "octeon-ethernet.h"

#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-fau.h"
#include "cvmx-ipd.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"
#include "cvmx-smix-defs.h"
57 #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
58 && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
59 int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
61 int num_packet_buffers = 1024;
63 module_param(num_packet_buffers, int, 0444);
64 MODULE_PARM_DESC(num_packet_buffers, "\n"
65 "\tNumber of packet buffers to allocate and store in the\n"
66 "\tFPA. By default, 1024 packet buffers are used unless\n"
67 "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
69 int pow_receive_group = 15;
70 module_param(pow_receive_group, int, 0444);
71 MODULE_PARM_DESC(pow_receive_group, "\n"
72 "\tPOW group to receive packets from. All ethernet hardware\n"
73 "\twill be configured to send incomming packets to this POW\n"
74 "\tgroup. Also any other software can submit packets to this\n"
75 "\tgroup for the kernel to process.");
/*
 * POW group used for transmits from the virtual pow0 device; -1
 * (the default) disables creation of pow0 entirely.
 */
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");
85 module_param(always_use_pow, int, 0444);
86 MODULE_PARM_DESC(always_use_pow, "\n"
87 "\tWhen set, always send to the pow group. This will cause\n"
88 "\tpackets sent to real ethernet devices to be sent to the\n"
89 "\tPOW group instead of the hardware. Unless some other\n"
90 "\tapplication changes the config, packets will still be\n"
91 "\treceived from the low level hardware. Use this option\n"
92 "\tto allow a CVMX app to intercept all packets from the\n"
93 "\tlinux kernel. You must specify pow_send_group along with\n"
/*
 * Per-device version of always_use_pow: comma separated device
 * names that should transmit via the POW (checked with strstr()
 * in cvm_oct_common_init).
 */
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");
/*
 * When set (the default), each device's tx_queue_len is zeroed at
 * creation so the core networking qdisc layer is bypassed.
 */
static int disable_core_queueing = 1;
module_param(disable_core_queueing, int, 0444);
MODULE_PARM_DESC(disable_core_queueing, "\n"
	"\tWhen set the networking core's tx_queue_len is set to zero. This\n"
	"\tallows packets to be sent without lock contention in the packet\n"
	"\tscheduler resulting in some cases in improved throughput.\n");
/*
 * Periodic timer to check auto negotiation
 */
static struct timer_list cvm_oct_poll_timer;
/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* Defined in ethernet-mdio; taken with down_trylock() around PHY polling. */
extern struct semaphore mdio_sem;
127 * Periodic timer tick for slow management operations
129 * @arg: Device to check
131 static void cvm_do_timer(unsigned long arg)
134 if (port < CVMX_PIP_NUM_INPUT_PORTS) {
135 if (cvm_oct_device[port]) {
138 struct octeon_ethernet *priv =
139 netdev_priv(cvm_oct_device[port]);
141 /* skip polling if we don't get the lock */
142 if (!down_trylock(&mdio_sem)) {
143 priv->poll(cvm_oct_device[port]);
148 queues_per_port = cvmx_pko_get_num_queues(port);
149 /* Drain any pending packets in the free list */
150 for (qos = 0; qos < queues_per_port; qos++) {
151 if (skb_queue_len(&priv->tx_free_list[qos])) {
152 spin_lock(&priv->tx_free_list[qos].
155 (&priv->tx_free_list[qos]) >
156 cvmx_fau_fetch_and_add32(priv->
160 dev_kfree_skb(__skb_dequeue
164 spin_unlock(&priv->tx_free_list[qos].
168 cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
171 /* Poll the next port in a 50th of a second.
172 This spreads the polling of ports out a little bit */
173 mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
176 /* All ports have been polled. Start the next iteration through
177 the ports in one second */
178 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
183 * Configure common hardware for all interfaces
185 static __init void cvm_oct_configure_common_hw(void)
190 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
192 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
194 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
195 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
196 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
199 cvmx_helper_setup_red(num_packet_buffers / 4,
200 num_packet_buffers / 8);
202 /* Enable the MII interface */
203 if (!octeon_is_simulation())
204 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
206 /* Register an IRQ hander for to receive POW interrupts */
207 r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
208 cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
211 #if defined(CONFIG_SMP) && 0
212 if (USE_MULTICORE_RECEIVE) {
213 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
220 * Free a work queue entry received in a intercept callback.
223 * Work queue entry to free
224 * Returns Zero on success, Negative on failure.
226 int cvm_oct_free_work(void *work_queue_entry)
228 cvmx_wqe_t *work = work_queue_entry;
230 int segments = work->word2.s.bufs;
231 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
234 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
235 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
236 if (unlikely(!segment_ptr.s.i))
237 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
239 DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
241 segment_ptr = next_ptr;
243 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
247 EXPORT_SYMBOL(cvm_oct_free_work);
250 * Get the low level ethernet statistics
252 * @dev: Device to get the statistics from
253 * Returns Pointer to the statistics
255 static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
257 cvmx_pip_port_status_t rx_status;
258 cvmx_pko_port_status_t tx_status;
259 struct octeon_ethernet *priv = netdev_priv(dev);
261 if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
262 if (octeon_is_simulation()) {
263 /* The simulator doesn't support statistics */
264 memset(&rx_status, 0, sizeof(rx_status));
265 memset(&tx_status, 0, sizeof(tx_status));
267 cvmx_pip_get_port_status(priv->port, 1, &rx_status);
268 cvmx_pko_get_port_status(priv->port, 1, &tx_status);
271 priv->stats.rx_packets += rx_status.inb_packets;
272 priv->stats.tx_packets += tx_status.packets;
273 priv->stats.rx_bytes += rx_status.inb_octets;
274 priv->stats.tx_bytes += tx_status.octets;
275 priv->stats.multicast += rx_status.multicast_packets;
276 priv->stats.rx_crc_errors += rx_status.inb_errors;
277 priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
280 * The drop counter must be incremented atomically
281 * since the RX tasklet also increments it.
284 atomic64_add(rx_status.dropped_packets,
285 (atomic64_t *)&priv->stats.rx_dropped);
287 atomic_add(rx_status.dropped_packets,
288 (atomic_t *)&priv->stats.rx_dropped);
296 * Change the link MTU. Unimplemented
298 * @dev: Device to change
299 * @new_mtu: The new MTU
301 * Returns Zero on success
303 static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
305 struct octeon_ethernet *priv = netdev_priv(dev);
306 int interface = INTERFACE(priv->port);
307 int index = INDEX(priv->port);
308 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
315 * Limit the MTU to make sure the ethernet packets are between
316 * 64 bytes and 65535 bytes.
318 if ((new_mtu + 14 + 4 + vlan_bytes < 64)
319 || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
320 pr_err("MTU must be between %d and %d.\n",
321 64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
327 && (cvmx_helper_interface_get_mode(interface) !=
328 CVMX_HELPER_INTERFACE_MODE_SPI)) {
329 /* Add ethernet header and FCS, and VLAN if configured. */
330 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
332 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
333 || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
334 /* Signal errors on packets larger than the MTU */
335 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
339 * Set the hardware to truncate packets larger
340 * than the MTU and smaller the 64 bytes.
342 union cvmx_pip_frm_len_chkx frm_len_chk;
344 frm_len_chk.s.minlen = 64;
345 frm_len_chk.s.maxlen = max_packet;
346 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
350 * Set the hardware to truncate packets larger than
351 * the MTU. The jabber register must be set to a
352 * multiple of 8 bytes, so round up.
354 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
355 (max_packet + 7) & ~7u);
361 * Set the multicast list. Currently unimplemented.
363 * @dev: Device to work on
365 static void cvm_oct_common_set_multicast_list(struct net_device *dev)
367 union cvmx_gmxx_prtx_cfg gmx_cfg;
368 struct octeon_ethernet *priv = netdev_priv(dev);
369 int interface = INTERFACE(priv->port);
370 int index = INDEX(priv->port);
373 && (cvmx_helper_interface_get_mode(interface) !=
374 CVMX_HELPER_INTERFACE_MODE_SPI)) {
375 union cvmx_gmxx_rxx_adr_ctl control;
377 control.s.bcst = 1; /* Allow broadcast MAC addresses */
379 if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
380 (dev->flags & IFF_PROMISC))
381 /* Force accept multicast packets */
384 /* Force reject multicat packets */
387 if (dev->flags & IFF_PROMISC)
389 * Reject matches if promisc. Since CAM is
390 * shut off, should accept everything.
392 control.s.cam_mode = 0;
394 /* Filter packets based on the CAM */
395 control.s.cam_mode = 1;
398 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
399 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
400 gmx_cfg.u64 & ~1ull);
402 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
404 if (dev->flags & IFF_PROMISC)
405 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
406 (index, interface), 0);
408 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
409 (index, interface), 1);
411 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
417 * Set the hardware MAC address for a device
419 * @dev: Device to change the MAC address for
420 * @addr: Address structure to change it too. MAC address is addr + 2.
421 * Returns Zero on success
423 static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
425 struct octeon_ethernet *priv = netdev_priv(dev);
426 union cvmx_gmxx_prtx_cfg gmx_cfg;
427 int interface = INTERFACE(priv->port);
428 int index = INDEX(priv->port);
430 memcpy(dev->dev_addr, addr + 2, 6);
433 && (cvmx_helper_interface_get_mode(interface) !=
434 CVMX_HELPER_INTERFACE_MODE_SPI)) {
438 for (i = 0; i < 6; i++)
439 mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
442 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
443 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
444 gmx_cfg.u64 & ~1ull);
446 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
447 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
449 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
451 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
453 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
455 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
457 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
459 cvm_oct_common_set_multicast_list(dev);
460 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
467 * Per network device initialization
469 * @dev: Device to initialize
470 * Returns Zero on success
472 int cvm_oct_common_init(struct net_device *dev)
475 char mac[8] = { 0x00, 0x00,
476 octeon_bootinfo->mac_addr_base[0],
477 octeon_bootinfo->mac_addr_base[1],
478 octeon_bootinfo->mac_addr_base[2],
479 octeon_bootinfo->mac_addr_base[3],
480 octeon_bootinfo->mac_addr_base[4],
481 octeon_bootinfo->mac_addr_base[5] + count
483 struct octeon_ethernet *priv = netdev_priv(dev);
486 * Force the interface to use the POW send if always_use_pow
487 * was specified or it is in the pow send list.
489 if ((pow_send_group != -1)
490 && (always_use_pow || strstr(pow_send_list, dev->name)))
493 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
494 dev->features |= NETIF_F_IP_CSUM;
498 /* We do our own locking, Linux doesn't need to */
499 dev->features |= NETIF_F_LLTX;
500 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
502 cvm_oct_mdio_setup_device(dev);
503 dev->netdev_ops->ndo_set_mac_address(dev, mac);
504 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
507 * Zero out stats for port so we won't mistakenly show
508 * counters from the bootloader.
510 memset(dev->netdev_ops->ndo_get_stats(dev), 0,
511 sizeof(struct net_device_stats));
/*
 * Per network device uninitialization.  Intentionally a no-op;
 * restored the missing braces.
 *
 * @dev: Device being torn down
 */
void cvm_oct_common_uninit(struct net_device *dev)
{
	/* Currently nothing to do */
}
521 static const struct net_device_ops cvm_oct_npi_netdev_ops = {
522 .ndo_init = cvm_oct_common_init,
523 .ndo_uninit = cvm_oct_common_uninit,
524 .ndo_start_xmit = cvm_oct_xmit,
525 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
526 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
527 .ndo_do_ioctl = cvm_oct_ioctl,
528 .ndo_change_mtu = cvm_oct_common_change_mtu,
529 .ndo_get_stats = cvm_oct_common_get_stats,
530 #ifdef CONFIG_NET_POLL_CONTROLLER
531 .ndo_poll_controller = cvm_oct_poll_controller,
534 static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
535 .ndo_init = cvm_oct_xaui_init,
536 .ndo_uninit = cvm_oct_xaui_uninit,
537 .ndo_open = cvm_oct_xaui_open,
538 .ndo_stop = cvm_oct_xaui_stop,
539 .ndo_start_xmit = cvm_oct_xmit,
540 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
541 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
542 .ndo_do_ioctl = cvm_oct_ioctl,
543 .ndo_change_mtu = cvm_oct_common_change_mtu,
544 .ndo_get_stats = cvm_oct_common_get_stats,
545 #ifdef CONFIG_NET_POLL_CONTROLLER
546 .ndo_poll_controller = cvm_oct_poll_controller,
549 static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
550 .ndo_init = cvm_oct_sgmii_init,
551 .ndo_uninit = cvm_oct_sgmii_uninit,
552 .ndo_open = cvm_oct_sgmii_open,
553 .ndo_stop = cvm_oct_sgmii_stop,
554 .ndo_start_xmit = cvm_oct_xmit,
555 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
556 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
557 .ndo_do_ioctl = cvm_oct_ioctl,
558 .ndo_change_mtu = cvm_oct_common_change_mtu,
559 .ndo_get_stats = cvm_oct_common_get_stats,
560 #ifdef CONFIG_NET_POLL_CONTROLLER
561 .ndo_poll_controller = cvm_oct_poll_controller,
564 static const struct net_device_ops cvm_oct_spi_netdev_ops = {
565 .ndo_init = cvm_oct_spi_init,
566 .ndo_uninit = cvm_oct_spi_uninit,
567 .ndo_start_xmit = cvm_oct_xmit,
568 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
569 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
570 .ndo_do_ioctl = cvm_oct_ioctl,
571 .ndo_change_mtu = cvm_oct_common_change_mtu,
572 .ndo_get_stats = cvm_oct_common_get_stats,
573 #ifdef CONFIG_NET_POLL_CONTROLLER
574 .ndo_poll_controller = cvm_oct_poll_controller,
577 static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
578 .ndo_init = cvm_oct_rgmii_init,
579 .ndo_uninit = cvm_oct_rgmii_uninit,
580 .ndo_open = cvm_oct_rgmii_open,
581 .ndo_stop = cvm_oct_rgmii_stop,
582 .ndo_start_xmit = cvm_oct_xmit,
583 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
584 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
585 .ndo_do_ioctl = cvm_oct_ioctl,
586 .ndo_change_mtu = cvm_oct_common_change_mtu,
587 .ndo_get_stats = cvm_oct_common_get_stats,
588 #ifdef CONFIG_NET_POLL_CONTROLLER
589 .ndo_poll_controller = cvm_oct_poll_controller,
592 static const struct net_device_ops cvm_oct_pow_netdev_ops = {
593 .ndo_init = cvm_oct_common_init,
594 .ndo_start_xmit = cvm_oct_xmit_pow,
595 .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
596 .ndo_set_mac_address = cvm_oct_common_set_mac_address,
597 .ndo_do_ioctl = cvm_oct_ioctl,
598 .ndo_change_mtu = cvm_oct_common_change_mtu,
599 .ndo_get_stats = cvm_oct_common_get_stats,
600 #ifdef CONFIG_NET_POLL_CONTROLLER
601 .ndo_poll_controller = cvm_oct_poll_controller,
606 * Module/ driver initialization. Creates the linux network
609 * Returns Zero on success
611 static int __init cvm_oct_init_module(void)
615 int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
618 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
620 cvm_oct_proc_initialize();
621 cvm_oct_rx_initialize();
622 cvm_oct_configure_common_hw();
624 cvmx_helper_initialize_packet_io_global();
626 /* Change the input group for all ports before input is enabled */
627 num_interfaces = cvmx_helper_get_number_of_interfaces();
628 for (interface = 0; interface < num_interfaces; interface++) {
629 int num_ports = cvmx_helper_ports_on_interface(interface);
632 for (port = cvmx_helper_get_ipd_port(interface, 0);
633 port < cvmx_helper_get_ipd_port(interface, num_ports);
635 union cvmx_pip_prt_tagx pip_prt_tagx;
637 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
638 pip_prt_tagx.s.grp = pow_receive_group;
639 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
644 cvmx_helper_ipd_and_packet_input_enable();
646 memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
649 * Initialize the FAU used for counting packet buffers that
652 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
654 if ((pow_send_group != -1)) {
655 struct net_device *dev;
656 pr_info("\tConfiguring device for POW only access\n");
657 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
659 /* Initialize the device private structure. */
660 struct octeon_ethernet *priv = netdev_priv(dev);
661 memset(priv, 0, sizeof(struct octeon_ethernet));
663 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
664 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
665 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
667 strcpy(dev->name, "pow%d");
668 for (qos = 0; qos < 16; qos++)
669 skb_queue_head_init(&priv->tx_free_list[qos]);
671 if (register_netdev(dev) < 0) {
672 pr_err("Failed to register ethernet "
676 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
677 pr_info("%s: POW send group %d, receive "
679 dev->name, pow_send_group,
683 pr_err("Failed to allocate ethernet device "
688 num_interfaces = cvmx_helper_get_number_of_interfaces();
689 for (interface = 0; interface < num_interfaces; interface++) {
690 cvmx_helper_interface_mode_t imode =
691 cvmx_helper_interface_get_mode(interface);
692 int num_ports = cvmx_helper_ports_on_interface(interface);
695 for (port = cvmx_helper_get_ipd_port(interface, 0);
696 port < cvmx_helper_get_ipd_port(interface, num_ports);
698 struct octeon_ethernet *priv;
699 struct net_device *dev =
700 alloc_etherdev(sizeof(struct octeon_ethernet));
702 pr_err("Failed to allocate ethernet device "
703 "for port %d\n", port);
706 if (disable_core_queueing)
707 dev->tx_queue_len = 0;
709 /* Initialize the device private structure. */
710 priv = netdev_priv(dev);
711 memset(priv, 0, sizeof(struct octeon_ethernet));
715 priv->queue = cvmx_pko_get_base_queue(priv->port);
716 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
717 for (qos = 0; qos < 16; qos++)
718 skb_queue_head_init(&priv->tx_free_list[qos]);
719 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
721 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
723 switch (priv->imode) {
725 /* These types don't support ports to IPD/PKO */
726 case CVMX_HELPER_INTERFACE_MODE_DISABLED:
727 case CVMX_HELPER_INTERFACE_MODE_PCIE:
728 case CVMX_HELPER_INTERFACE_MODE_PICMG:
731 case CVMX_HELPER_INTERFACE_MODE_NPI:
732 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
733 strcpy(dev->name, "npi%d");
736 case CVMX_HELPER_INTERFACE_MODE_XAUI:
737 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
738 strcpy(dev->name, "xaui%d");
741 case CVMX_HELPER_INTERFACE_MODE_LOOP:
742 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
743 strcpy(dev->name, "loop%d");
746 case CVMX_HELPER_INTERFACE_MODE_SGMII:
747 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
748 strcpy(dev->name, "eth%d");
751 case CVMX_HELPER_INTERFACE_MODE_SPI:
752 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
753 strcpy(dev->name, "spi%d");
756 case CVMX_HELPER_INTERFACE_MODE_RGMII:
757 case CVMX_HELPER_INTERFACE_MODE_GMII:
758 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
759 strcpy(dev->name, "eth%d");
763 if (!dev->netdev_ops) {
765 } else if (register_netdev(dev) < 0) {
766 pr_err("Failed to register ethernet device "
767 "for interface %d, port %d\n",
768 interface, priv->port);
771 cvm_oct_device[priv->port] = dev;
773 cvmx_pko_get_num_queues(priv->port) *
779 if (INTERRUPT_LIMIT) {
781 * Set the POW timer rate to give an interrupt at most
782 * INTERRUPT_LIMIT times per second.
784 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
785 octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
789 * Enable POW timer interrupt. It will count when
790 * there are packets available.
792 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
795 /* Enable POW interrupt when our port has at least one packet */
796 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
799 /* Enable the poll timer for checking RGMII status */
800 init_timer(&cvm_oct_poll_timer);
801 cvm_oct_poll_timer.data = 0;
802 cvm_oct_poll_timer.function = cvm_do_timer;
803 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
809 * Module / driver shutdown
811 * Returns Zero on success
813 static void __exit cvm_oct_cleanup_module(void)
817 /* Disable POW interrupt */
818 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
822 /* Free the interrupt handler */
823 free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
825 del_timer(&cvm_oct_poll_timer);
826 cvm_oct_rx_shutdown();
829 /* Free the ethernet devices */
830 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
831 if (cvm_oct_device[port]) {
832 cvm_oct_tx_shutdown(cvm_oct_device[port]);
833 unregister_netdev(cvm_oct_device[port]);
834 kfree(cvm_oct_device[port]);
835 cvm_oct_device[port] = NULL;
840 cvm_oct_proc_shutdown();
844 /* Free the HW pools */
845 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
847 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
849 if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
850 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
851 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
/* Module metadata and entry points. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);