Staging: octeon-ethernet: Convert to use net_device_ops.
[pandora-kernel.git] / drivers / staging / octeon / ethernet.c
1 /**********************************************************************
2  * Author: Cavium Networks
3  *
4  * Contact: support@caviumnetworks.com
5  * This file is part of the OCTEON SDK
6  *
7  * Copyright (c) 2003-2007 Cavium Networks
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more
17  * details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this file; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22  * or visit http://www.gnu.org/licenses/.
23  *
24  * This file may also be available under a different license from Cavium.
25  * Contact Cavium Networks for more information
26 **********************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/delay.h>
33 #include <linux/mii.h>
34
35 #include <net/dst.h>
36
37 #include <asm/octeon/octeon.h>
38
39 #include "ethernet-defines.h"
40 #include "ethernet-mem.h"
41 #include "ethernet-rx.h"
42 #include "ethernet-tx.h"
43 #include "ethernet-mdio.h"
44 #include "ethernet-util.h"
45 #include "ethernet-proc.h"
46 #include "octeon-ethernet.h"
47
48 #include "cvmx-pip.h"
49 #include "cvmx-pko.h"
50 #include "cvmx-fau.h"
51 #include "cvmx-ipd.h"
52 #include "cvmx-helper.h"
53
54 #include "cvmx-gmxx-defs.h"
55 #include "cvmx-smix-defs.h"
56
57 #if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
58         && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
59 int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
60 #else
61 int num_packet_buffers = 1024;
62 #endif
63 module_param(num_packet_buffers, int, 0444);
64 MODULE_PARM_DESC(num_packet_buffers, "\n"
65         "\tNumber of packet buffers to allocate and store in the\n"
66         "\tFPA. By default, 1024 packet buffers are used unless\n"
67         "\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
68
69 int pow_receive_group = 15;
70 module_param(pow_receive_group, int, 0444);
71 MODULE_PARM_DESC(pow_receive_group, "\n"
72         "\tPOW group to receive packets from. All ethernet hardware\n"
73         "\twill be configured to send incomming packets to this POW\n"
74         "\tgroup. Also any other software can submit packets to this\n"
75         "\tgroup for the kernel to process.");
76
77 int pow_send_group = -1;
78 module_param(pow_send_group, int, 0644);
79 MODULE_PARM_DESC(pow_send_group, "\n"
80         "\tPOW group to send packets to other software on. This\n"
81         "\tcontrols the creation of the virtual device pow0.\n"
82         "\talways_use_pow also depends on this value.");
83
84 int always_use_pow;
85 module_param(always_use_pow, int, 0444);
86 MODULE_PARM_DESC(always_use_pow, "\n"
87         "\tWhen set, always send to the pow group. This will cause\n"
88         "\tpackets sent to real ethernet devices to be sent to the\n"
89         "\tPOW group instead of the hardware. Unless some other\n"
90         "\tapplication changes the config, packets will still be\n"
91         "\treceived from the low level hardware. Use this option\n"
92         "\tto allow a CVMX app to intercept all packets from the\n"
93         "\tlinux kernel. You must specify pow_send_group along with\n"
94         "\tthis option.");
95
96 char pow_send_list[128] = "";
97 module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
98 MODULE_PARM_DESC(pow_send_list, "\n"
99         "\tComma separated list of ethernet devices that should use the\n"
100         "\tPOW for transmit instead of the actual ethernet hardware. This\n"
101         "\tis a per port version of always_use_pow. always_use_pow takes\n"
102         "\tprecedence over this list. For example, setting this to\n"
103         "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
104         "\tusing the pow_send_group.");
105
106 static int disable_core_queueing = 1;
107 module_param(disable_core_queueing, int, 0444);
108 MODULE_PARM_DESC(disable_core_queueing, "\n"
109         "\tWhen set the networking core's tx_queue_len is set to zero.  This\n"
110         "\tallows packets to be sent without lock contention in the packet\n"
111         "\tscheduler resulting in some cases in improved throughput.\n");
112
113 /**
114  * Periodic timer to check auto negotiation
115  */
116 static struct timer_list cvm_oct_poll_timer;
117
118 /**
119  * Array of every ethernet device owned by this driver indexed by
120  * the ipd input port number.
121  */
122 struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
123
124 extern struct semaphore mdio_sem;
125
126 /**
127  * Periodic timer tick for slow management operations
128  *
129  * @arg:    Unused (timer callback argument)
130  */
131 static void cvm_do_timer(unsigned long arg)
132 {
133         static int port;
134         if (port < CVMX_PIP_NUM_INPUT_PORTS) {
135                 if (cvm_oct_device[port]) {
136                         int queues_per_port;
137                         int qos;
138                         struct octeon_ethernet *priv =
139                                 netdev_priv(cvm_oct_device[port]);
140                         if (priv->poll) {
141                                 /* skip polling if we don't get the lock */
142                                 if (!down_trylock(&mdio_sem)) {
143                                         priv->poll(cvm_oct_device[port]);
144                                         up(&mdio_sem);
145                                 }
146                         }
147
148                         queues_per_port = cvmx_pko_get_num_queues(port);
149                         /* Drain any pending packets in the free list */
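                            /* Free any skbs beyond the count held in this queue's FAU counter */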
150                         for (qos = 0; qos < queues_per_port; qos++) {
151                                 if (skb_queue_len(&priv->tx_free_list[qos])) {
152                                         spin_lock(&priv->tx_free_list[qos].
153                                                   lock);
154                                         while (skb_queue_len
155                                                (&priv->tx_free_list[qos]) >
156                                                cvmx_fau_fetch_and_add32(priv->
157                                                                         fau +
158                                                                         qos * 4,
159                                                                         0))
160                                                 dev_kfree_skb(__skb_dequeue
161                                                               (&priv->
162                                                                tx_free_list
163                                                                [qos]));
164                                         spin_unlock(&priv->tx_free_list[qos].
165                                                     lock);
166                                 }
167                         }
168                         cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
169                 }
170                 port++;
171                 /* Poll the next port in a 50th of a second.
172                    This spreads the polling of ports out a little bit */
173                 mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
174         } else {
175                 port = 0;
176                 /* All ports have been polled. Start the next iteration through
177                    the ports in one second */
178                 mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
179         }
180 }
181
182 /**
183  * Configure common hardware for all interfaces
184  */
185 static __init void cvm_oct_configure_common_hw(void)
186 {
187         int r;
188         /* Setup the FPA */
189         cvmx_fpa_enable();
190         cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
191                              num_packet_buffers);
192         cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
193                              num_packet_buffers);
194         if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
195                 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
196                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
197
198         if (USE_RED)
199                 cvmx_helper_setup_red(num_packet_buffers / 4,
200                                       num_packet_buffers / 8);
201
202         /* Enable the MII interface */
203         if (!octeon_is_simulation())
204                 cvmx_write_csr(CVMX_SMIX_EN(0), 1);
205
206         /* Register an IRQ handler to receive POW interrupts */
207         r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
208                         cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
209                         cvm_oct_device);
210
211 #if defined(CONFIG_SMP) && 0
212         if (USE_MULTICORE_RECEIVE) {
213                 irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
214                                  cpu_online_mask);
215         }
216 #endif
217 }
218
219 /**
220  * Free a work queue entry received in an intercept callback.
221  *
222  * @work_queue_entry:
223  *               Work queue entry to free
224  * Returns Zero on success, Negative on failure.
225  */
226 int cvm_oct_free_work(void *work_queue_entry)
227 {
228         cvmx_wqe_t *work = work_queue_entry;
229
230         int segments = work->word2.s.bufs;
231         union cvmx_buf_ptr segment_ptr = work->packet_ptr;
232
233         while (segments--) {
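                /*
                 * The pointer to the next segment is stored 8 bytes before
                 * the start of the current segment's packet data.
                 */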
234                 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
235                         cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
236                 if (unlikely(!segment_ptr.s.i))
237                         cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
238                                       segment_ptr.s.pool,
239                                       DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
240                                                      128));
241                 segment_ptr = next_ptr;
242         }
243         cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
244
245         return 0;
246 }
247 EXPORT_SYMBOL(cvm_oct_free_work);
248
249 /**
250  * Get the low level ethernet statistics
251  *
252  * @dev:    Device to get the statistics from
253  * Returns Pointer to the statistics
254  */
255 static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
256 {
257         cvmx_pip_port_status_t rx_status;
258         cvmx_pko_port_status_t tx_status;
259         struct octeon_ethernet *priv = netdev_priv(dev);
260
261         if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
262                 if (octeon_is_simulation()) {
263                         /* The simulator doesn't support statistics */
264                         memset(&rx_status, 0, sizeof(rx_status));
265                         memset(&tx_status, 0, sizeof(tx_status));
266                 } else {
267                         cvmx_pip_get_port_status(priv->port, 1, &rx_status);
268                         cvmx_pko_get_port_status(priv->port, 1, &tx_status);
269                 }
270
271                 priv->stats.rx_packets += rx_status.inb_packets;
272                 priv->stats.tx_packets += tx_status.packets;
273                 priv->stats.rx_bytes += rx_status.inb_octets;
274                 priv->stats.tx_bytes += tx_status.octets;
275                 priv->stats.multicast += rx_status.multicast_packets;
276                 priv->stats.rx_crc_errors += rx_status.inb_errors;
277                 priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
278
279                 /*
280                  * The drop counter must be incremented atomically
281                  * since the RX tasklet also increments it.
282                  */
283 #ifdef CONFIG_64BIT
284                 atomic64_add(rx_status.dropped_packets,
285                              (atomic64_t *)&priv->stats.rx_dropped);
286 #else
287                 atomic_add(rx_status.dropped_packets,
288                              (atomic_t *)&priv->stats.rx_dropped);
289 #endif
290         }
291
292         return &priv->stats;
293 }
294
295 /**
296  * Change the link MTU.
297  *
298  * @dev:     Device to change
299  * @new_mtu: The new MTU
300  *
301  * Returns Zero on success
302  */
303 static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
304 {
305         struct octeon_ethernet *priv = netdev_priv(dev);
306         int interface = INTERFACE(priv->port);
307         int index = INDEX(priv->port);
308 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
309         int vlan_bytes = 4;
310 #else
311         int vlan_bytes = 0;
312 #endif
313
314         /*
315          * Limit the MTU so the resulting frame (MTU + 14 byte header +
316          * 4 byte FCS + optional VLAN tag) is between 64 and 65392 bytes.
317          */
318         if ((new_mtu + 14 + 4 + vlan_bytes < 64)
319             || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
320                 pr_err("MTU must be between %d and %d.\n",
321                        64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
322                 return -EINVAL;
323         }
324         dev->mtu = new_mtu;
325
326         if ((interface < 2)
327             && (cvmx_helper_interface_get_mode(interface) !=
328                 CVMX_HELPER_INTERFACE_MODE_SPI)) {
329                 /* Add ethernet header and FCS, and VLAN if configured. */
330                 int max_packet = new_mtu + 14 + 4 + vlan_bytes;
331
332                 if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
333                     || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
334                         /* Signal errors on packets larger than the MTU */
335                         cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
336                                        max_packet);
337                 } else {
338                         /*
339                          * Set the hardware to flag packets larger than
340                          * the MTU or smaller than 64 bytes.
341                          */
342                         union cvmx_pip_frm_len_chkx frm_len_chk;
343                         frm_len_chk.u64 = 0;
344                         frm_len_chk.s.minlen = 64;
345                         frm_len_chk.s.maxlen = max_packet;
346                         cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
347                                        frm_len_chk.u64);
348                 }
349                 /*
350                  * Set the hardware to truncate packets larger than
351                  * the MTU. The jabber register must be set to a
352                  * multiple of 8 bytes, so round up.
353                  */
354                 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
355                                (max_packet + 7) & ~7u);
356         }
357         return 0;
358 }
359
360 /**
361  * Set the multicast list. Individual addresses are not loaded into the
362  * CAM; multicast is either accepted or rejected as a whole.
363  * @dev:    Device to work on
364  */
365 static void cvm_oct_common_set_multicast_list(struct net_device *dev)
366 {
367         union cvmx_gmxx_prtx_cfg gmx_cfg;
368         struct octeon_ethernet *priv = netdev_priv(dev);
369         int interface = INTERFACE(priv->port);
370         int index = INDEX(priv->port);
371
372         if ((interface < 2)
373             && (cvmx_helper_interface_get_mode(interface) !=
374                 CVMX_HELPER_INTERFACE_MODE_SPI)) {
375                 union cvmx_gmxx_rxx_adr_ctl control;
376                 control.u64 = 0;
377                 control.s.bcst = 1;     /* Allow broadcast MAC addresses */
378
379                 if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
380                     (dev->flags & IFF_PROMISC))
381                         /* Force accept multicast packets */
382                         control.s.mcst = 2;
383                 else
384                         /* Force reject multicast packets */
385                         control.s.mcst = 1;
386
387                 if (dev->flags & IFF_PROMISC)
388                         /*
389                          * Reject matches if promisc. Since CAM is
390                          * shut off, should accept everything.
391                          */
392                         control.s.cam_mode = 0;
393                 else
394                         /* Filter packets based on the CAM */
395                         control.s.cam_mode = 1;
396
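                /*
                 * Disable the port (clear the enable bit in GMX_PRT_CFG)
                 * while updating the address filter, then restore it.
                 */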
397                 gmx_cfg.u64 =
398                     cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
399                 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
400                                gmx_cfg.u64 & ~1ull);
401
402                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
403                                control.u64);
404                 if (dev->flags & IFF_PROMISC)
405                         cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
406                                        (index, interface), 0);
407                 else
408                         cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
409                                        (index, interface), 1);
410
411                 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
412                                gmx_cfg.u64);
413         }
414 }
415
416 /**
417  * Set the hardware MAC address for a device
418  *
419  * @dev:    Device to change the MAC address for
420  * @addr:   Address structure to change it to. The MAC address starts at addr + 2.
421  * Returns Zero on success
422  */
423 static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
424 {
425         struct octeon_ethernet *priv = netdev_priv(dev);
426         union cvmx_gmxx_prtx_cfg gmx_cfg;
427         int interface = INTERFACE(priv->port);
428         int index = INDEX(priv->port);
429
430         memcpy(dev->dev_addr, addr + 2, 6);
431
432         if ((interface < 2)
433             && (cvmx_helper_interface_get_mode(interface) !=
434                 CVMX_HELPER_INTERFACE_MODE_SPI)) {
435                 int i;
436                 uint8_t *ptr = addr;
437                 uint64_t mac = 0;
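                /*
                 * Pack the six MAC bytes (starting at addr + 2) into a
                 * 64-bit value, most significant byte first, for the
                 * GMX SMAC register.
                 */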
438                 for (i = 0; i < 6; i++)
439                         mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
440
441                 gmx_cfg.u64 =
442                     cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
443                 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
444                                gmx_cfg.u64 & ~1ull);
445
446                 cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
447                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
448                                ptr[2]);
449                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
450                                ptr[3]);
451                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
452                                ptr[4]);
453                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
454                                ptr[5]);
455                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
456                                ptr[6]);
457                 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
458                                ptr[7]);
459                 cvm_oct_common_set_multicast_list(dev);
460                 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
461                                gmx_cfg.u64);
462         }
463         return 0;
464 }
465
466 /**
467  * Per network device initialization
468  *
469  * @dev:    Device to initialize
470  * Returns Zero on success
471  */
472 int cvm_oct_common_init(struct net_device *dev)
473 {
474         static int count;
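        /*
         * Build a sockaddr-style buffer: two bytes of padding (where
         * sa_family would sit) followed by the six MAC bytes, since
         * ndo_set_mac_address reads the address from addr + 2.
         */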
475         char mac[8] = { 0x00, 0x00,
476                 octeon_bootinfo->mac_addr_base[0],
477                 octeon_bootinfo->mac_addr_base[1],
478                 octeon_bootinfo->mac_addr_base[2],
479                 octeon_bootinfo->mac_addr_base[3],
480                 octeon_bootinfo->mac_addr_base[4],
481                 octeon_bootinfo->mac_addr_base[5] + count
482         };
483         struct octeon_ethernet *priv = netdev_priv(dev);
484
485         /*
486          * Force the interface to send via the POW if always_use_pow
487          * was specified or the device is in pow_send_list.
488          */
489         if ((pow_send_group != -1)
490             && (always_use_pow || strstr(pow_send_list, dev->name)))
491                 priv->queue = -1;
492
493         if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
494                 dev->features |= NETIF_F_IP_CSUM;
495
496         count++;
497
498         /* We do our own locking; Linux doesn't need to */
499         dev->features |= NETIF_F_LLTX;
500         SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
501
502         cvm_oct_mdio_setup_device(dev);
503         dev->netdev_ops->ndo_set_mac_address(dev, mac);
504         dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
505
506         /*
507          * Zero out stats for port so we won't mistakenly show
508          * counters from the bootloader.
509          */
510         memset(dev->netdev_ops->ndo_get_stats(dev), 0,
511                sizeof(struct net_device_stats));
512
513         return 0;
514 }
515
516 void cvm_oct_common_uninit(struct net_device *dev)
517 {
518         /* Currently nothing to do */
519 }
520
521 static const struct net_device_ops cvm_oct_npi_netdev_ops = {
522         .ndo_init               = cvm_oct_common_init,
523         .ndo_uninit             = cvm_oct_common_uninit,
524         .ndo_start_xmit         = cvm_oct_xmit,
525         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
526         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
527         .ndo_do_ioctl           = cvm_oct_ioctl,
528         .ndo_change_mtu         = cvm_oct_common_change_mtu,
529         .ndo_get_stats          = cvm_oct_common_get_stats,
530 #ifdef CONFIG_NET_POLL_CONTROLLER
531         .ndo_poll_controller    = cvm_oct_poll_controller,
532 #endif
533 };
534 static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
535         .ndo_init               = cvm_oct_xaui_init,
536         .ndo_uninit             = cvm_oct_xaui_uninit,
537         .ndo_open               = cvm_oct_xaui_open,
538         .ndo_stop               = cvm_oct_xaui_stop,
539         .ndo_start_xmit         = cvm_oct_xmit,
540         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
541         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
542         .ndo_do_ioctl           = cvm_oct_ioctl,
543         .ndo_change_mtu         = cvm_oct_common_change_mtu,
544         .ndo_get_stats          = cvm_oct_common_get_stats,
545 #ifdef CONFIG_NET_POLL_CONTROLLER
546         .ndo_poll_controller    = cvm_oct_poll_controller,
547 #endif
548 };
549 static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
550         .ndo_init               = cvm_oct_sgmii_init,
551         .ndo_uninit             = cvm_oct_sgmii_uninit,
552         .ndo_open               = cvm_oct_sgmii_open,
553         .ndo_stop               = cvm_oct_sgmii_stop,
554         .ndo_start_xmit         = cvm_oct_xmit,
555         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
556         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
557         .ndo_do_ioctl           = cvm_oct_ioctl,
558         .ndo_change_mtu         = cvm_oct_common_change_mtu,
559         .ndo_get_stats          = cvm_oct_common_get_stats,
560 #ifdef CONFIG_NET_POLL_CONTROLLER
561         .ndo_poll_controller    = cvm_oct_poll_controller,
562 #endif
563 };
564 static const struct net_device_ops cvm_oct_spi_netdev_ops = {
565         .ndo_init               = cvm_oct_spi_init,
566         .ndo_uninit             = cvm_oct_spi_uninit,
567         .ndo_start_xmit         = cvm_oct_xmit,
568         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
569         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
570         .ndo_do_ioctl           = cvm_oct_ioctl,
571         .ndo_change_mtu         = cvm_oct_common_change_mtu,
572         .ndo_get_stats          = cvm_oct_common_get_stats,
573 #ifdef CONFIG_NET_POLL_CONTROLLER
574         .ndo_poll_controller    = cvm_oct_poll_controller,
575 #endif
576 };
577 static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
578         .ndo_init               = cvm_oct_rgmii_init,
579         .ndo_uninit             = cvm_oct_rgmii_uninit,
580         .ndo_open               = cvm_oct_rgmii_open,
581         .ndo_stop               = cvm_oct_rgmii_stop,
582         .ndo_start_xmit         = cvm_oct_xmit,
583         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
584         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
585         .ndo_do_ioctl           = cvm_oct_ioctl,
586         .ndo_change_mtu         = cvm_oct_common_change_mtu,
587         .ndo_get_stats          = cvm_oct_common_get_stats,
588 #ifdef CONFIG_NET_POLL_CONTROLLER
589         .ndo_poll_controller    = cvm_oct_poll_controller,
590 #endif
591 };
592 static const struct net_device_ops cvm_oct_pow_netdev_ops = {
593         .ndo_init               = cvm_oct_common_init,
594         .ndo_start_xmit         = cvm_oct_xmit_pow,
595         .ndo_set_multicast_list = cvm_oct_common_set_multicast_list,
596         .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
597         .ndo_do_ioctl           = cvm_oct_ioctl,
598         .ndo_change_mtu         = cvm_oct_common_change_mtu,
599         .ndo_get_stats          = cvm_oct_common_get_stats,
600 #ifdef CONFIG_NET_POLL_CONTROLLER
601         .ndo_poll_controller    = cvm_oct_poll_controller,
602 #endif
603 };
604
605 /**
606  * Module/driver initialization. Creates the Linux network
607  * devices.
608  *
609  * Returns Zero on success
610  */
611 static int __init cvm_oct_init_module(void)
612 {
613         int num_interfaces;
614         int interface;
615         int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
616         int qos;
617
618         pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
619
620         cvm_oct_proc_initialize();
621         cvm_oct_rx_initialize();
622         cvm_oct_configure_common_hw();
623
624         cvmx_helper_initialize_packet_io_global();
625
626         /* Change the input group for all ports before input is enabled */
627         num_interfaces = cvmx_helper_get_number_of_interfaces();
628         for (interface = 0; interface < num_interfaces; interface++) {
629                 int num_ports = cvmx_helper_ports_on_interface(interface);
630                 int port;
631
632                 for (port = cvmx_helper_get_ipd_port(interface, 0);
633                      port < cvmx_helper_get_ipd_port(interface, num_ports);
634                      port++) {
635                         union cvmx_pip_prt_tagx pip_prt_tagx;
636                         pip_prt_tagx.u64 =
637                             cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
638                         pip_prt_tagx.s.grp = pow_receive_group;
639                         cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
640                                        pip_prt_tagx.u64);
641                 }
642         }
643
644         cvmx_helper_ipd_and_packet_input_enable();
645
646         memset(cvm_oct_device, 0, sizeof(cvm_oct_device));
647
648         /*
649          * Initialize the FAU used for counting packet buffers that
650          * need to be freed.
651          */
652         cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
653
654         if (pow_send_group != -1) {
655                 struct net_device *dev;
656                 pr_info("\tConfiguring device for POW only access\n");
657                 dev = alloc_etherdev(sizeof(struct octeon_ethernet));
658                 if (dev) {
659                         /* Initialize the device private structure. */
660                         struct octeon_ethernet *priv = netdev_priv(dev);
661                         memset(priv, 0, sizeof(struct octeon_ethernet));
662
663                         dev->netdev_ops = &cvm_oct_pow_netdev_ops;
664                         priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
665                         priv->port = CVMX_PIP_NUM_INPUT_PORTS;
666                         priv->queue = -1;
667                         strcpy(dev->name, "pow%d");
668                         for (qos = 0; qos < 16; qos++)
669                                 skb_queue_head_init(&priv->tx_free_list[qos]);
670
671                         if (register_netdev(dev) < 0) {
672                                 pr_err("Failed to register ethernet "
673                                          "device for POW\n");
674                                 free_netdev(dev);
675                         } else {
676                                 cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
677                                 pr_info("%s: POW send group %d, receive "
678                                         "group %d\n",
679                                      dev->name, pow_send_group,
680                                      pow_receive_group);
681                         }
682                 } else {
683                         pr_err("Failed to allocate ethernet device "
684                                  "for POW\n");
685                 }
686         }
687
688         num_interfaces = cvmx_helper_get_number_of_interfaces();
689         for (interface = 0; interface < num_interfaces; interface++) {
690                 cvmx_helper_interface_mode_t imode =
691                     cvmx_helper_interface_get_mode(interface);
692                 int num_ports = cvmx_helper_ports_on_interface(interface);
693                 int port;
694
695                 for (port = cvmx_helper_get_ipd_port(interface, 0);
696                      port < cvmx_helper_get_ipd_port(interface, num_ports);
697                      port++) {
698                         struct octeon_ethernet *priv;
699                         struct net_device *dev =
700                             alloc_etherdev(sizeof(struct octeon_ethernet));
701                         if (!dev) {
702                                 pr_err("Failed to allocate ethernet device "
703                                          "for port %d\n", port);
704                                 continue;
705                         }
706                         if (disable_core_queueing)
707                                 dev->tx_queue_len = 0;
708
709                         /* Initialize the device private structure. */
710                         priv = netdev_priv(dev);
711                         memset(priv, 0, sizeof(struct octeon_ethernet));
712
713                         priv->imode = imode;
714                         priv->port = port;
715                         priv->queue = cvmx_pko_get_base_queue(priv->port);
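                            /*
                             * Reserve one 32-bit FAU counter per PKO queue for
                             * this port, allocated downward from 'fau'.
                             */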
716                         priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
717                         for (qos = 0; qos < 16; qos++)
718                                 skb_queue_head_init(&priv->tx_free_list[qos]);
719                         for (qos = 0; qos < cvmx_pko_get_num_queues(port);
720                              qos++)
721                                 cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
722
723                         switch (priv->imode) {
724
725                         /* These interface types have no ports connected to the IPD/PKO */
726                         case CVMX_HELPER_INTERFACE_MODE_DISABLED:
727                         case CVMX_HELPER_INTERFACE_MODE_PCIE:
728                         case CVMX_HELPER_INTERFACE_MODE_PICMG:
729                                 break;
730
731                         case CVMX_HELPER_INTERFACE_MODE_NPI:
732                                 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
733                                 strcpy(dev->name, "npi%d");
734                                 break;
735
736                         case CVMX_HELPER_INTERFACE_MODE_XAUI:
737                                 dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
738                                 strcpy(dev->name, "xaui%d");
739                                 break;
740
741                         case CVMX_HELPER_INTERFACE_MODE_LOOP:
742                                 dev->netdev_ops = &cvm_oct_npi_netdev_ops;
743                                 strcpy(dev->name, "loop%d");
744                                 break;
745
746                         case CVMX_HELPER_INTERFACE_MODE_SGMII:
747                                 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
748                                 strcpy(dev->name, "eth%d");
749                                 break;
750
751                         case CVMX_HELPER_INTERFACE_MODE_SPI:
752                                 dev->netdev_ops = &cvm_oct_spi_netdev_ops;
753                                 strcpy(dev->name, "spi%d");
754                                 break;
755
756                         case CVMX_HELPER_INTERFACE_MODE_RGMII:
757                         case CVMX_HELPER_INTERFACE_MODE_GMII:
758                                 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
759                                 strcpy(dev->name, "eth%d");
760                                 break;
761                         }
762
763                         if (!dev->netdev_ops) {
764                                 free_netdev(dev);
765                         } else if (register_netdev(dev) < 0) {
766                                 pr_err("Failed to register ethernet device "
767                                          "for interface %d, port %d\n",
768                                          interface, priv->port);
769                                 free_netdev(dev);
770                         } else {
771                                 cvm_oct_device[priv->port] = dev;
772                                 fau -=
773                                     cvmx_pko_get_num_queues(priv->port) *
774                                     sizeof(uint32_t);
775                         }
776                 }
777         }
778
779         if (INTERRUPT_LIMIT) {
780                 /*
781                  * Set the POW timer rate to give an interrupt at most
782                  * INTERRUPT_LIMIT times per second.
783                  */
784                 cvmx_write_csr(CVMX_POW_WQ_INT_PC,
785                                octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
786                                                              16 * 256) << 8);
787
788                 /*
789                  * Enable POW timer interrupt. It will count when
790                  * there are packets available.
791                  */
792                 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
793                                0x1ful << 24);
794         } else {
795                 /* Enable the POW interrupt when our receive group has at least one packet */
796                 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
797         }
798
799         /* Enable the poll timer for checking RGMII status */
800         init_timer(&cvm_oct_poll_timer);
801         cvm_oct_poll_timer.data = 0;
802         cvm_oct_poll_timer.function = cvm_do_timer;
803         mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
804
805         return 0;
806 }
807
808 /**
809  * Module / driver shutdown
810  *
811  * Returns Zero on success
812  */
813 static void __exit cvm_oct_cleanup_module(void)
814 {
815         int port;
816
817         /* Disable POW interrupt */
818         cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
819
820         cvmx_ipd_disable();
821
822         /* Free the interrupt handler */
823         free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);
824
825         del_timer(&cvm_oct_poll_timer);
826         cvm_oct_rx_shutdown();
827         cvmx_pko_disable();
828
829         /* Free the ethernet devices */
830         for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
831                 if (cvm_oct_device[port]) {
832                         cvm_oct_tx_shutdown(cvm_oct_device[port]);
833                         unregister_netdev(cvm_oct_device[port]);
834                         free_netdev(cvm_oct_device[port]);
835                         cvm_oct_device[port] = NULL;
836                 }
837         }
838
839         cvmx_pko_shutdown();
840         cvm_oct_proc_shutdown();
841
842         cvmx_ipd_free_ptr();
843
844         /* Free the HW pools */
845         cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
846                               num_packet_buffers);
847         cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
848                               num_packet_buffers);
849         if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
850                 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
851                                       CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
852 }
853
854 MODULE_LICENSE("GPL");
855 MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
856 MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
857 module_init(cvm_oct_init_module);
858 module_exit(cvm_oct_cleanup_module);