/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/smp.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-helper.h"
#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pow.h"
#include "cvmx-pip.h"
#include "cvmx-scratch.h"

#include "cvmx-gmxx-defs.h"

struct cvm_napi_wrapper {
	struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
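
/*
 * Bookkeeping for balancing receive work across CPUs: baseline_cores
 * is the maximum number of CPUs allowed to poll, available_cores
 * counts how many of them are currently idle, and cpu_state tracks
 * which CPUs have NAPI scheduled.  Kept cacheline aligned so these
 * hot atomics do not share a line with unrelated data.
 */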
struct cvm_oct_core_state {
	int baseline_cores;
	/* The number of additional cores that could be processing input packets. */
	atomic_t available_cores;
	cpumask_t cpu_state;
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;

static void cvm_oct_enable_napi(void *_)
{
	int cpu = smp_processor_id();
	napi_schedule(&cvm_oct_napi[cpu].napi);
}
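
/*
 * Wake one additional CPU, if any are still available, by scheduling
 * its NAPI instance from an IPI.
 */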
static void cvm_oct_enable_one_cpu(void)
{
	int cpu;
	int v;

	/* Check to see if more CPUs are available for receive processing... */
	v = atomic_sub_if_positive(1, &core_state.available_cores);
	if (v < 0)
		return;

	/* ... if a CPU is available, turn on NAPI polling for that CPU. */
	for_each_online_cpu(cpu) {
		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
						     NULL, 0);
			if (v)
				panic("Can't enable NAPI.");
			break;
		}
	}
}
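
/*
 * Called when a CPU runs out of receive work: CPU zero re-enables the
 * receive IRQ, any other CPU removes itself from the polling set and
 * returns to the available pool.
 */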
static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	/*
	 * CPU zero is special.  It always has the irq enabled when
	 * waiting for incoming packets.
	 */
	if (cpu == 0) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}

	cpu_clear(cpu, core_state.cpu_state);
	atomic_add(1, &core_state.available_cores);
}

/**
 * Interrupt handler. The interrupt occurs whenever the POW
 * has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	cvm_oct_enable_napi(NULL);

	return IRQ_HANDLED;
}

/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early-on in cvm_oct_napi_poll().
 *
 * @work: Work queue entry pointing to the packet.
 *
 * Returns Non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND
		   && ((work->word2.snoip.err_code == 5)
		       || (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			/* Skip over any leading 0x55 preamble bytes. */
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt); */
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt); */
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				/* Shift the packet left one nibble to drop the odd half byte. */
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				DEBUGPRINT("Port %d unknown preamble, packet dropped\n",
					   work->ipprt);
				/* cvmx_helper_dump_packet(work); */
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
			   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * The NAPI poll function.
 *
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}
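
	/*
	 * Main receive loop.  With IOBDMA the next work request is issued
	 * asynchronously, so its response can be read back from the core
	 * scratchpad a little later, overlapping the POW access with
	 * packet processing.
	 */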
	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			/* No work left: re-arm the POW interrupt for our group. */
			union cvmx_pow_wq_int wq_int;
			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}
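
		/*
		 * When USE_SKBUFFS_IN_HW, the FPA packet pool is filled with
		 * buffers carved out of skbuffs, and the skb pointer is
		 * stored in the bytes immediately before the packet data.
		 */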
		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}

		if (rx_count == 0) {
			/*
			 * First time through, see if there is enough
			 * work waiting to merit waking another CPU.
			 */
			union cvmx_pow_wq_int_cntx counts;
			int backlog;
			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
			if (backlog > budget * cores_in_use && napi != NULL)
				cvm_oct_enable_one_cpu();
		}

		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/* We have to copy the packet. First allocate an skbuff for it. */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
					   work->ipprt);
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size = segment_ptr.s.size;
					 */
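					/*
					 * ((addr >> 7) - back) << 7 recovers the
					 * 128-byte aligned start of the buffer, so
					 * the usable size is the pool buffer size
					 * minus addr's offset into the buffer.
					 */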
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

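				/*
				 * The hardware has already checked the IP and
				 * L4 headers; only fall back to software
				 * checksumming when it flagged an exception.
				 */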
				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
				rx_count++;
			} else {
				/* Drop any packet received for a device that isn't up. */
				/* DEBUGPRINT("%s: Device not up, packet dropped\n", dev->name); */
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/* Drop any packet received for a device that doesn't exist. */
			DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
				   work->ipprt);
			dev_kfree_skb_irq(skb);
		}

		/* Check to see if the skbuff and work share the same packet buffer. */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced: increment
			 * the number of buffers we need to free by one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 1);
			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}

	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
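
	/* Refill the packet buffer pool with new skbuffs to replace the ones consumed above. */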
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work: stop polling and let the IRQ path take over. */
		napi_complete(napi);
		cvm_oct_no_more_work();
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * This is called when the kernel needs to manually poll the device.
 *
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
		atomic_set(&core_state.available_cores, max_rx_cpus);
	else
		atomic_set(&core_state.available_cores, num_online_cpus());
	core_state.baseline_cores = atomic_read(&core_state.available_cores);

	core_state.cpu_state = CPU_MASK_NONE;
	for_each_possible_cpu(i) {
		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&cvm_oct_napi[i].napi);
	}

	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

	/* Schedule NAPI now. This will indirectly enable interrupts. */
	cvm_oct_enable_one_cpu();
}

void cvm_oct_rx_shutdown(void)
{
	int i;

	/* Shutdown all of the NAPIs */
	for_each_possible_cpu(i)
		netif_napi_del(&cvm_oct_napi[i].napi);
}