drivers/staging/octeon/ethernet-rx.c [pandora-kernel.git]
/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/smp.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include "cvmx-helper.h"
#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pow.h"
#include "cvmx-pip.h"
#include "cvmx-scratch.h"

#include "cvmx-gmxx-defs.h"
struct cvm_napi_wrapper {
        struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;

struct cvm_oct_core_state {
        int baseline_cores;
        /*
         * The number of additional cores that could be processing
         * input packets.
         */
        atomic_t available_cores;
        cpumask_t cpu_state;
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;

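/**
 * Enable NAPI processing on the current CPU by scheduling its
 * per-CPU NAPI instance.
 *
 * @_: Unused, present only to match the smp_call_function_single()
 *     callback signature.
 */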
static void cvm_oct_enable_napi(void *_)
{
        int cpu = smp_processor_id();
        napi_schedule(&cvm_oct_napi[cpu].napi);
}

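/**
 * Wake one additional CPU for receive processing. If a core is
 * still available, claim it, mark the first idle online CPU as
 * busy, and schedule NAPI on it via smp_call_function_single().
 */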
static void cvm_oct_enable_one_cpu(void)
{
        int v;
        int cpu;

        /* Check to see if more CPUs are available for receive processing... */
        v = atomic_sub_if_positive(1, &core_state.available_cores);
        if (v < 0)
                return;

        /* ... if a CPU is available, turn on NAPI polling for that CPU. */
        for_each_online_cpu(cpu) {
                if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
                        v = smp_call_function_single(cpu, cvm_oct_enable_napi,
                                                     NULL, 0);
                        if (v)
                                panic("Can't enable NAPI.");
                        break;
                }
        }
}

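/**
 * Called from cvm_oct_napi_poll() when a CPU runs out of work.
 * CPU zero re-enables the receive-group IRQ instead of leaving
 * the polling set, so new packets can always raise an interrupt.
 */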
static void cvm_oct_no_more_work(void)
{
        int cpu = smp_processor_id();

        /*
         * CPU zero is special.  It always has the irq enabled when
         * waiting for incoming packets.
         */
        if (cpu == 0) {
                enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
                return;
        }

        cpu_clear(cpu, core_state.cpu_state);
        atomic_inc(&core_state.available_cores);
}

/**
 * Interrupt handler. The interrupt occurs whenever the POW
 * has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
        /* Disable the IRQ and start napi_poll. */
        disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
        cvm_oct_enable_napi(NULL);

        return IRQ_HANDLED;
}

/**
 * This is called on receive errors, and determines if the packet
 * can be dropped early on in cvm_oct_napi_poll().
 *
 * @work: Work queue entry pointing to the packet.
 * Returns: non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
        if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
                /*
                 * Ignore length errors on min size packets. Some
                 * equipment incorrectly pads packets to 64+4FCS
                 * instead of 60+4FCS.  Note these packets still get
                 * counted as frame errors.
                 */
        } else if (USE_10MBPS_PREAMBLE_WORKAROUND
                   && ((work->word2.snoip.err_code == 5)
                       || (work->word2.snoip.err_code == 7))) {
                /*
                 * We received a packet with either an alignment error
                 * or a FCS error. This may be signalling that we are
                 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
                 * off. If this is the case we need to parse the
                 * packet to determine if we can remove a non-spec
                 * preamble and generate a correct packet.
                 */
                int interface = cvmx_helper_get_interface_num(work->ipprt);
                int index = cvmx_helper_get_interface_index_num(work->ipprt);
                union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

                gmxx_rxx_frm_ctl.u64 =
                    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
                if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
                        uint8_t *ptr =
                            cvmx_phys_to_ptr(work->packet_ptr.s.addr);
                        int i = 0;

                        /* Skip over the 0x55 preamble bytes. */
                        while (i < work->len - 1) {
                                if (*ptr != 0x55)
                                        break;
                                ptr++;
                                i++;
                        }

                        if (*ptr == 0xd5) {
                                /*
                                   DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
                                 */
                                work->packet_ptr.s.addr += i + 1;
                                work->len -= i + 5;
                        } else if ((*ptr & 0xf) == 0xd) {
                                /*
                                   DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
                                 */
                                work->packet_ptr.s.addr += i;
                                work->len -= i + 4;
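                                /*
                                 * The data is shifted by half a byte:
                                 * rebuild each byte from the high
                                 * nibble of this byte and the low
                                 * nibble of the next to realign the
                                 * packet.
                                 */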
                                for (i = 0; i < work->len; i++) {
                                        *ptr =
                                            ((*ptr & 0xf0) >> 4) |
                                            ((*(ptr + 1) & 0xf) << 4);
                                        ptr++;
                                }
                        } else {
                                DEBUGPRINT("Port %d unknown preamble, packet dropped\n",
                                           work->ipprt);
                                /*
                                   cvmx_helper_dump_packet(work);
                                 */
                                cvm_oct_free_work(work);
                                return 1;
                        }
                }
        } else {
                DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
                           work->ipprt, work->word2.snoip.err_code);
                cvm_oct_free_work(work);
                return 1;
        }

        return 0;
}

/**
 * The NAPI poll function.
 *
 * @napi: The NAPI instance, or NULL if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
        const int       coreid = cvmx_get_core_num();
        uint64_t        old_group_mask;
        uint64_t        old_scratch;
        int             rx_count = 0;
        int             did_work_request = 0;
        int             packet_not_copied;

        /* Prefetch cvm_oct_device since we know we need it soon */
        prefetch(cvm_oct_device);

        if (USE_ASYNC_IOBDMA) {
                /* Save scratch in case userspace is using it */
                CVMX_SYNCIOBDMA;
                old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
        }

        /* Only allow work for our group (and preserve priorities) */
        old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
                       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

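        /*
         * Ask the POW for the first work entry up front so the lookup
         * overlaps with the processing below; the response is collected
         * from the scratchpad inside the loop.
         */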
        if (USE_ASYNC_IOBDMA) {
                cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
                did_work_request = 1;
        }

        while (rx_count < budget) {
                struct sk_buff *skb = NULL;
                struct sk_buff **pskb = NULL;
                int skb_in_hw;
                cvmx_wqe_t *work;

                if (USE_ASYNC_IOBDMA && did_work_request)
                        work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
                else
                        work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

                prefetch(work);
                did_work_request = 0;
                if (work == NULL) {
                        union cvmx_pow_wq_int wq_int;

                        wq_int.u64 = 0;
                        wq_int.s.iq_dis = 1 << pow_receive_group;
                        wq_int.s.wq_int = 1 << pow_receive_group;
                        cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
                        break;
                }
                pskb = (struct sk_buff **)
                        (cvm_oct_get_buffer_ptr(work->packet_ptr) -
                         sizeof(void *));
                prefetch(pskb);

                if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
                        cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
                                                            CVMX_POW_NO_WAIT);
                        did_work_request = 1;
                }

                if (rx_count == 0) {
                        /*
                         * First time through, see if there is enough
                         * work waiting to merit waking another
                         * CPU.
                         */
                        union cvmx_pow_wq_int_cntx counts;
                        int backlog;
                        int cores_in_use = core_state.baseline_cores -
                                atomic_read(&core_state.available_cores);

                        counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
                        backlog = counts.s.iq_cnt + counts.s.ds_cnt;
                        if (backlog > budget * cores_in_use && napi != NULL)
                                cvm_oct_enable_one_cpu();
                }

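                /*
                 * When the hardware fills skbuffs taken from the FPA
                 * pool, the skb pointer is stored immediately before
                 * the packet buffer; a single-buffer packet can then
                 * be passed up without copying.
                 */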
                skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
                if (likely(skb_in_hw)) {
                        skb = *pskb;
                        prefetch(&skb->head);
                        prefetch(&skb->len);
                }
                prefetch(cvm_oct_device[work->ipprt]);

                /* Immediately throw away all packets with receive errors */
                if (unlikely(work->word2.snoip.rcv_error)) {
                        if (cvm_oct_check_rcv_error(work))
                                continue;
                }

                /*
                 * We can only use the zero copy path if skbuffs are
                 * in the FPA pool and the packet fits in a single
                 * buffer.
                 */
                if (likely(skb_in_hw)) {
                        skb->data = skb->head + work->packet_ptr.s.addr -
                                cvmx_ptr_to_phys(skb->head);
                        prefetch(skb->data);
                        skb->len = work->len;
                        skb_set_tail_pointer(skb, skb->len);
                        packet_not_copied = 1;
                } else {
                        /*
                         * We have to copy the packet. First allocate
                         * an skbuff for it.
                         */
                        skb = dev_alloc_skb(work->len);
                        if (!skb) {
                                DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
                                           work->ipprt);
                                cvm_oct_free_work(work);
                                continue;
                        }

                        /*
                         * Check if we've received a packet that was
                         * entirely stored in the work entry.
                         */
                        if (unlikely(work->word2.s.bufs == 0)) {
                                uint8_t *ptr = work->packet_data;

                                if (likely(!work->word2.s.not_IP)) {
                                        /*
                                         * The beginning of the packet
                                         * moves for IP packets.
                                         */
                                        if (work->word2.s.is_v6)
                                                ptr += 2;
                                        else
                                                ptr += 6;
                                }
                                memcpy(skb_put(skb, work->len), ptr, work->len);
                                /* No packet buffers to free */
                        } else {
                                int segments = work->word2.s.bufs;
                                union cvmx_buf_ptr segment_ptr = work->packet_ptr;
                                int len = work->len;

                                while (segments--) {
                                        union cvmx_buf_ptr next_ptr =
                                            *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                                        /*
                                         * Octeon Errata PKI-100: The
                                         * segment size is wrong. Until
                                         * it is fixed, calculate the
                                         * segment size based on the
                                         * packet pool buffer size.
                                         * When it is fixed, the
                                         * following line should be
                                         * replaced with this one:
                                         * int segment_size =
                                         *     segment_ptr.s.size;
                                         */
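                                        /*
                                         * ((addr >> 7) - back) << 7 is
                                         * the 128-byte-aligned start of
                                         * this buffer, so the space
                                         * left is the pool buffer size
                                         * minus the offset of addr
                                         * within the buffer.
                                         */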
                                        int segment_size =
                                                CVMX_FPA_PACKET_POOL_SIZE -
                                                (segment_ptr.s.addr -
                                                 (((segment_ptr.s.addr >> 7) -
                                                   segment_ptr.s.back) << 7));
                                        /*
                                         * Don't copy more than what
                                         * is left in the packet.
                                         */
                                        if (segment_size > len)
                                                segment_size = len;
                                        /* Copy the data into the packet */
                                        memcpy(skb_put(skb, segment_size),
                                               cvmx_phys_to_ptr(segment_ptr.s.addr),
                                               segment_size);
                                        len -= segment_size;
                                        segment_ptr = next_ptr;
                                }
                        }
                        packet_not_copied = 0;
                }

                if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
                           cvm_oct_device[work->ipprt])) {
                        struct net_device *dev = cvm_oct_device[work->ipprt];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        /*
                         * Only accept packets for devices that are
                         * currently up.
                         */
                        if (likely(dev->flags & IFF_UP)) {
                                skb->protocol = eth_type_trans(skb, dev);
                                skb->dev = dev;

                                if (unlikely(work->word2.s.not_IP ||
                                             work->word2.s.IP_exc ||
                                             work->word2.s.L4_error))
                                        skb->ip_summed = CHECKSUM_NONE;
                                else
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                                /* Increment RX stats for virtual ports */
                                if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
                                        atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
                                        atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
                                        atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
                                        atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
                                }
                                netif_receive_skb(skb);
                                rx_count++;
                        } else {
                                /*
                                 * Drop any packet received for a
                                 * device that isn't up.
                                 */
                                /*
                                DEBUGPRINT("%s: Device not up, packet dropped\n",
                                           dev->name);
                                */
#ifdef CONFIG_64BIT
                                atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
                                atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
                                dev_kfree_skb_irq(skb);
                        }
                } else {
                        /*
                         * Drop any packet received for a device that
                         * doesn't exist.
                         */
                        DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
                                   work->ipprt);
                        dev_kfree_skb_irq(skb);
                }
                /*
                 * Check to see if the skbuff and work share the same
                 * packet buffer.
                 */
                if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
                        /*
                         * This buffer needs to be replaced, increment
                         * the number of buffers we need to free by
                         * one.
                         */
                        cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                              1);

                        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
                                      DONT_WRITEBACK(1));
                } else {
                        cvm_oct_free_work(work);
                }
        }
        /* Restore the original POW group mask */
        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
        if (USE_ASYNC_IOBDMA) {
                /* Restore the scratch area */
                cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
        }
        cvm_oct_rx_refill_pool(0);

        if (rx_count < budget && napi != NULL) {
                /* No more work */
                napi_complete(napi);
                cvm_oct_no_more_work();
        }
        return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * This is called when the kernel needs to manually poll the
 * device.
 *
 * @dev:    Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
        cvm_oct_napi_poll(NULL, 16);
}
#endif

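/**
 * Set up RX NAPI instances and the POW interrupt. One NAPI
 * instance is registered per possible CPU, all bound to the first
 * allocated net_device; the receive-group IRQ handler is installed,
 * the POW work-queue interrupt thresholds are programmed, and
 * polling is started on one CPU.
 */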
void cvm_oct_rx_initialize(void)
{
        int i;
        struct net_device *dev_for_napi = NULL;
        union cvmx_pow_wq_int_thrx int_thr;
        union cvmx_pow_wq_int_pc int_pc;

        for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
                if (cvm_oct_device[i]) {
                        dev_for_napi = cvm_oct_device[i];
                        break;
                }
        }

        if (!dev_for_napi)
                panic("No net_devices were allocated.");

        if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
                atomic_set(&core_state.available_cores, max_rx_cpus);
        else
                atomic_set(&core_state.available_cores, num_online_cpus());
        core_state.baseline_cores = atomic_read(&core_state.available_cores);

        core_state.cpu_state = CPU_MASK_NONE;
        for_each_possible_cpu(i) {
                netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
                               cvm_oct_napi_poll, rx_napi_weight);
                napi_enable(&cvm_oct_napi[i].napi);
        }
        /* Register an IRQ handler to receive POW interrupts */
        i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
                        cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

        if (i)
                panic("Could not acquire Ethernet IRQ %d\n",
                      OCTEON_IRQ_WORKQ0 + pow_receive_group);

        disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

        int_thr.u64 = 0;
        int_thr.s.tc_en = 1;
        int_thr.s.tc_thr = 1;
        /* Enable POW interrupt when our port has at least one packet */
        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

        int_pc.u64 = 0;
        int_pc.s.pc_thr = 5;
        cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

        /* Schedule NAPI now.  This will indirectly enable interrupts. */
        cvm_oct_enable_one_cpu();
}

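/**
 * Tear down the per-CPU NAPI instances registered by
 * cvm_oct_rx_initialize().
 */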
void cvm_oct_rx_shutdown(void)
{
        int i;

        /* Shutdown all of the NAPIs */
        for_each_possible_cpu(i)
                netif_napi_del(&cvm_oct_napi[i].napi);
}