/*
 *  drivers/net/gianfar_ethtool.c
 *
 *  Gianfar Ethernet Driver
 *  Ethtool support for Gianfar Enet
 *  Based on e1000 ethtool support
 *
 *  Author: Andy Fleming
 *  Maintainer: Kumar Gala
 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
 *
 *  This software may be used and distributed according to
 *  the terms of the GNU Public License, Version 2, incorporated herein
 *  by reference.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <asm/types.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/sort.h>

#include "gianfar.h"

extern void gfar_start(struct net_device *dev);
extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);

#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
                     u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf);
static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);

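/* Names for the statistics reported by gfar_fill_stats(), in the same order
 * as that function fills its buffer. Usage hint: user space typically reads
 * them with "ethtool -S <iface>" (generic ethtool syntax, not specific to
 * this driver).
 */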
static char stat_gstrings[][ETH_GSTRING_LEN] = {
        "rx-dropped-by-kernel",
        "rx-large-frame-errors",
        "rx-short-frame-errors",
        "rx-non-octet-errors",
        "rx-crc-errors",
        "rx-overrun-errors",
        "rx-busy-errors",
        "rx-babbling-errors",
        "rx-truncated-frames",
        "ethernet-bus-error",
        "tx-babbling-errors",
        "tx-underrun-errors",
        "rx-skb-missing-errors",
        "tx-timeout-errors",
        "tx-rx-64-frames",
        "tx-rx-65-127-frames",
        "tx-rx-128-255-frames",
        "tx-rx-256-511-frames",
        "tx-rx-512-1023-frames",
        "tx-rx-1024-1518-frames",
        "tx-rx-1519-1522-good-vlan",
        "rx-bytes",
        "rx-packets",
        "rx-fcs-errors",
        "receive-multicast-packet",
        "receive-broadcast-packet",
        "rx-control-frame-packets",
        "rx-pause-frame-packets",
        "rx-unknown-op-code",
        "rx-alignment-error",
        "rx-frame-length-error",
        "rx-code-error",
        "rx-carrier-sense-error",
        "rx-undersize-packets",
        "rx-oversize-packets",
        "rx-fragmented-frames",
        "rx-jabber-frames",
        "rx-dropped-frames",
        "tx-byte-counter",
        "tx-packets",
        "tx-multicast-packets",
        "tx-broadcast-packets",
        "tx-pause-control-frames",
        "tx-deferral-packets",
        "tx-excessive-deferral-packets",
        "tx-single-collision-packets",
        "tx-multiple-collision-packets",
        "tx-late-collision-packets",
        "tx-excessive-collision-packets",
        "tx-total-collision",
        "reserved",
        "tx-dropped-frames",
        "tx-jabber-frames",
        "tx-fcs-errors",
        "tx-control-frames",
        "tx-oversize-frames",
        "tx-undersize-frames",
        "tx-fragmented-frames",
};

/* Fill in a buffer with the strings which correspond to the
 * stats */
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
        else
                memcpy(buf, stat_gstrings,
                                GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}

/* Fill in an array of 64-bit statistics from various sources.
 * This array will be appended to the end of the ethtool_stats
 * structure, and returned to user space
 */
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *buf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u64 *extra = (u64 *)&priv->extra_stats;

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                u32 __iomem *rmon = (u32 __iomem *)&regs->rmon;
                struct gfar_stats *stats = (struct gfar_stats *)buf;

                for (i = 0; i < GFAR_RMON_LEN; i++)
                        stats->rmon[i] = (u64) gfar_read(&rmon[i]);

                for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
                        stats->extra[i] = extra[i];
        } else
                for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
                        buf[i] = extra[i];
}

static int gfar_sset_count(struct net_device *dev, int sset)
{
        struct gfar_private *priv = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
                        return GFAR_STATS_LEN;
                else
                        return GFAR_EXTRA_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

/* Fills in the drvinfo structure with some basic info */
static void gfar_gdrvinfo(struct net_device *dev,
                struct ethtool_drvinfo *drvinfo)
{
        strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
        strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
        strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
        strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
        drvinfo->regdump_len = 0;
        drvinfo->eedump_len = 0;
}

static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;

        if (NULL == phydev)
                return -ENODEV;

        return phy_ethtool_sset(phydev, cmd);
}

/* Return the current settings in the ethtool_cmd structure */
static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct gfar_priv_tx_q *tx_queue = NULL;

        if (NULL == phydev)
                return -ENODEV;
        tx_queue = priv->tx_queue[0];
        rx_queue = priv->rx_queue[0];

        /* eTSEC 1.7 and older versions have only one pair of txic
         * and rxic registers, although they support multiple queues */
        cmd->maxtxpkt = get_icft_value(tx_queue->txic);
        cmd->maxrxpkt = get_icft_value(rx_queue->rxic);

        return phy_ethtool_gset(phydev, cmd);
}

/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
        return sizeof(struct gfar);
}

/* Return a dump of the GFAR register space */
static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
{
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        u32 __iomem *theregs = (u32 __iomem *)priv->gfargrp[0].regs;
        u32 *buf = (u32 *)regbuf;

        for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
                buf[i] = gfar_read(&theregs[i]);
}

/* Convert microseconds to ethernet clock ticks, which changes
 * depending on what speed the controller is running at */
static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
{
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (priv->phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if usecs > 0 */
        return (usecs * 1000 + count - 1) / count;
}
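
/* Worked example, assuming the GFAR_*_TIME constants from gianfar.h are tick
 * periods in nanoseconds (e.g. GFAR_GBIT_TIME == 512): 100 us at gigabit
 * speed becomes (100 * 1000 + 511) / 512 == 196 ticks. The round-up also
 * guarantees that any non-zero usecs yields at least one tick.
 */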

/* Convert ethernet clock ticks to microseconds */
static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
{
        unsigned int count;

        /* The timer is different, depending on the interface speed */
        switch (priv->phydev->speed) {
        case SPEED_1000:
                count = GFAR_GBIT_TIME;
                break;
        case SPEED_100:
                count = GFAR_100_TIME;
                break;
        case SPEED_10:
        default:
                count = GFAR_10_TIME;
                break;
        }

        /* Make sure we return a number greater than 0
         * if ticks > 0 */
        return (ticks * count) / 1000;
}
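
/* Round trip of the example above, under the same assumption that
 * GFAR_GBIT_TIME == 512: 196 ticks at gigabit speed maps back to
 * (196 * 512) / 1000 == 100 us. Note the truncating division here, unlike
 * the round-up in gfar_usecs2ticks().
 */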

/* Get the coalescing parameters, and put them in the cvals
 * structure.  */
static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct gfar_priv_tx_q *tx_queue = NULL;
        unsigned long rxtime;
        unsigned long rxcount;
        unsigned long txtime;
        unsigned long txcount;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        if (NULL == priv->phydev)
                return -ENODEV;

        rx_queue = priv->rx_queue[0];
        tx_queue = priv->tx_queue[0];

        rxtime  = get_ictt_value(rx_queue->rxic);
        rxcount = get_icft_value(rx_queue->rxic);
        txtime  = get_ictt_value(tx_queue->txic);
        txcount = get_icft_value(tx_queue->txic);
        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
        cvals->rx_max_coalesced_frames = rxcount;

        cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
        cvals->tx_max_coalesced_frames = txcount;

        cvals->use_adaptive_rx_coalesce = 0;
        cvals->use_adaptive_tx_coalesce = 0;

        cvals->pkt_rate_low = 0;
        cvals->rx_coalesce_usecs_low = 0;
        cvals->rx_max_coalesced_frames_low = 0;
        cvals->tx_coalesce_usecs_low = 0;
        cvals->tx_max_coalesced_frames_low = 0;

        /* When the packet rate is below pkt_rate_high but above
         * pkt_rate_low (both measured in packets per second) the
         * normal {rx,tx}_* coalescing parameters are used.
         */

        /* When the packet rate (measured in packets per second)
         * is above pkt_rate_high, the {rx,tx}_*_high parameters are
         * used.
         */
        cvals->pkt_rate_high = 0;
        cvals->rx_coalesce_usecs_high = 0;
        cvals->rx_max_coalesced_frames_high = 0;
        cvals->tx_coalesce_usecs_high = 0;
        cvals->tx_max_coalesced_frames_high = 0;

        /* How often to do adaptive coalescing packet rate sampling,
         * measured in seconds.  Must not be zero.
         */
        cvals->rate_sample_interval = 0;

        return 0;
}

/* Change the coalescing values.
 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 * in order for coalescing to be active
 */
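/* Usage hint (generic ethtool syntax, not specific to this driver): these
 * values typically arrive from a command such as
 * "ethtool -C <iface> rx-usecs 100 rx-frames 16 tx-usecs 100 tx-frames 16".
 */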
static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int i = 0;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;

        /* Set up rx coalescing */
        /* For now we enable/disable coalescing for all queues
         * together on eTSEC2; this will be modified along with
         * the ethtool interface */
        if ((cvals->rx_coalesce_usecs == 0) ||
            (cvals->rx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_rx_queues; i++)
                        priv->rx_queue[i]->rxcoalescing = 1;
        }

        if (NULL == priv->phydev)
                return -ENODEV;

        /* Check the bounds of the values */
        if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                pr_info("Coalescing is limited to %d microseconds\n",
                        GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                pr_info("Coalescing is limited to %d frames\n",
                        GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rxic = mk_ic_value(
                        cvals->rx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
        }

        /* Set up tx coalescing */
        if ((cvals->tx_coalesce_usecs == 0) ||
            (cvals->tx_max_coalesced_frames == 0)) {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 0;
        } else {
                for (i = 0; i < priv->num_tx_queues; i++)
                        priv->tx_queue[i]->txcoalescing = 1;
        }

        /* Check the bounds of the values */
        if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
                pr_info("Coalescing is limited to %d microseconds\n",
                        GFAR_MAX_COAL_USECS);
                return -EINVAL;
        }

        if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
                pr_info("Coalescing is limited to %d frames\n",
                        GFAR_MAX_COAL_FRAMES);
                return -EINVAL;
        }

        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->txic = mk_ic_value(
                        cvals->tx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
        }

        gfar_configure_coalescing(priv, 0xFF, 0xFF);

        return 0;
}

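/* Usage hint (generic ethtool syntax): the ring parameters below are read
 * with "ethtool -g <iface>" and changed with e.g. "ethtool -G <iface> rx 512
 * tx 512". Per the checks in gfar_sringparam(), sizes must be powers of 2
 * and no larger than the GFAR_*_MAX_RING_SIZE limits.
 */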
/* Fills in rvals with the current ring parameters.  Currently,
 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 * jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;

        tx_queue = priv->tx_queue[0];
        rx_queue = priv->rx_queue[0];

        rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
        rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;

        /* Values changeable by the user.  The valid values are
         * in the range 1 to the "*_max_pending" counterpart above.
         */
        rvals->rx_pending = rx_queue->rx_ring_size;
        rvals->rx_mini_pending = rx_queue->rx_ring_size;
        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
        rvals->tx_pending = tx_queue->tx_ring_size;
}

/* Change the current ring parameters, stopping the controller if
 * necessary so that we don't mess things up while we're in
 * motion.  We wait for the ring to be clean before reallocating
 * the rings. */
static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
{
        struct gfar_private *priv = netdev_priv(dev);
        int err = 0, i = 0;

        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->rx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
                return -EINVAL;

        if (!is_power_of_2(rvals->tx_pending)) {
                netdev_err(dev, "Ring sizes must be a power of 2\n");
                return -EINVAL;
        }

        if (dev->flags & IFF_UP) {
                unsigned long flags;

                /* Halt TX and RX, and process the frames which
                 * have already been received */
                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt(dev);

                unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);

                for (i = 0; i < priv->num_rx_queues; i++)
                        gfar_clean_rx_ring(priv->rx_queue[i],
                                        priv->rx_queue[i]->rx_ring_size);

                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);
        }

        /* Change the size */
        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
                priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
        }

        /* Rebuild the rings with the new size */
        if (dev->flags & IFF_UP) {
                err = startup_gfar(dev);
                netif_tx_wake_all_queues(dev);
        }
        return err;
}

int gfar_set_features(struct net_device *dev, u32 features)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        int err = 0, i = 0;
        u32 changed = dev->features ^ features;

        if (!(changed & NETIF_F_RXCSUM))
                return 0;

        if (dev->flags & IFF_UP) {
                /* Halt TX and RX, and process the frames which
                 * have already been received */
                local_irq_save(flags);
                lock_tx_qs(priv);
                lock_rx_qs(priv);

                gfar_halt(dev);

                unlock_tx_qs(priv);
                unlock_rx_qs(priv);
                local_irq_restore(flags);

                for (i = 0; i < priv->num_rx_queues; i++)
                        gfar_clean_rx_ring(priv->rx_queue[i],
                                        priv->rx_queue[i]->rx_ring_size);

                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);

                dev->features = features;

                err = startup_gfar(dev);
                netif_tx_wake_all_queues(dev);
        }
        return err;
}

static uint32_t gfar_get_msglevel(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        return priv->msg_enable;
}

static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
        struct gfar_private *priv = netdev_priv(dev);
        priv->msg_enable = data;
}

#ifdef CONFIG_PM
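/* Wake-on-LAN: only magic-packet wake is supported, and only if the device
 * advertises FSL_GIANFAR_DEV_HAS_MAGIC_PACKET. Usage hint (generic ethtool
 * syntax): "ethtool -s <iface> wol g" enables it, "wol d" disables it.
 */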
static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);

        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
        } else {
                wol->supported = wol->wolopts = 0;
        }
}

static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;

        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
            wol->wolopts != 0)
                return -EINVAL;

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;

        device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

        spin_lock_irqsave(&priv->bflock, flags);
        priv->wol_en = !!device_may_wakeup(&dev->dev);
        spin_unlock_irqrestore(&priv->bflock, flags);

        return 0;
}
#endif

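/* The helpers below implement receive hashing via the filer: the RXH_* bits
 * of an rx-flow-hash request (e.g. "ethtool -N <iface> rx-flow-hash tcp4
 * sdfn", generic ethtool syntax) are translated into hash rules in the
 * filer table, roughly one rule per selected header field (two for the
 * L2 destination address).
 */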
static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
{
        u32 fcr = 0x0, fpr = FPR_FILER_MASK;

        if (ethflow & RXH_L2DA) {
                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
                        RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;

                fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
                        RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_VLAN) {
                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                                RQFCR_AND | RQFCR_HASHTBL_0;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_SRC) {
                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_IP_DST) {
                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L3_PROTO) {
                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_0_1) {
                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        if (ethflow & RXH_L4_B_2_3) {
                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
                        RQFCR_AND | RQFCR_HASHTBL_0;
                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }
}

static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
{
        unsigned int last_rule_idx = priv->cur_filer_idx;
        unsigned int cmp_rqfpr;
        unsigned int local_rqfpr[MAX_FILER_IDX + 1];
        unsigned int local_rqfcr[MAX_FILER_IDX + 1];
        int i = 0x0, k = 0x0;
        int j = MAX_FILER_IDX, l = 0x0;

        switch (class) {
        case TCP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
                break;
        case UDP_V4_FLOW:
                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
                break;
        case TCP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
                break;
        case UDP_V6_FLOW:
                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
                break;
        default:
                pr_err("Right now this class is not supported\n");
                return 0;
        }

        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
                local_rqfpr[j] = priv->ftp_rqfpr[i];
                local_rqfcr[j] = priv->ftp_rqfcr[i];
                j--;
                if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
                        RQFCR_CLE | RQFCR_AND)) &&
                        (priv->ftp_rqfpr[i] == cmp_rqfpr))
                        break;
        }

        if (i == MAX_FILER_IDX + 1) {
                pr_err("No parse rule found, can't create hash rules\n");
                return 0;
        }

        /* If a match was found, it marks the start of a cluster rule;
         * if it was already programmed, we need to overwrite these rules
         */
        for (l = i+1; l < MAX_FILER_IDX; l++) {
                if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                        !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
                        priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
                                RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
                        priv->ftp_rqfpr[l] = FPR_FILER_MASK;
                        gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
                                priv->ftp_rqfpr[l]);
                        break;
                }

                if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
                        (priv->ftp_rqfcr[l] & RQFCR_AND))
                        continue;
                else {
                        local_rqfpr[j] = priv->ftp_rqfpr[l];
                        local_rqfcr[j] = priv->ftp_rqfcr[l];
                        j--;
                }
        }

        priv->cur_filer_idx = l - 1;
        last_rule_idx = l;

        /* hash rules */
        ethflow_to_filer_rules(priv, ethflow);

        /* Write back the popped out rules again */
        for (k = j+1; k < MAX_FILER_IDX; k++) {
                priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
                priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
                gfar_write_filer(priv, priv->cur_filer_idx,
                                local_rqfcr[k], local_rqfpr[k]);
                if (!priv->cur_filer_idx)
                        break;
                priv->cur_filer_idx = priv->cur_filer_idx - 1;
        }

        return 1;
}

static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
        /* write the filer rules here */
        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
                return -EINVAL;

        return 0;
}

static int gfar_check_filer_hardware(struct gfar_private *priv)
{
        struct gfar __iomem *regs = NULL;
        u32 i;

        regs = priv->gfargrp[0].regs;

        /* Check if we are in FIFO mode */
        i = gfar_read(&regs->ecntrl);
        i &= ECNTRL_FIFM;
        if (i == ECNTRL_FIFM) {
                netdev_notice(priv->ndev, "Interface in FIFO mode\n");
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
                if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
                        netdev_info(priv->ndev,
                                        "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                        "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        } else {
                /* Or in standard mode */
                i = gfar_read(&regs->rctrl);
                i &= RCTRL_PRSDEP_MASK;
                if (i == RCTRL_PRSDEP_MASK) {
                        netdev_info(priv->ndev,
                                        "Receive Queue Filtering enabled\n");
                } else {
                        netdev_warn(priv->ndev,
                                        "Receive Queue Filtering disabled\n");
                        return -EOPNOTSUPP;
                }
        }

        /* Sets the properties for the arbitrary filer rule
         * to the first 4 Layer 4 bytes */
        regs->rbifx = 0xC0C1C2C3;
        return 0;
}

static int gfar_comp_asc(const void *a, const void *b)
{
        return memcmp(a, b, 4);
}

static int gfar_comp_desc(const void *a, const void *b)
{
        return -memcmp(a, b, 4);
}

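/* Swap callback for the sort() calls below. Each element is a struct
 * gfar_mask_entry, assumed here (per its definition in gianfar.h) to consist
 * of four u32 fields, so it is swapped as four u32s.
 */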
static void gfar_swap(void *a, void *b, int size)
{
        u32 *_a = a;
        u32 *_b = b;

        swap(_a[0], _b[0]);
        swap(_a[1], _b[1]);
        swap(_a[2], _b[2]);
        swap(_a[3], _b[3]);
}

/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
        tab->fe[tab->index].prop = mask;
        tab->index++;
}

/* Sets parse bits (e.g. IP or TCP) */
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
                        | RQFCR_AND;
        tab->fe[tab->index].prop = value;
        tab->index++;
}

static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
                struct filer_table *tab)
{
        gfar_set_mask(mask, tab);
        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
        tab->fe[tab->index].prop = value;
        tab->index++;
}

/*
 * For setting a tuple of value and mask of type flag
 * Example:
 * IP-Src = 10.0.0.0/255.0.0.0
 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 *
 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
 * for a don't-care mask it gives us 0.
 *
 * The don't-care check and the mask adjustment for mask=0 are done for VLAN
 * and MAC stuff on an upper level (due to missing information on this level).
 * Those entries can be discarded if they have value=0 and mask=0.
 *
 * Furthermore, all masks are one-padded for better hardware efficiency.
 */
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
                struct filer_table *tab)
{
        switch (flag) {
        /* 3bit */
        case RQFCR_PID_PRI:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_PRI_MASK;
                break;
        /* 8bit */
        case RQFCR_PID_L4P:
        case RQFCR_PID_TOS:
                if (!~(mask | RQFCR_PID_L4P_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_L4P_MASK;
                break;
        /* 12bit */
        case RQFCR_PID_VID:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_VID_MASK;
                break;
        /* 16bit */
        case RQFCR_PID_DPT:
        case RQFCR_PID_SPT:
        case RQFCR_PID_ETY:
                if (!~(mask | RQFCR_PID_PORT_MASK))
                        return;
                if (!mask)
                        mask = ~0;
                else
                        mask |= RQFCR_PID_PORT_MASK;
                break;
        /* 24bit */
        case RQFCR_PID_DAH:
        case RQFCR_PID_DAL:
        case RQFCR_PID_SAH:
        case RQFCR_PID_SAL:
                if (!(value | mask))
                        return;
                mask |= RQFCR_PID_MAC_MASK;
                break;
        /* for all real 32bit masks */
        default:
                if (!~mask)
                        return;
                if (!mask)
                        mask = ~0;
                break;
        }
        gfar_set_general_attribute(value, mask, flag, tab);
}

/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
                struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
        gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
}

/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
                struct ethtool_usrip4_spec *mask, struct filer_table *tab)
{
        gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
        gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
        gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
                        tab);
}

/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
                struct filer_table *tab)
{
        u32 upper_temp_mask = 0;
        u32 lower_temp_mask = 0;

        /* Source address */
        if (!is_broadcast_ether_addr(mask->h_source)) {
                if (is_zero_ether_addr(mask->h_source)) {
                        upper_temp_mask = 0xFFFFFFFF;
                        lower_temp_mask = 0xFFFFFFFF;
                } else {
                        upper_temp_mask = mask->h_source[0] << 16
                                        | mask->h_source[1] << 8
                                        | mask->h_source[2];
                        lower_temp_mask = mask->h_source[3] << 16
                                        | mask->h_source[4] << 8
                                        | mask->h_source[5];
                }
                /* Upper 24bit */
                gfar_set_attribute(value->h_source[0] << 16
                                | value->h_source[1] << 8
                                | value->h_source[2],
                                upper_temp_mask, RQFCR_PID_SAH, tab);
                /* And the same for the lower part */
                gfar_set_attribute(value->h_source[3] << 16
                                | value->h_source[4] << 8
                                | value->h_source[5],
                                lower_temp_mask, RQFCR_PID_SAL, tab);
        }
        /* Destination address */
        if (!is_broadcast_ether_addr(mask->h_dest)) {
                /* Special for destination is limited broadcast */
                if ((is_broadcast_ether_addr(value->h_dest)
                                && is_zero_ether_addr(mask->h_dest))) {
                        gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
                } else {
                        if (is_zero_ether_addr(mask->h_dest)) {
                                upper_temp_mask = 0xFFFFFFFF;
                                lower_temp_mask = 0xFFFFFFFF;
                        } else {
                                upper_temp_mask = mask->h_dest[0] << 16
                                                | mask->h_dest[1] << 8
                                                | mask->h_dest[2];
                                lower_temp_mask = mask->h_dest[3] << 16
                                                | mask->h_dest[4] << 8
                                                | mask->h_dest[5];
                        }

                        /* Upper 24bit */
                        gfar_set_attribute(value->h_dest[0] << 16
                                        | value->h_dest[1] << 8
                                        | value->h_dest[2],
                                        upper_temp_mask, RQFCR_PID_DAH, tab);
                        /* And the same for the lower part */
                        gfar_set_attribute(value->h_dest[3] << 16
                                        | value->h_dest[4] << 8
                                        | value->h_dest[5],
                                        lower_temp_mask, RQFCR_PID_DAL, tab);
                }
        }

        gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
}

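/* The conversion below emits a chain of mask/value filer entries glued
 * together with RQFCR_AND; the final entry carries the action, either the
 * target queue (ring_cookie shifted into the queue field) or RQFCR_RJE for
 * RX_CLS_FLOW_DISC (drop). Chains of more than two entries are additionally
 * marked for clustering with RQFCR_CLE.
 */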
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
                struct filer_table *tab)
{
        u32 vlan = 0, vlan_mask = 0;
        u32 id = 0, id_mask = 0;
        u32 cfi = 0, cfi_mask = 0;
        u32 prio = 0, prio_mask = 0;

        u32 old_index = tab->index;

        /* Check if vlan is wanted */
        if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
                if (!rule->m_ext.vlan_tci)
                        rule->m_ext.vlan_tci = 0xFFFF;

                vlan = RQFPR_VLN;
                vlan_mask = RQFPR_VLN;

                /* Separate the fields */
                id = rule->h_ext.vlan_tci & 0xFFF;
                id_mask = rule->m_ext.vlan_tci & 0xFFF;
                cfi = (rule->h_ext.vlan_tci >> 12) & 1;
                cfi_mask = (rule->m_ext.vlan_tci >> 12) & 1;
                prio = (rule->h_ext.vlan_tci >> 13) & 0x7;
                prio_mask = (rule->m_ext.vlan_tci >> 13) & 0x7;

                if (cfi == 1 && cfi_mask == 1) {
                        vlan |= RQFPR_CFI;
                        vlan_mask |= RQFPR_CFI;
                } else if (cfi == 0 && cfi_mask == 1) {
                        vlan_mask |= RQFPR_CFI;
                }
        }

        switch (rule->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
                                RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
                                &rule->m_u.tcp_ip4_spec, tab);
                break;
        case UDP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
                                RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
                gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
                                &rule->m_u.udp_ip4_spec, tab);
                break;
        case SCTP_V4_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                tab);
                gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
                gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
                                (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
                break;
        case IP_USER_FLOW:
                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
                                tab);
                gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
                                (struct ethtool_usrip4_spec *) &rule->m_u, tab);
                break;
        case ETHER_FLOW:
                if (vlan)
                        gfar_set_parse_bits(vlan, vlan_mask, tab);
                gfar_set_ether((struct ethhdr *) &rule->h_u,
                                (struct ethhdr *) &rule->m_u, tab);
                break;
        default:
                return -1;
        }

        /* Set the vlan attributes in the end */
        if (vlan) {
                gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
                gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
        }

        /* If there has been nothing written till now, it must be a default */
        if (tab->index == old_index) {
                gfar_set_mask(0xFFFFFFFF, tab);
                tab->fe[tab->index].ctrl = 0x20;
                tab->fe[tab->index].prop = 0x0;
                tab->index++;
        }

        /* Remove last AND */
        tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);

        /* Specify which queue to use or to drop */
        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
        else
                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);

        /* Only big enough entries can be clustered */
        if (tab->index > (old_index + 2)) {
                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
        }

        /* In rare cases the cache can be full while there is free space in hw */
        if (tab->index > MAX_FILER_CACHE_IDX - 1)
                return -EBUSY;

        return 0;
}

/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[],
                struct gfar_filer_entry src[], s32 size)
{
        while (size > 0) {
                size--;
                dst[size].ctrl = src[size].ctrl;
                dst[size].prop = src[size].prop;
        }
}

/* Delete the contents of the filer-table between start and end
 * and collapse them */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
        int length;

        if (end > MAX_FILER_CACHE_IDX || end < begin)
                return -EINVAL;

        end++;
        length = end - begin;

        /* Copy */
        while (end < tab->index) {
                tab->fe[begin].ctrl = tab->fe[end].ctrl;
                tab->fe[begin++].prop = tab->fe[end++].prop;
        }
        /* Fill up with don't cares */
        while (begin < tab->index) {
                tab->fe[begin].ctrl = 0x60;
                tab->fe[begin].prop = 0xFFFFFFFF;
                begin++;
        }

        tab->index -= length;
        return 0;
}

/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
                struct filer_table *tab)
{
        if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
                        begin > MAX_FILER_CACHE_IDX)
                return -EINVAL;

        gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
                        tab->index - length + 1);

        tab->index += length;
        return 0;
}

static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
                                == (RQFCR_AND | RQFCR_CLE))
                        return start;
        }
        return -1;
}

static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
        for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
                if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
                                == (RQFCR_CLE))
                        return start;
        }
        return -1;
}

/*
 * Uses the hardware's clustering option to reduce
 * the number of filer table entries
 */
static void gfar_cluster_filer(struct filer_table *tab)
{
        s32 i = -1, j, iend, jend;

        while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
                j = i;
                while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
                        /*
                         * The cluster entries themselves and the previous
                         * one (a mask) must be identical!
                         */
                        if (tab->fe[i].ctrl != tab->fe[j].ctrl)
                                break;
                        if (tab->fe[i].prop != tab->fe[j].prop)
                                break;
                        if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
                                break;
                        if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
                                break;
                        iend = gfar_get_next_cluster_end(i, tab);
                        jend = gfar_get_next_cluster_end(j, tab);
                        if (jend == -1 || iend == -1)
                                break;
                        /*
                         * First we make some free space where our cluster
                         * element should be. Then we copy it there and
                         * finally delete it from its old location.
                         */
                        if (gfar_expand_filer_entries(iend, (jend - j), tab)
                                        == -EINVAL)
                                break;

                        gfar_copy_filer_entries(&(tab->fe[iend + 1]),
                                        &(tab->fe[jend + 1]), jend - j);

                        if (gfar_trim_filer_entries(jend - 1,
                                        jend + (jend - j), tab) == -EINVAL)
                                return;

                        /* Mask out cluster bit */
                        tab->fe[iend].ctrl &= ~(RQFCR_CLE);
                }
        }
}

/* Swaps the 0xFF80 masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_ff80_bits(struct gfar_filer_entry *a1,
                struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
                struct gfar_filer_entry *b2)
{
        u32 temp[4];

        temp[0] = a1->ctrl & 0xFF80;
        temp[1] = a2->ctrl & 0xFF80;
        temp[2] = b1->ctrl & 0xFF80;
        temp[3] = b2->ctrl & 0xFF80;

        a1->ctrl &= ~0xFF80;
        a2->ctrl &= ~0xFF80;
        b1->ctrl &= ~0xFF80;
        b2->ctrl &= ~0xFF80;

        a1->ctrl |= temp[1];
        a2->ctrl |= temp[0];
        b1->ctrl |= temp[3];
        b2->ctrl |= temp[2];
}

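/* Illustrative sketch of the bookkeeping done below (an assumption about
 * intent, not hardware documentation): for the entries M0 A0 A1 M1 B0, where
 * the M entries are masks (low control nibble == 0), A1 ends its block by
 * having RQFCR_AND unset, and B0 depends on M1, the generated table would be
 * {mask M0, start 0, end 2, block 1} and {mask M1, start 3, end 4, block 2}.
 */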
/*
 * Generate a list of mask values with their start and end of validity,
 * and a block number marking the parts that belong together
 * (glued by ANDs), in mask_table
 */
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
                struct filer_table *tab)
{
        u32 i, and_index = 0, block_index = 1;

        for (i = 0; i < tab->index; i++) {
                /* LSByte of control = 0 sets a mask */
                if (!(tab->fe[i].ctrl & 0xF)) {
                        mask_table[and_index].mask = tab->fe[i].prop;
                        mask_table[and_index].start = i;
                        mask_table[and_index].block = block_index;
                        if (and_index >= 1)
                                mask_table[and_index - 1].end = i - 1;
                        and_index++;
                }
                /* cluster starts will be separated because they should
                 * hold their position */
                if (tab->fe[i].ctrl & RQFCR_CLE)
                        block_index++;
                /* An unset AND indicates the end of a dependent block */
                if (!(tab->fe[i].ctrl & RQFCR_AND))
                        block_index++;
        }

        mask_table[and_index - 1].end = i - 1;

        return and_index;
}

/*
 * Sorts the entries of mask_table by the values of the masks.
 * Important: The 0xFF80 flags of the first and last entry of a
 * block must hold their position (which queue, CLusterEnable, ReJEct,
 * AND)
 */
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
                struct filer_table *temp_table, u32 and_index)
{
        /* Pointer to compare function (_asc or _desc) */
        int (*gfar_comp)(const void *, const void *);

        u32 i, size = 0, start = 0, prev = 1;
        u32 old_first, old_last, new_first, new_last;

        gfar_comp = &gfar_comp_desc;

        for (i = 0; i < and_index; i++) {
                if (prev != mask_table[i].block) {
                        old_first = mask_table[start].start + 1;
                        old_last = mask_table[i - 1].end;
                        sort(mask_table + start, size,
                                        sizeof(struct gfar_mask_entry),
                                        gfar_comp, &gfar_swap);

                        /* Toggle order for every block. This makes the
                         * thing more efficient! */
                        if (gfar_comp == gfar_comp_desc)
                                gfar_comp = &gfar_comp_asc;
                        else
                                gfar_comp = &gfar_comp_desc;

                        new_first = mask_table[start].start + 1;
                        new_last = mask_table[i - 1].end;

                        gfar_swap_ff80_bits(&temp_table->fe[new_first],
                                        &temp_table->fe[old_first],
                                        &temp_table->fe[new_last],
                                        &temp_table->fe[old_last]);

                        start = i;
                        size = 0;
                }
                size++;
                prev = mask_table[i].block;
        }
}

1373 /*
1374  * Reduces the number of masks needed in the filer table to save entries
1375  * This is done by sorting the masks of a depended block. A depended block is
1376  * identified by gluing ANDs or CLE. The sorting order toggles after every
1377  * block. Of course entries in scope of a mask must change their location with
1378  * it.
1379  */
static int gfar_optimize_filer_masks(struct filer_table *tab)
{
        struct filer_table *temp_table;
        struct gfar_mask_entry *mask_table;

        u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
        s32 ret = 0;

        /* We need a copy of the filer table because
         * we want to change its order */
        temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
        if (temp_table == NULL)
                return -ENOMEM;
        memcpy(temp_table, tab, sizeof(*temp_table));

        mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
                        sizeof(struct gfar_mask_entry), GFP_KERNEL);

        if (mask_table == NULL) {
                ret = -ENOMEM;
                goto end;
        }

        and_index = gfar_generate_mask_table(mask_table, tab);

        gfar_sort_mask_table(mask_table, temp_table, and_index);

        /* Now we can copy the data from our duplicated filer table to
         * the real one in the order the mask table says */
        for (i = 0; i < and_index; i++) {
                size = mask_table[i].end - mask_table[i].start + 1;
                gfar_copy_filer_entries(&(tab->fe[j]),
                                &(temp_table->fe[mask_table[i].start]), size);
                j += size;
        }

        /* Finally, check for duplicated masks and drop the second of
         * each pair */
        for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
                if (tab->fe[i].ctrl == 0x80) {
                        previous_mask = i++;
                        break;
                }
        }
        for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
                if (tab->fe[i].ctrl == 0x80) {
                        if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
                                /* Two identical masks found;
                                 * drop the second one */
                                gfar_trim_filer_entries(i, i, tab);
                        } else
                                /* Not identical; remember this mask */
                                previous_mask = i;
                }
        }

        kfree(mask_table);
end:    kfree(temp_table);
        return ret;
}
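
/* Rough example of the pay-off (illustrative values, not taken from real
 * hardware): if two rules each program the same mask, say 0xFFFF0000, in
 * their own 0x80 control entry, and the sorting above has made the two
 * mask entries adjacent, the loop in gfar_optimize_filer_masks() trims
 * the second entry and both rules share a single mask slot in hardware.
 */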

/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
                struct filer_table *tab)
{
        u32 i = 0;

        if (tab->index > MAX_FILER_IDX - 1)
                return -EBUSY;

        /* Avoid processing an inconsistent filer table */
        lock_rx_qs(priv);

        /* Fill regular entries; an entry counts as valid while
         * ctrl or prop is non-zero */
        for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-throughs */
        for (; i < MAX_FILER_IDX - 1; i++)
                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
        /* The last entry must be the default accept entry,
         * because that's what users expect */
        gfar_write_filer(priv, i, 0x20, 0x0);

        unlock_rx_qs(priv);

        return 0;
}
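
/* After gfar_write_filer_table() the hardware table has, schematically,
 * the layout
 *
 *      [ rule entries | fall-throughs (ctrl 0x60) | default accept (0x20) ]
 *
 * The control values are written here as raw numbers; the individual
 * bits are described by the RQFCR_* definitions in gianfar.h.
 */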

static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
                struct gfar_private *priv)
{
        if (flow->flow_type & FLOW_EXT) {
                if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
                        netdev_warn(priv->ndev,
                                        "User-specific data not supported!\n");
                if (~flow->m_ext.vlan_etype)
                        netdev_warn(priv->ndev,
                                        "VLAN-etype not supported!\n");
        }
        if (flow->flow_type == IP_USER_FLOW)
                if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
                        netdev_warn(priv->ndev,
                                        "IP versions other than IPv4 are not supported!\n");

        return 0;
}
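
/* Note on the '~' tests above: gfar_add_cls() runs gfar_invert_masks()
 * on the flow spec before calling this function, so the mask fields
 * arrive here bit-flipped and '~mask' effectively recovers the mask the
 * user originally passed in.
 */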

static int gfar_process_filer_changes(struct gfar_private *priv)
{
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
        s32 i = 0;
        s32 ret = 0;

        /* Allocate zeroed memory, so tab->index starts out as zero, too */
        tab = kzalloc(sizeof(*tab), GFP_KERNEL);
        if (tab == NULL)
                return -ENOMEM;

        /* Now convert the existing filer data from flow_spec into
         * filer table binary format */
        list_for_each_entry(j, &priv->rx_list.list, list) {
                ret = gfar_convert_to_filer(&j->fs, tab);
                if (ret == -EBUSY) {
                        netdev_err(priv->ndev, "Rule not added: No free space!\n");
                        goto end;
                }
                if (ret == -1) {
                        netdev_err(priv->ndev, "Rule not added: Unsupported flow type!\n");
                        goto end;
                }
        }

        i = tab->index;

        /* Optimizations to save entries */
        gfar_cluster_filer(tab);
        gfar_optimize_filer_masks(tab);

        /* The list may be empty (e.g. right after the last rule was
         * deleted), in which case i is 0; skip the statistic then to
         * avoid a division by zero */
        if (i)
                pr_debug("\n\tSummary:\n"
                        "\tData on hardware: %d\n"
                        "\tCompression rate: %d%%\n",
                        tab->index, 100 - (100 * tab->index) / i);

        /* Write everything to hardware */
        ret = gfar_write_filer_table(priv, tab);
        if (ret == -EBUSY) {
                netdev_err(priv->ndev, "Rule not added: No free space!\n");
                goto end;
        }

end:    kfree(tab);
        return ret;
}
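
/* Worked example for the debug summary above (illustrative numbers): if
 * the rules convert to i = 20 raw entries and the two optimization
 * passes shrink the table to tab->index = 15, the reported compression
 * rate is 100 - (100 * 15) / 20 = 25%.
 */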

static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
{
        u32 i = 0;

        for (i = 0; i < sizeof(flow->m_u); i++)
                flow->m_u.hdata[i] ^= 0xFF;

        flow->m_ext.vlan_etype ^= 0xFFFF;
        flow->m_ext.vlan_tci ^= 0xFFFF;
        flow->m_ext.data[0] ^= ~0;
        flow->m_ext.data[1] ^= ~0;
}
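
/* gfar_invert_masks() is its own inverse (x ^= 0xFF applied twice is a
 * no-op), so the same function translates between the ethtool mask
 * convention and the driver's internal one in both directions: once when
 * a rule is stored (gfar_add_cls) and once when it is reported back to
 * user space (gfar_get_cls).
 */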

static int gfar_add_cls(struct gfar_private *priv,
                struct ethtool_rx_flow_spec *flow)
{
        struct ethtool_flow_spec_container *temp, *comp;
        int ret = 0;

        temp = kmalloc(sizeof(*temp), GFP_KERNEL);
        if (temp == NULL)
                return -ENOMEM;
        memcpy(&temp->fs, flow, sizeof(temp->fs));

        gfar_invert_masks(&temp->fs);
        ret = gfar_check_capability(&temp->fs, priv);
        if (ret)
                goto clean_mem;
        /* Link the new element in at the position given by fs.location,
         * keeping the list sorted by location */
        if (list_empty(&priv->rx_list.list)) {
                ret = gfar_check_filer_hardware(priv);
                if (ret != 0)
                        goto clean_mem;
                list_add(&temp->list, &priv->rx_list.list);
                goto process;
        } else {
                list_for_each_entry(comp, &priv->rx_list.list, list) {
                        if (comp->fs.location > flow->location) {
                                list_add_tail(&temp->list, &comp->list);
                                goto process;
                        }
                        if (comp->fs.location == flow->location) {
                                netdev_err(priv->ndev,
                                                "Rule not added: ID %d not free!\n",
                                        flow->location);
                                ret = -EBUSY;
                                goto clean_mem;
                        }
                }
                list_add_tail(&temp->list, &priv->rx_list.list);
        }

process:
        ret = gfar_process_filer_changes(priv);
        if (ret)
                goto clean_list;
        priv->rx_list.count++;
        return ret;

clean_list:
        list_del(&temp->list);
clean_mem:
        kfree(temp);
        return ret;
}
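
/* This path is reached from user space via the ETHTOOL_SRXCLSRLINS ioctl.
 * With the ethtool utility a matching invocation would look roughly like
 * (device name and values are only examples):
 *
 *      ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 5
 *
 * i.e. steer IPv4/UDP frames with destination port 319 to RX queue 1 and
 * store the rule at location 5.
 */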

static int gfar_del_cls(struct gfar_private *priv, u32 loc)
{
        struct ethtool_flow_spec_container *comp;
        int ret = -EINVAL;

        if (list_empty(&priv->rx_list.list))
                return ret;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                if (comp->fs.location == loc) {
                        list_del(&comp->list);
                        kfree(comp);
                        priv->rx_list.count--;
                        gfar_process_filer_changes(priv);
                        ret = 0;
                        break;
                }
        }

        return ret;
}
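
/* The user-space counterpart is the ETHTOOL_SRXCLSRLDEL ioctl, e.g.
 * (illustrative):
 *
 *      ethtool -N eth0 delete 5
 *
 * which removes the rule at location 5 and reprograms the remaining
 * rules into the hardware table.
 */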

static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
        struct ethtool_flow_spec_container *comp;
        int ret = -EINVAL;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                if (comp->fs.location == cmd->fs.location) {
                        memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
                        gfar_invert_masks(&cmd->fs);
                        ret = 0;
                        break;
                }
        }

        return ret;
}

static int gfar_get_cls_all(struct gfar_private *priv,
                struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
        struct ethtool_flow_spec_container *comp;
        u32 i = 0;

        list_for_each_entry(comp, &priv->rx_list.list, list) {
                /* Don't write past the buffer that user space sized
                 * via ETHTOOL_GRXCLSRLCNT */
                if (i == cmd->rule_cnt)
                        return -EMSGSIZE;
                rule_locs[i] = comp->fs.location;
                i++;
        }

        cmd->data = MAX_FILER_IDX;
        cmd->rule_cnt = i;

        return 0;
}

static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;

        mutex_lock(&priv->rx_queue_access);

        switch (cmd->cmd) {
        case ETHTOOL_SRXFH:
                ret = gfar_set_hash_opts(priv, cmd);
                break;
        case ETHTOOL_SRXCLSRLINS:
                if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
                        cmd->fs.ring_cookie >= priv->num_rx_queues) {
                        ret = -EINVAL;
                        break;
                }
                ret = gfar_add_cls(priv, &cmd->fs);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = gfar_del_cls(priv, cmd->fs.location);
                break;
        default:
                ret = -EINVAL;
        }

        mutex_unlock(&priv->rx_queue_access);

        return ret;
}

static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
                void *rule_locs)
{
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
                cmd->data = priv->num_rx_queues;
                break;
        case ETHTOOL_GRXCLSRLCNT:
                cmd->rule_cnt = priv->rx_list.count;
                break;
        case ETHTOOL_GRXCLSRULE:
                ret = gfar_get_cls(priv, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
                ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
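
/* Note: unlike gfar_set_nfc(), this get path does not take the
 * rx_queue_access mutex. Both entry points are invoked by the ethtool
 * core under the RTNL lock, which already serializes them against each
 * other; the extra mutex on the set path presumably guards against
 * other users of the RX queue configuration.
 */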

const struct ethtool_ops gfar_ethtool_ops = {
        .get_settings = gfar_gsettings,
        .set_settings = gfar_ssettings,
        .get_drvinfo = gfar_gdrvinfo,
        .get_regs_len = gfar_reglen,
        .get_regs = gfar_get_regs,
        .get_link = ethtool_op_get_link,
        .get_coalesce = gfar_gcoalesce,
        .set_coalesce = gfar_scoalesce,
        .get_ringparam = gfar_gringparam,
        .set_ringparam = gfar_sringparam,
        .get_strings = gfar_gstrings,
        .get_sset_count = gfar_sset_count,
        .get_ethtool_stats = gfar_fill_stats,
        .get_msglevel = gfar_get_msglevel,
        .set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
        .get_wol = gfar_get_wol,
        .set_wol = gfar_set_wol,
#endif
        .set_rxnfc = gfar_set_nfc,
        .get_rxnfc = gfar_get_nfc,
};
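
/* A minimal sketch of how this ops table gets attached to a net_device
 * during probe (the actual call site is in gianfar.c, not in this file;
 * 'dev' stands for the freshly allocated net_device):
 *
 *      SET_ETHTOOL_OPS(dev, &gfar_ethtool_ops);
 *
 * which on kernels of this vintage expands to
 * 'dev->ethtool_ops = &gfar_ethtool_ops'.
 */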