/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.01"
#define DRV_MODULE_RELDATE      "Jun 16, 2006"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
#define B44_DMA_MASK 0x3fffffff

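/* Ring accounting note: tx_prod and tx_cons are free-running indices
 * into a ring of B44_TX_RING_SIZE (a power of two) entries, with
 * tx_pending capping how many may be in flight.  For example, with
 * tx_cons == 5, tx_prod == 10 and tx_pending == 511, TX_BUFFS_AVAIL
 * evaluates to 5 + 511 - 10 == 506 free descriptors.
 */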
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ           (1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ           (B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static struct pci_device_id b44_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
        { }     /* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

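/* When the rings live in ordinary kernel memory (the RX/TX "ring hack"
 * paths below), the descriptors are streaming-DMA mapped rather than
 * coherent, so every descriptor update must be synced by hand.  These
 * helpers sync just the aligned region around one descriptor, using
 * dma_desc_align_mask and dma_desc_sync_size, which the driver sets up
 * elsewhere (not shown in this section).
 */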
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(&pdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        writel(val, bp->regs + reg);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA             0x40000000      /* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR  0x18002000      /* Address of PCI core on BCM4400 cards */

static u32 ssb_get_core_rev(struct b44 *bp)
{
        return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}

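/* Temporarily slide the PCI BAR0 window over the SSB PCI core so its
 * registers can be prodded through the same MMIO mapping, then restore
 * the window before returning.
 */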
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}

static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                            SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround.  */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}

static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 type, base;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}

static int ssb_is_core_up(struct b44 *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}

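/* Write one 6-byte MAC address into the CAM at the given index:
 * bytes 2..5 go into CAM_DATA_LO, bytes 0..1 plus the valid bit into
 * CAM_DATA_HI, then the write is kicked off via CAM_CTRL and we wait
 * for the busy bit to clear.
 */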
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = b44_readphy(bp, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        b44_writephy(bp, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

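/* Fold the clear-on-read hardware MIB counters into bp->hw_stats.
 * Note this walks the counters by bumping a u32 pointer, so it relies
 * on the fields of the stats structure (declared via
 * B44_STAT_REG_DECLARE) being laid out in register order.
 */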
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        bp->timer.expires = jiffies + HZ;
        add_timer(&bp->timer);
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
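/* A sketch of the resulting buffer layout, where RO == bp->rx_offset
 * (the 30 bytes described above):
 *
 *   mapping                          mapping + RO  <- address given
 *      |                                 |            to the chip
 *      v                                 v
 *      +---------------------------------+------------------------+
 *      | struct rx_header + padding (RO) | received packet data   |
 *      +---------------------------------+------------------------+
 *                                        ^ skb->data after
 *                                          skb_reserve(skb, RO)
 */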
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = dev_alloc_skb(RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = pci_map_single(bp->pdev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        skb->dev = bp->dev;
        skb_reserve(skb, bp->rx_offset);

        rh = (struct rx_header *)
                (skb->data - bp->rx_offset);
        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        u32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        pci_unmap_addr_set(dest_map, mapping,
                           pci_unmap_addr(src_map, mapping));

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
                                       RX_PKT_BUF_SZ,
                                       PCI_DMA_FROMDEVICE);
}

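/* RX processing: the chip's current descriptor index is read from
 * DMARX_STAT and compared against our software consumer index.  Large
 * frames (above RX_COPY_THRESHOLD) are passed up in place and the ring
 * slot refilled with a fresh skb; small frames are copied into a new
 * skb and the original buffer recycled back into the ring.
 */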
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = pci_unmap_addr(rp, mapping);
                struct rx_header *rh;
                u16 len;

                pci_dma_sync_single_for_cpu(bp->pdev, map,
                                            RX_PKT_BUF_SZ,
                                            PCI_DMA_FROMDEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        pci_unmap_single(bp->pdev, map,
                                         skb_size, PCI_DMA_FROMDEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + bp->rx_offset);
                        skb_pull(skb, bp->rx_offset);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        copy_skb->dev = bp->dev;
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}

static int b44_poll(struct net_device *netdev, int *budget)
{
        struct b44 *bp = netdev_priv(netdev);
        int done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        done = 1;
        if (bp->istat & ISTAT_RX) {
                int orig_budget = *budget;
                int work_done;

                if (orig_budget > netdev->quota)
                        orig_budget = netdev->quota;

                work_done = b44_rx(bp, orig_budget);

                *budget -= work_done;
                netdev->quota -= work_done;

                if (work_done >= orig_budget)
                        done = 0;
        }

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, 1);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                done = 1;
        }

        if (done) {
                netif_rx_complete(netdev);
                b44_enable_ints(bp);
        }

        return (done ? 0 : 1);
}

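/* NAPI interrupt scheme: the handler latches the masked status into
 * bp->istat, disables all chip interrupt sources and schedules the
 * poll routine; b44_poll() above does the actual TX/RX work and
 * re-enables interrupts once it has drained the events.
 */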
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct sk_buff *bounce_skb;
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

                bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
                                             GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = pci_map_single(bp->pdev, bounce_skb->data,
                                         len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) {
                        if (!dma_mapping_error(mapping))
                                pci_unmap_single(bp->pdev, mapping,
                                         len, PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, 1);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 RX_PKT_BUF_SZ,
                                 PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 rp->skb->len,
                                 PCI_DMA_TODEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, GFP_KERNEL);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > B44_DMA_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, GFP_KERNEL);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > B44_DMA_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                             (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);
        __b44_set_mac_addr(bp);
        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int full_reset)
{
        u32 val;

        b44_chip_reset(bp);
        if (full_reset) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + Ethernet header + VLAN tag (4) + FCS (4) + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (full_reset) {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        } else {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp);
        if (err)
                goto out;

        b44_init_rings(bp);
        b44_init_hw(bp, 1);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#if 0
/*static*/ void b44_dump_state(struct b44 *bp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;

        pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
        printk("DEBUG: PCI status [%04x]\n", val16);

}
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

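/* Build one Wake-on-LAN magic-packet match starting at 'offset' in the
 * pattern buffer: six 0xff sync bytes followed by as many repetitions
 * of the MAC address as still fit in B44_PATTERN_SIZE, with pmask
 * marking which byte positions the filter must compare.  Returns the
 * index of the last pattern byte, i.e. one less than the real length.
 */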
1483 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1484 {
1485         int magicsync = 6;
1486         int k, j, len = offset;
1487         int ethaddr_bytes = ETH_ALEN;
1488
1489         memset(ppattern + offset, 0xff, magicsync);
1490         for (j = 0; j < magicsync; j++)
1491                 set_bit(len++, (unsigned long *) pmask);
1492
1493         for (j = 0; j < B44_MAX_PATTERNS; j++) {
1494                 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1495                         ethaddr_bytes = ETH_ALEN;
1496                 else
1497                         ethaddr_bytes = B44_PATTERN_SIZE - len;
1498                 if (ethaddr_bytes <=0)
1499                         break;
1500                 for (k = 0; k< ethaddr_bytes; k++) {
1501                         ppattern[offset + magicsync +
1502                                 (j * ETH_ALEN) + k] = macaddr[k];
1503                         len++;
1504                         set_bit(len, (unsigned long *) pmask);
1505                 }
1506         }
1507         return len - 1;
1508 }
1509
1510 /* Setup magic packet patterns in the b44 WOL
1511  * pattern matching filter.
1512  */
1513 static void b44_setup_pseudo_magicp(struct b44 *bp)
1514 {
1515
1516         u32 val;
1517         int plen0, plen1, plen2;
1518         u8 *pwol_pattern;
1519         u8 pwol_mask[B44_PMASK_SIZE];
1520
1521         pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1522         if (!pwol_pattern) {
1523                 printk(KERN_ERR PFX "Memory not available for WOL\n");
1524                 return;
1525         }
1526
1527         /* Ipv4 magic packet pattern - pattern 0.*/
1528         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1529         memset(pwol_mask, 0, B44_PMASK_SIZE);
1530         plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1531                                   B44_ETHIPV4UDP_HLEN);
1532
1533         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1534         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1535
1536         /* Raw Ethernet II magic packet pattern - pattern 1 */
1537         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1538         memset(pwol_mask, 0, B44_PMASK_SIZE);
1539         plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1540                                   ETH_HLEN);
1541
1542         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1543                        B44_PATTERN_BASE + B44_PATTERN_SIZE);
1544         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1545                        B44_PMASK_BASE + B44_PMASK_SIZE);
1546
1547         /* IPv6 magic packet pattern - pattern 2 */
1548         memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1549         memset(pwol_mask, 0, B44_PMASK_SIZE);
1550         plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1551                                   B44_ETHIPV6UDP_HLEN);
1552
1553         bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1554                        B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1555         bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1556                        B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1557
1558         kfree(pwol_pattern);
1559
1560         /* set these patterns' lengths: one less than each real length */
1561         val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1562         bw32(bp, B44_WKUP_LEN, val);
1563
1564         /* enable wakeup pattern matching */
1565         val = br32(bp, B44_DEVCTRL);
1566         bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1568 }
1569
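/* Arm the chip for Wake-On-LAN.  B0 and later cores can match magic
 * packets directly in hardware (DEVCTRL_MPM); older cores rely on the
 * pseudo-magic pattern filter programmed above.  The PE bits in
 * SBTMSLOW and the PCI power-management register are then set.
 */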
1570 static void b44_setup_wol(struct b44 *bp)
1571 {
1572         u32 val;
1573         u16 pmval;
1574
1575         bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1576
1577         if (bp->flags & B44_FLAG_B0_ANDLATER) {
1579                 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1580
1581                 val = bp->dev->dev_addr[2] << 24 |
1582                         bp->dev->dev_addr[3] << 16 |
1583                         bp->dev->dev_addr[4] << 8 |
1584                         bp->dev->dev_addr[5];
1585                 bw32(bp, B44_ADDR_LO, val);
1586
1587                 val = bp->dev->dev_addr[0] << 8 |
1588                         bp->dev->dev_addr[1];
1589                 bw32(bp, B44_ADDR_HI, val);
1590
1591                 val = br32(bp, B44_DEVCTRL);
1592                 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1593
1594         } else {
1595                 b44_setup_pseudo_magicp(bp);
1596         }
1597
1598         val = br32(bp, B44_SBTMSLOW);
1599         bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1600
1601         pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1602         pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1604 }
1605
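/* Shut the interface down.  If Wake-On-LAN is enabled, the hardware is
 * re-initialized in a minimal state and armed via b44_setup_wol()
 * before the descriptor memory is released.
 */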
1606 static int b44_close(struct net_device *dev)
1607 {
1608         struct b44 *bp = netdev_priv(dev);
1609
1610         netif_stop_queue(dev);
1611
1612         netif_poll_disable(dev);
1613
1614         del_timer_sync(&bp->timer);
1615
1616         spin_lock_irq(&bp->lock);
1617
1618 #if 0
1619         b44_dump_state(bp);
1620 #endif
1621         b44_halt(bp);
1622         b44_free_rings(bp);
1623         netif_carrier_off(dev);
1624
1625         spin_unlock_irq(&bp->lock);
1626
1627         free_irq(dev->irq, dev);
1628
1629         netif_poll_enable(dev);
1630
1631         if (bp->flags & B44_FLAG_WOL_ENABLE) {
1632                 b44_init_hw(bp, 0);
1633                 b44_setup_wol(bp);
1634         }
1635
1636         b44_free_consistent(bp);
1637
1638         return 0;
1639 }
1640
1641 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1642 {
1643         struct b44 *bp = netdev_priv(dev);
1644         struct net_device_stats *nstat = &bp->stats;
1645         struct b44_hw_stats *hwstat = &bp->hw_stats;
1646
1647         /* Convert HW stats into netdevice stats. */
1648         nstat->rx_packets = hwstat->rx_pkts;
1649         nstat->tx_packets = hwstat->tx_pkts;
1650         nstat->rx_bytes   = hwstat->rx_octets;
1651         nstat->tx_bytes   = hwstat->tx_octets;
1652         nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1653                              hwstat->tx_oversize_pkts +
1654                              hwstat->tx_underruns +
1655                              hwstat->tx_excessive_cols +
1656                              hwstat->tx_late_cols);
1657         nstat->multicast  = hwstat->rx_multicast_pkts;
1658         nstat->collisions = hwstat->tx_total_cols;
1659
1660         nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1661                                    hwstat->rx_undersize);
1662         nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1663         nstat->rx_frame_errors  = hwstat->rx_align_errs;
1664         nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1665         nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1666                                    hwstat->rx_oversize_pkts +
1667                                    hwstat->rx_missed_pkts +
1668                                    hwstat->rx_crc_align_errs +
1669                                    hwstat->rx_undersize +
1670                                    hwstat->rx_crc_errs +
1671                                    hwstat->rx_align_errs +
1672                                    hwstat->rx_symbol_errs);
1673
1674         nstat->tx_aborted_errors = hwstat->tx_underruns;
1675 #if 0
1676         /* Carrier lost counter seems to be broken for some devices */
1677         nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1678 #endif
1679
1680         return nstat;
1681 }
1682
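/* Load the multicast list into the CAM starting at entry 1; entry 0 is
 * reserved for the interface's own MAC address.  Returns the index of
 * the first unused CAM entry.
 */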
1683 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1684 {
1685         struct dev_mc_list *mclist;
1686         int i, num_ents;
1687
1688         num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1689         mclist = dev->mc_list;
1690         for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1691                 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1692         }
1693         return i + 1;
1694 }
1695
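/* Program the receive filter.  Promiscuous mode bypasses the CAM;
 * otherwise the unicast address and up to B44_MCAST_TABLE_SIZE
 * multicast addresses are loaded and the remaining CAM entries are
 * cleared.  The caller must hold bp->lock.
 */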
1696 static void __b44_set_rx_mode(struct net_device *dev)
1697 {
1698         struct b44 *bp = netdev_priv(dev);
1699         u32 val;
1700
1701         val = br32(bp, B44_RXCONFIG);
1702         val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1703         if (dev->flags & IFF_PROMISC) {
1704                 val |= RXCONFIG_PROMISC;
1705                 bw32(bp, B44_RXCONFIG, val);
1706         } else {
1707                 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1708                 int i = 0;
1709
1710                 __b44_set_mac_addr(bp);
1711
1712                 if ((dev->flags & IFF_ALLMULTI) ||
1713                     (dev->mc_count > B44_MCAST_TABLE_SIZE))
1714                         val |= RXCONFIG_ALLMULTI;
1715                 else
1716                         i = __b44_load_mcast(bp, dev);
1717
1718                 for (; i < 64; i++)
1719                         __b44_cam_write(bp, zero, i);
1720
1721                 bw32(bp, B44_RXCONFIG, val);
1722                 val = br32(bp, B44_CAM_CTRL);
1723                 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1724         }
1725 }
1726
1727 static void b44_set_rx_mode(struct net_device *dev)
1728 {
1729         struct b44 *bp = netdev_priv(dev);
1730
1731         spin_lock_irq(&bp->lock);
1732         __b44_set_rx_mode(dev);
1733         spin_unlock_irq(&bp->lock);
1734 }
1735
1736 static u32 b44_get_msglevel(struct net_device *dev)
1737 {
1738         struct b44 *bp = netdev_priv(dev);
1739         return bp->msg_enable;
1740 }
1741
1742 static void b44_set_msglevel(struct net_device *dev, u32 value)
1743 {
1744         struct b44 *bp = netdev_priv(dev);
1745         bp->msg_enable = value;
1746 }
1747
1748 static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1749 {
1750         struct b44 *bp = netdev_priv(dev);
1751         struct pci_dev *pci_dev = bp->pdev;
1752
1753         strcpy(info->driver, DRV_MODULE_NAME);
1754         strcpy(info->version, DRV_MODULE_VERSION);
1755         strcpy(info->bus_info, pci_name(pci_dev));
1756 }
1757
1758 static int b44_nway_reset(struct net_device *dev)
1759 {
1760         struct b44 *bp = netdev_priv(dev);
1761         u32 bmcr;
1762         int r;
1763
1764         spin_lock_irq(&bp->lock);
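        /* BMCR is deliberately read twice; the first value is discarded. */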
1765         b44_readphy(bp, MII_BMCR, &bmcr);
1766         b44_readphy(bp, MII_BMCR, &bmcr);
1767         r = -EINVAL;
1768         if (bmcr & BMCR_ANENABLE) {
1769                 b44_writephy(bp, MII_BMCR,
1770                              bmcr | BMCR_ANRESTART);
1771                 r = 0;
1772         }
1773         spin_unlock_irq(&bp->lock);
1774
1775         return r;
1776 }
1777
1778 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1779 {
1780         struct b44 *bp = netdev_priv(dev);
1781
1782         cmd->supported = (SUPPORTED_Autoneg);
1783         cmd->supported |= (SUPPORTED_100baseT_Half |
1784                           SUPPORTED_100baseT_Full |
1785                           SUPPORTED_10baseT_Half |
1786                           SUPPORTED_10baseT_Full |
1787                           SUPPORTED_MII);
1788
1789         cmd->advertising = 0;
1790         if (bp->flags & B44_FLAG_ADV_10HALF)
1791                 cmd->advertising |= ADVERTISED_10baseT_Half;
1792         if (bp->flags & B44_FLAG_ADV_10FULL)
1793                 cmd->advertising |= ADVERTISED_10baseT_Full;
1794         if (bp->flags & B44_FLAG_ADV_100HALF)
1795                 cmd->advertising |= ADVERTISED_100baseT_Half;
1796         if (bp->flags & B44_FLAG_ADV_100FULL)
1797                 cmd->advertising |= ADVERTISED_100baseT_Full;
1798         cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1799         cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1800                 SPEED_100 : SPEED_10;
1801         cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1802                 DUPLEX_FULL : DUPLEX_HALF;
1803         cmd->port = 0;
1804         cmd->phy_address = bp->phy_addr;
1805         cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1806                 XCVR_INTERNAL : XCVR_EXTERNAL;
1807         cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1808                 AUTONEG_DISABLE : AUTONEG_ENABLE;
1809         if (cmd->autoneg == AUTONEG_ENABLE)
1810                 cmd->advertising |= ADVERTISED_Autoneg;
1811         if (!netif_running(dev)) {
1812                 cmd->speed = 0;
1813                 cmd->duplex = 0xff;
1814         }
1815         cmd->maxtxpkt = 0;
1816         cmd->maxrxpkt = 0;
1817         return 0;
1818 }
1819
1820 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1821 {
1822         struct b44 *bp = netdev_priv(dev);
1823
1824         /* We do not support gigabit. */
1825         if (cmd->autoneg == AUTONEG_ENABLE) {
1826                 if (cmd->advertising &
1827                     (ADVERTISED_1000baseT_Half |
1828                      ADVERTISED_1000baseT_Full))
1829                         return -EINVAL;
1830         } else if ((cmd->speed != SPEED_100 &&
1831                     cmd->speed != SPEED_10) ||
1832                    (cmd->duplex != DUPLEX_HALF &&
1833                     cmd->duplex != DUPLEX_FULL)) {
1834                 return -EINVAL;
1835         }
1836
1837         spin_lock_irq(&bp->lock);
1838
1839         if (cmd->autoneg == AUTONEG_ENABLE) {
1840                 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1841                                B44_FLAG_100_BASE_T |
1842                                B44_FLAG_FULL_DUPLEX |
1843                                B44_FLAG_ADV_10HALF |
1844                                B44_FLAG_ADV_10FULL |
1845                                B44_FLAG_ADV_100HALF |
1846                                B44_FLAG_ADV_100FULL);
1847                 if (cmd->advertising == 0) {
1848                         bp->flags |= (B44_FLAG_ADV_10HALF |
1849                                       B44_FLAG_ADV_10FULL |
1850                                       B44_FLAG_ADV_100HALF |
1851                                       B44_FLAG_ADV_100FULL);
1852                 } else {
1853                         if (cmd->advertising & ADVERTISED_10baseT_Half)
1854                                 bp->flags |= B44_FLAG_ADV_10HALF;
1855                         if (cmd->advertising & ADVERTISED_10baseT_Full)
1856                                 bp->flags |= B44_FLAG_ADV_10FULL;
1857                         if (cmd->advertising & ADVERTISED_100baseT_Half)
1858                                 bp->flags |= B44_FLAG_ADV_100HALF;
1859                         if (cmd->advertising & ADVERTISED_100baseT_Full)
1860                                 bp->flags |= B44_FLAG_ADV_100FULL;
1861                 }
1862         } else {
1863                 bp->flags |= B44_FLAG_FORCE_LINK;
1864                 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1865                 if (cmd->speed == SPEED_100)
1866                         bp->flags |= B44_FLAG_100_BASE_T;
1867                 if (cmd->duplex == DUPLEX_FULL)
1868                         bp->flags |= B44_FLAG_FULL_DUPLEX;
1869         }
1870
1871         if (netif_running(dev))
1872                 b44_setup_phy(bp);
1873
1874         spin_unlock_irq(&bp->lock);
1875
1876         return 0;
1877 }
1878
1879 static void b44_get_ringparam(struct net_device *dev,
1880                               struct ethtool_ringparam *ering)
1881 {
1882         struct b44 *bp = netdev_priv(dev);
1883
1884         ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1885         ering->rx_pending = bp->rx_pending;
1886
1887         /* XXX ethtool lacks a tx_max_pending, oops... */
1888 }
1889
1890 static int b44_set_ringparam(struct net_device *dev,
1891                              struct ethtool_ringparam *ering)
1892 {
1893         struct b44 *bp = netdev_priv(dev);
1894
1895         if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1896             (ering->rx_mini_pending != 0) ||
1897             (ering->rx_jumbo_pending != 0) ||
1898             (ering->tx_pending > B44_TX_RING_SIZE - 1))
1899                 return -EINVAL;
1900
1901         spin_lock_irq(&bp->lock);
1902
1903         bp->rx_pending = ering->rx_pending;
1904         bp->tx_pending = ering->tx_pending;
1905
1906         b44_halt(bp);
1907         b44_init_rings(bp);
1908         b44_init_hw(bp, 1);
1909         netif_wake_queue(bp->dev);
1910         spin_unlock_irq(&bp->lock);
1911
1912         b44_enable_ints(bp);
1913
1914         return 0;
1915 }
1916
1917 static void b44_get_pauseparam(struct net_device *dev,
1918                                 struct ethtool_pauseparam *epause)
1919 {
1920         struct b44 *bp = netdev_priv(dev);
1921
1922         epause->autoneg =
1923                 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1924         epause->rx_pause =
1925                 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1926         epause->tx_pause =
1927                 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1928 }
1929
1930 static int b44_set_pauseparam(struct net_device *dev,
1931                                 struct ethtool_pauseparam *epause)
1932 {
1933         struct b44 *bp = netdev_priv(dev);
1934
1935         spin_lock_irq(&bp->lock);
1936         if (epause->autoneg)
1937                 bp->flags |= B44_FLAG_PAUSE_AUTO;
1938         else
1939                 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1940         if (epause->rx_pause)
1941                 bp->flags |= B44_FLAG_RX_PAUSE;
1942         else
1943                 bp->flags &= ~B44_FLAG_RX_PAUSE;
1944         if (epause->tx_pause)
1945                 bp->flags |= B44_FLAG_TX_PAUSE;
1946         else
1947                 bp->flags &= ~B44_FLAG_TX_PAUSE;
1948         if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1949                 b44_halt(bp);
1950                 b44_init_rings(bp);
1951                 b44_init_hw(bp, 1);
1952         } else {
1953                 __b44_set_flow_ctrl(bp, bp->flags);
1954         }
1955         spin_unlock_irq(&bp->lock);
1956
1957         b44_enable_ints(bp);
1958
1959         return 0;
1960 }
1961
1962 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1963 {
1964         switch (stringset) {
1965         case ETH_SS_STATS:
1966                 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1967                 break;
1968         }
1969 }
1970
1971 static int b44_get_stats_count(struct net_device *dev)
1972 {
1973         return ARRAY_SIZE(b44_gstrings);
1974 }
1975
1976 static void b44_get_ethtool_stats(struct net_device *dev,
1977                                   struct ethtool_stats *stats, u64 *data)
1978 {
1979         struct b44 *bp = netdev_priv(dev);
1980         u32 *val = &bp->hw_stats.tx_good_octets;
1981         u32 i;
1982
1983         spin_lock_irq(&bp->lock);
1984
1985         b44_stats_update(bp);
1986
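        /* This relies on the counters in b44_hw_stats being consecutive
         * u32s laid out in the same order as b44_gstrings.
         */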
1987         for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1988                 *data++ = *val++;
1989
1990         spin_unlock_irq(&bp->lock);
1991 }
1992
1993 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1994 {
1995         struct b44 *bp = netdev_priv(dev);
1996
1997         wol->supported = WAKE_MAGIC;
1998         if (bp->flags & B44_FLAG_WOL_ENABLE)
1999                 wol->wolopts = WAKE_MAGIC;
2000         else
2001                 wol->wolopts = 0;
2002         memset(&wol->sopass, 0, sizeof(wol->sopass));
2003 }
2004
2005 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2006 {
2007         struct b44 *bp = netdev_priv(dev);
2008
2009         spin_lock_irq(&bp->lock);
2010         if (wol->wolopts & WAKE_MAGIC)
2011                 bp->flags |= B44_FLAG_WOL_ENABLE;
2012         else
2013                 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2014         spin_unlock_irq(&bp->lock);
2015
2016         return 0;
2017 }
2018
2019 static const struct ethtool_ops b44_ethtool_ops = {
2020         .get_drvinfo            = b44_get_drvinfo,
2021         .get_settings           = b44_get_settings,
2022         .set_settings           = b44_set_settings,
2023         .nway_reset             = b44_nway_reset,
2024         .get_link               = ethtool_op_get_link,
2025         .get_wol                = b44_get_wol,
2026         .set_wol                = b44_set_wol,
2027         .get_ringparam          = b44_get_ringparam,
2028         .set_ringparam          = b44_set_ringparam,
2029         .get_pauseparam         = b44_get_pauseparam,
2030         .set_pauseparam         = b44_set_pauseparam,
2031         .get_msglevel           = b44_get_msglevel,
2032         .set_msglevel           = b44_set_msglevel,
2033         .get_strings            = b44_get_strings,
2034         .get_stats_count        = b44_get_stats_count,
2035         .get_ethtool_stats      = b44_get_ethtool_stats,
2036         .get_perm_addr          = ethtool_op_get_perm_addr,
2037 };
2038
2039 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2040 {
2041         struct mii_ioctl_data *data = if_mii(ifr);
2042         struct b44 *bp = netdev_priv(dev);
2043         int err = -EINVAL;
2044
2045         if (!netif_running(dev))
2046                 goto out;
2047
2048         spin_lock_irq(&bp->lock);
2049         err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2050         spin_unlock_irq(&bp->lock);
2051 out:
2052         return err;
2053 }
2054
2055 /* Read 128 bytes of EEPROM; cpu_to_le16() undoes readw()'s byte swap so the buffer keeps the EEPROM's original byte order. */
2056 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2057 {
2058         long i;
2059         u16 *ptr = (u16 *) data;
2060
2061         for (i = 0; i < 128; i += 2)
2062                 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2063
2064         return 0;
2065 }
2066
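/* Pull the MAC address, PHY address and core configuration out of the
 * on-board EEPROM.  The MAC bytes sit at fixed, pairwise-swapped
 * offsets.
 */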
2067 static int __devinit b44_get_invariants(struct b44 *bp)
2068 {
2069         u8 eeprom[128];
2070         int err;
2071
2072         err = b44_read_eeprom(bp, &eeprom[0]);
2073         if (err)
2074                 goto out;
2075
2076         bp->dev->dev_addr[0] = eeprom[79];
2077         bp->dev->dev_addr[1] = eeprom[78];
2078         bp->dev->dev_addr[2] = eeprom[81];
2079         bp->dev->dev_addr[3] = eeprom[80];
2080         bp->dev->dev_addr[4] = eeprom[83];
2081         bp->dev->dev_addr[5] = eeprom[82];
2082
2083         if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2084                 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2085                 return -EINVAL;
2086         }
2087
2088         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2089
2090         bp->phy_addr = eeprom[90] & 0x1f;
2091
2092         /* With this, plus the rx_header prepended to the data by the
2093          * hardware, we'll land the ethernet header on a 2-byte boundary.
2094          */
2095         bp->rx_offset = 30;
2096
2097         bp->imask = IMASK_DEF;
2098
2099         bp->core_unit = ssb_core_unit(bp);
2100         bp->dma_offset = SB_PCI_DMA;
2101
2102         /* XXX - really required?
2103            bp->flags |= B44_FLAG_BUGGY_TXPTR;
2104          */
2105
2106         if (ssb_get_core_rev(bp) >= 7)
2107                 bp->flags |= B44_FLAG_B0_ANDLATER;
2108
2109 out:
2110         return err;
2111 }
2112
2113 static int __devinit b44_init_one(struct pci_dev *pdev,
2114                                   const struct pci_device_id *ent)
2115 {
2116         static int b44_version_printed;
2117         unsigned long b44reg_base, b44reg_len;
2118         struct net_device *dev;
2119         struct b44 *bp;
2120         int err, i;
2121
2122         if (b44_version_printed++ == 0)
2123                 printk(KERN_INFO "%s", version);
2124
2125         err = pci_enable_device(pdev);
2126         if (err) {
2127                 dev_err(&pdev->dev, "Cannot enable PCI device, "
2128                        "aborting.\n");
2129                 return err;
2130         }
2131
2132         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2133                 dev_err(&pdev->dev,
2134                         "Cannot find proper PCI device "
2135                        "base address, aborting.\n");
2136                 err = -ENODEV;
2137                 goto err_out_disable_pdev;
2138         }
2139
2140         err = pci_request_regions(pdev, DRV_MODULE_NAME);
2141         if (err) {
2142                 dev_err(&pdev->dev,
2143                         "Cannot obtain PCI resources, aborting.\n");
2144                 goto err_out_disable_pdev;
2145         }
2146
2147         pci_set_master(pdev);
2148
2149         err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
2150         if (err) {
2151                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2152                 goto err_out_free_res;
2153         }
2154
2155         err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
2156         if (err) {
2157                 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2158                 goto err_out_free_res;
2159         }
2160
2161         b44reg_base = pci_resource_start(pdev, 0);
2162         b44reg_len = pci_resource_len(pdev, 0);
2163
2164         dev = alloc_etherdev(sizeof(*bp));
2165         if (!dev) {
2166                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2167                 err = -ENOMEM;
2168                 goto err_out_free_res;
2169         }
2170
2171         SET_MODULE_OWNER(dev);
2172         SET_NETDEV_DEV(dev, &pdev->dev);
2173
2174         /* No interesting netdevice features in this card... */
2175         dev->features |= 0;
2176
2177         bp = netdev_priv(dev);
2178         bp->pdev = pdev;
2179         bp->dev = dev;
2180
2181         bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2182
2183         spin_lock_init(&bp->lock);
2184
2185         bp->regs = ioremap(b44reg_base, b44reg_len);
2186         if (!bp->regs) {
2187                 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2188                 err = -ENOMEM;
2189                 goto err_out_free_dev;
2190         }
2191
2192         bp->rx_pending = B44_DEF_RX_RING_PENDING;
2193         bp->tx_pending = B44_DEF_TX_RING_PENDING;
2194
2195         dev->open = b44_open;
2196         dev->stop = b44_close;
2197         dev->hard_start_xmit = b44_start_xmit;
2198         dev->get_stats = b44_get_stats;
2199         dev->set_multicast_list = b44_set_rx_mode;
2200         dev->set_mac_address = b44_set_mac_addr;
2201         dev->do_ioctl = b44_ioctl;
2202         dev->tx_timeout = b44_tx_timeout;
2203         dev->poll = b44_poll;
2204         dev->weight = 64;
2205         dev->watchdog_timeo = B44_TX_TIMEOUT;
2206 #ifdef CONFIG_NET_POLL_CONTROLLER
2207         dev->poll_controller = b44_poll_controller;
2208 #endif
2209         dev->change_mtu = b44_change_mtu;
2210         dev->irq = pdev->irq;
2211         SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2212
2213         netif_carrier_off(dev);
2214
2215         err = b44_get_invariants(bp);
2216         if (err) {
2217                 dev_err(&pdev->dev,
2218                         "Problem fetching invariants of chip, aborting.\n");
2219                 goto err_out_iounmap;
2220         }
2221
2222         bp->mii_if.dev = dev;
2223         bp->mii_if.mdio_read = b44_mii_read;
2224         bp->mii_if.mdio_write = b44_mii_write;
2225         bp->mii_if.phy_id = bp->phy_addr;
2226         bp->mii_if.phy_id_mask = 0x1f;
2227         bp->mii_if.reg_num_mask = 0x1f;
2228
2229         /* By default, advertise all speed/duplex settings. */
2230         bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2231                       B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2232
2233         /* By default, auto-negotiate PAUSE. */
2234         bp->flags |= B44_FLAG_PAUSE_AUTO;
2235
2236         err = register_netdev(dev);
2237         if (err) {
2238                 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2239                 goto err_out_iounmap;
2240         }
2241
2242         pci_set_drvdata(pdev, dev);
2243
2244         pci_save_state(bp->pdev);
2245
2246         /* Chip reset provides power to the b44 MAC & PCI cores, which
2247          * is necessary for MAC register access.
2248          */
2249         b44_chip_reset(bp);
2250
2251         printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2252         for (i = 0; i < 6; i++)
2253                 printk("%2.2x%c", dev->dev_addr[i],
2254                        i == 5 ? '\n' : ':');
2255
2256         return 0;
2257
2258 err_out_iounmap:
2259         iounmap(bp->regs);
2260
2261 err_out_free_dev:
2262         free_netdev(dev);
2263
2264 err_out_free_res:
2265         pci_release_regions(pdev);
2266
2267 err_out_disable_pdev:
2268         pci_disable_device(pdev);
2269         pci_set_drvdata(pdev, NULL);
2270         return err;
2271 }
2272
2273 static void __devexit b44_remove_one(struct pci_dev *pdev)
2274 {
2275         struct net_device *dev = pci_get_drvdata(pdev);
2276         struct b44 *bp = netdev_priv(dev);
2277
2278         unregister_netdev(dev);
2279         iounmap(bp->regs);
2280         free_netdev(dev);
2281         pci_release_regions(pdev);
2282         pci_disable_device(pdev);
2283         pci_set_drvdata(pdev, NULL);
2284 }
2285
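/* On suspend, quiesce the chip and, if Wake-On-LAN is enabled,
 * re-initialize just enough of the hardware to arm the WOL logic
 * before main power is removed.
 */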
2286 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2287 {
2288         struct net_device *dev = pci_get_drvdata(pdev);
2289         struct b44 *bp = netdev_priv(dev);
2290
2291         if (!netif_running(dev))
2292                  return 0;
2293
2294         del_timer_sync(&bp->timer);
2295
2296         spin_lock_irq(&bp->lock);
2297
2298         b44_halt(bp);
2299         netif_carrier_off(bp->dev);
2300         netif_device_detach(bp->dev);
2301         b44_free_rings(bp);
2302
2303         spin_unlock_irq(&bp->lock);
2304
2305         free_irq(dev->irq, dev);
2306         if (bp->flags & B44_FLAG_WOL_ENABLE) {
2307                 b44_init_hw(bp, 0);
2308                 b44_setup_wol(bp);
2309         }
2310         pci_disable_device(pdev);
2311         return 0;
2312 }
2313
2314 static int b44_resume(struct pci_dev *pdev)
2315 {
2316         struct net_device *dev = pci_get_drvdata(pdev);
2317         struct b44 *bp = netdev_priv(dev);
2318
2319         pci_restore_state(pdev);
2320         pci_enable_device(pdev);
2321         pci_set_master(pdev);
2322
2323         if (!netif_running(dev))
2324                 return 0;
2325
2326         if (request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev))
2327                 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2328
2329         spin_lock_irq(&bp->lock);
2330
2331         b44_init_rings(bp);
2332         b44_init_hw(bp, 1);
2333         netif_device_attach(bp->dev);
2334         spin_unlock_irq(&bp->lock);
2335
2336         bp->timer.expires = jiffies + HZ;
2337         add_timer(&bp->timer);
2338
2339         b44_enable_ints(bp);
2340         netif_wake_queue(dev);
2341         return 0;
2342 }
2343
2344 static struct pci_driver b44_driver = {
2345         .name           = DRV_MODULE_NAME,
2346         .id_table       = b44_pci_tbl,
2347         .probe          = b44_init_one,
2348         .remove         = __devexit_p(b44_remove_one),
2349         .suspend        = b44_suspend,
2350         .resume         = b44_resume,
2351 };
2352
2353 static int __init b44_init(void)
2354 {
2355         unsigned int dma_desc_align_size = dma_get_cache_alignment();
2356
2357         /* Set up parameters for syncing RX/TX DMA descriptors */
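        /* dma_get_cache_alignment() returns a power of two, so the
         * ~(align - 1) computation below yields a proper alignment mask.
         */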
2358         dma_desc_align_mask = ~(dma_desc_align_size - 1);
2359         dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
2360
2361         return pci_register_driver(&b44_driver);
2362 }
2363
2364 static void __exit b44_cleanup(void)
2365 {
2366         pci_unregister_driver(&b44_driver);
2367 }
2368
2369 module_init(b44_init);
2370 module_exit(b44_cleanup);
2371