net/ethernet/jme: disable ASPM
[pandora-kernel.git] drivers/net/ethernet/jme.c
1 /*
2  * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3  *
4  * Copyright 2008 JMicron Technology Corporation
5  * http://www.jmicron.com/
6  * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
7  *
8  * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  *
23  */
24
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
30 #include <linux/pci-aspm.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/crc32.h>
36 #include <linux/delay.h>
37 #include <linux/spinlock.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/ipv6.h>
41 #include <linux/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/if_vlan.h>
44 #include <linux/slab.h>
45 #include <net/ip6_checksum.h>
46 #include "jme.h"
47
48 static int force_pseudohp = -1;
49 static int no_pseudohp = -1;
50 static int no_extplug = -1;
51 module_param(force_pseudohp, int, 0);
52 MODULE_PARM_DESC(force_pseudohp,
53         "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
54 module_param(no_pseudohp, int, 0);
55 MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
56 module_param(no_extplug, int, 0);
57 MODULE_PARM_DESC(no_extplug,
58         "Do not use external plug signal for pseudo hot-plug.");
59
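/*
 * Usage sketch (not part of the original source): with the module built
 * from this file as "jme", pseudo hot-plug behaviour could be forced at
 * load time, e.g.
 *
 *	modprobe jme force_pseudohp=1 no_extplug=1
 *
 * The 0 permission on module_param() makes these load-time only (not
 * exposed in sysfs); the -1 defaults are presumably resolved by the
 * chip-revision checks later in the file.
 */
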
60 static int
61 jme_mdio_read(struct net_device *netdev, int phy, int reg)
62 {
63         struct jme_adapter *jme = netdev_priv(netdev);
64         int i, val, again = (reg == MII_BMSR) ? 1 : 0;
65
66 read_again:
67         jwrite32(jme, JME_SMI, SMI_OP_REQ |
68                                 smi_phy_addr(phy) |
69                                 smi_reg_addr(reg));
70
71         wmb();
72         for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
73                 udelay(20);
74                 val = jread32(jme, JME_SMI);
75                 if ((val & SMI_OP_REQ) == 0)
76                         break;
77         }
78
79         if (i == 0) {
80                 pr_err("phy(%d) read timeout : %d\n", phy, reg);
81                 return 0;
82         }
83
84         if (again--)
85                 goto read_again;
86
87         return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
88 }
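
/*
 * Note on the "again" logic above: when reg == MII_BMSR the SMI read is
 * issued twice.  Several BMSR status bits (link status in particular) are
 * latched by the PHY, so the second read reports the current state rather
 * than a stale latched value.
 */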
89
90 static void
91 jme_mdio_write(struct net_device *netdev,
92                                 int phy, int reg, int val)
93 {
94         struct jme_adapter *jme = netdev_priv(netdev);
95         int i;
96
97         jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
98                 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
99                 smi_phy_addr(phy) | smi_reg_addr(reg));
100
101         wmb();
102         for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
103                 udelay(20);
104                 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
105                         break;
106         }
107
108         if (i == 0)
109                 pr_err("phy(%d) write timeout : %d\n", phy, reg);
110 }
111
112 static inline void
113 jme_reset_phy_processor(struct jme_adapter *jme)
114 {
115         u32 val;
116
117         jme_mdio_write(jme->dev,
118                         jme->mii_if.phy_id,
119                         MII_ADVERTISE, ADVERTISE_ALL |
120                         ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
121
122         if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
123                 jme_mdio_write(jme->dev,
124                                 jme->mii_if.phy_id,
125                                 MII_CTRL1000,
126                                 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
127
128         val = jme_mdio_read(jme->dev,
129                                 jme->mii_if.phy_id,
130                                 MII_BMCR);
131
132         jme_mdio_write(jme->dev,
133                         jme->mii_if.phy_id,
134                         MII_BMCR, val | BMCR_RESET);
135 }
136
137 static void
138 jme_setup_wakeup_frame(struct jme_adapter *jme,
139                        const u32 *mask, u32 crc, int fnr)
140 {
141         int i;
142
143         /*
144          * Setup CRC pattern
145          */
146         jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
147         wmb();
148         jwrite32(jme, JME_WFODP, crc);
149         wmb();
150
151         /*
152          * Setup Mask
153          */
154         for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
155                 jwrite32(jme, JME_WFOI,
156                                 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
157                                 (fnr & WFOI_FRAME_SEL));
158                 wmb();
159                 jwrite32(jme, JME_WFODP, mask[i]);
160                 wmb();
161         }
162 }
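
/*
 * Programming model used above: JME_WFOI selects what the next write to
 * JME_WFODP lands on, either the CRC word (WFOI_CRC_SEL) or one of the
 * WAKEUP_FRAME_MASK_DWNR mask dwords, qualified by the frame number in
 * WFOI_FRAME_SEL.  The wmb() between the two writes keeps the index write
 * ordered ahead of the data write.
 */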
163
164 static inline void
165 jme_mac_rxclk_off(struct jme_adapter *jme)
166 {
167         jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
168         jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
169 }
170
171 static inline void
172 jme_mac_rxclk_on(struct jme_adapter *jme)
173 {
174         jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
175         jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
176 }
177
178 static inline void
179 jme_mac_txclk_off(struct jme_adapter *jme)
180 {
181         jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
182         jwrite32f(jme, JME_GHC, jme->reg_ghc);
183 }
184
185 static inline void
186 jme_mac_txclk_on(struct jme_adapter *jme)
187 {
188         u32 speed = jme->reg_ghc & GHC_SPEED;
189         if (speed == GHC_SPEED_1000M)
190                 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
191         else
192                 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
193         jwrite32f(jme, JME_GHC, jme->reg_ghc);
194 }
195
196 static inline void
197 jme_reset_ghc_speed(struct jme_adapter *jme)
198 {
199         jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
200         jwrite32f(jme, JME_GHC, jme->reg_ghc);
201 }
202
203 static inline void
204 jme_reset_250A2_workaround(struct jme_adapter *jme)
205 {
206         jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
207                              GPREG1_RSSPATCH);
208         jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
209 }
210
211 static inline void
212 jme_assert_ghc_reset(struct jme_adapter *jme)
213 {
214         jme->reg_ghc |= GHC_SWRST;
215         jwrite32f(jme, JME_GHC, jme->reg_ghc);
216 }
217
218 static inline void
219 jme_clear_ghc_reset(struct jme_adapter *jme)
220 {
221         jme->reg_ghc &= ~GHC_SWRST;
222         jwrite32f(jme, JME_GHC, jme->reg_ghc);
223 }
224
225 static inline void
226 jme_reset_mac_processor(struct jme_adapter *jme)
227 {
228         static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
229         u32 crc = 0xCDCDCDCD;
230         u32 gpreg0;
231         int i;
232
233         jme_reset_ghc_speed(jme);
234         jme_reset_250A2_workaround(jme);
235
236         jme_mac_rxclk_on(jme);
237         jme_mac_txclk_on(jme);
238         udelay(1);
239         jme_assert_ghc_reset(jme);
240         udelay(1);
241         jme_mac_rxclk_off(jme);
242         jme_mac_txclk_off(jme);
243         udelay(1);
244         jme_clear_ghc_reset(jme);
245         udelay(1);
246         jme_mac_rxclk_on(jme);
247         jme_mac_txclk_on(jme);
248         udelay(1);
249         jme_mac_rxclk_off(jme);
250         jme_mac_txclk_off(jme);
251
252         jwrite32(jme, JME_RXDBA_LO, 0x00000000);
253         jwrite32(jme, JME_RXDBA_HI, 0x00000000);
254         jwrite32(jme, JME_RXQDC, 0x00000000);
255         jwrite32(jme, JME_RXNDA, 0x00000000);
256         jwrite32(jme, JME_TXDBA_LO, 0x00000000);
257         jwrite32(jme, JME_TXDBA_HI, 0x00000000);
258         jwrite32(jme, JME_TXQDC, 0x00000000);
259         jwrite32(jme, JME_TXNDA, 0x00000000);
260
261         jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
262         jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
263         for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
264                 jme_setup_wakeup_frame(jme, mask, crc, i);
265         if (jme->fpgaver)
266                 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
267         else
268                 gpreg0 = GPREG0_DEFAULT;
269         jwrite32(jme, JME_GPREG0, gpreg0);
270 }
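
/*
 * Reset sequence summary (as coded above): clear the speed/duplex bits and
 * the JMC250A2 workaround bits, pulse GHC_SWRST while toggling the RX/TX
 * MAC clocks around it, then zero the ring base/count registers, the
 * multicast hash and every wakeup frame before restoring GPREG0.
 */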
271
272 static inline void
273 jme_clear_pm(struct jme_adapter *jme)
274 {
275         jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
276 }
277
278 static int
279 jme_reload_eeprom(struct jme_adapter *jme)
280 {
281         u32 val;
282         int i;
283
284         val = jread32(jme, JME_SMBCSR);
285
286         if (val & SMBCSR_EEPROMD) {
287                 val |= SMBCSR_CNACK;
288                 jwrite32(jme, JME_SMBCSR, val);
289                 val |= SMBCSR_RELOAD;
290                 jwrite32(jme, JME_SMBCSR, val);
291                 mdelay(12);
292
293                 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
294                         mdelay(1);
295                         if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
296                                 break;
297                 }
298
299                 if (i == 0) {
300                         pr_err("eeprom reload timeout\n");
301                         return -EIO;
302                 }
303         }
304
305         return 0;
306 }
307
308 static void
309 jme_load_macaddr(struct net_device *netdev)
310 {
311         struct jme_adapter *jme = netdev_priv(netdev);
312         unsigned char macaddr[6];
313         u32 val;
314
315         spin_lock_bh(&jme->macaddr_lock);
316         val = jread32(jme, JME_RXUMA_LO);
317         macaddr[0] = (val >>  0) & 0xFF;
318         macaddr[1] = (val >>  8) & 0xFF;
319         macaddr[2] = (val >> 16) & 0xFF;
320         macaddr[3] = (val >> 24) & 0xFF;
321         val = jread32(jme, JME_RXUMA_HI);
322         macaddr[4] = (val >>  0) & 0xFF;
323         macaddr[5] = (val >>  8) & 0xFF;
324         memcpy(netdev->dev_addr, macaddr, 6);
325         spin_unlock_bh(&jme->macaddr_lock);
326 }
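
/*
 * RXUMA register layout (illustration derived from the shifts above): the
 * MAC address is stored little-endian per register, so 00:11:22:33:44:55
 * reads back as RXUMA_LO = 0x33221100 and RXUMA_HI = 0x00005544.
 */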
327
328 static inline void
329 jme_set_rx_pcc(struct jme_adapter *jme, int p)
330 {
331         switch (p) {
332         case PCC_OFF:
333                 jwrite32(jme, JME_PCCRX0,
334                         ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
335                         ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
336                 break;
337         case PCC_P1:
338                 jwrite32(jme, JME_PCCRX0,
339                         ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
340                         ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
341                 break;
342         case PCC_P2:
343                 jwrite32(jme, JME_PCCRX0,
344                         ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
345                         ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
346                 break;
347         case PCC_P3:
348                 jwrite32(jme, JME_PCCRX0,
349                         ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
350                         ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
351                 break;
352         default:
353                 break;
354         }
355         wmb();
356
357         if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
358                 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
359 }
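
/*
 * PCC_OFF/P1/P2/P3 are interrupt-coalescing presets: each writes a
 * timeout/packet-count pair into JME_PCCRX0, trading completion latency
 * against interrupt rate.  PCC_OFF is used while NAPI polling is active,
 * which is also why the "Switched to PCC_Px" message is suppressed when
 * JME_FLAG_POLL is set.
 */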
360
361 static void
362 jme_start_irq(struct jme_adapter *jme)
363 {
364         register struct dynpcc_info *dpi = &(jme->dpi);
365
366         jme_set_rx_pcc(jme, PCC_P1);
367         dpi->cur                = PCC_P1;
368         dpi->attempt            = PCC_P1;
369         dpi->cnt                = 0;
370
371         jwrite32(jme, JME_PCCTX,
372                         ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
373                         ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
374                         PCCTXQ0_EN
375                 );
376
377         /*
378          * Enable Interrupts
379          */
380         jwrite32(jme, JME_IENS, INTR_ENABLE);
381 }
382
383 static inline void
384 jme_stop_irq(struct jme_adapter *jme)
385 {
386         /*
387          * Disable Interrupts
388          */
389         jwrite32f(jme, JME_IENC, INTR_ENABLE);
390 }
391
392 static u32
393 jme_linkstat_from_phy(struct jme_adapter *jme)
394 {
395         u32 phylink, bmsr;
396
397         phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
398         bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
399         if (bmsr & BMSR_ANCOMP)
400                 phylink |= PHY_LINK_AUTONEG_COMPLETE;
401
402         return phylink;
403 }
404
405 static inline void
406 jme_set_phyfifo_5level(struct jme_adapter *jme)
407 {
408         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
409 }
410
411 static inline void
412 jme_set_phyfifo_8level(struct jme_adapter *jme)
413 {
414         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
415 }
416
417 static int
418 jme_check_link(struct net_device *netdev, int testonly)
419 {
420         struct jme_adapter *jme = netdev_priv(netdev);
421         u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
422         char linkmsg[64];
423         int rc = 0;
424
425         linkmsg[0] = '\0';
426
427         if (jme->fpgaver)
428                 phylink = jme_linkstat_from_phy(jme);
429         else
430                 phylink = jread32(jme, JME_PHY_LINK);
431
432         if (phylink & PHY_LINK_UP) {
433                 if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
434                         /*
435                          * If autonegotiation was not enabled,
436                          * speed/duplex info must be obtained from the PHY via SMI
437                          */
438                         phylink = PHY_LINK_UP;
439
440                         bmcr = jme_mdio_read(jme->dev,
441                                                 jme->mii_if.phy_id,
442                                                 MII_BMCR);
443
444                         phylink |= ((bmcr & BMCR_SPEED1000) &&
445                                         (bmcr & BMCR_SPEED100) == 0) ?
446                                         PHY_LINK_SPEED_1000M :
447                                         (bmcr & BMCR_SPEED100) ?
448                                         PHY_LINK_SPEED_100M :
449                                         PHY_LINK_SPEED_10M;
450
451                         phylink |= (bmcr & BMCR_FULLDPLX) ?
452                                          PHY_LINK_DUPLEX : 0;
453
454                         strcat(linkmsg, "Forced: ");
455                 } else {
456                         /*
457                          * Keep polling until speed/duplex resolution completes
458                          */
459                         while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
460                                 --cnt) {
461
462                                 udelay(1);
463
464                                 if (jme->fpgaver)
465                                         phylink = jme_linkstat_from_phy(jme);
466                                 else
467                                         phylink = jread32(jme, JME_PHY_LINK);
468                         }
469                         if (!cnt)
470                                 pr_err("Timeout waiting for speed/duplex resolution\n");
471
472                         strcat(linkmsg, "ANed: ");
473                 }
474
475                 if (jme->phylink == phylink) {
476                         rc = 1;
477                         goto out;
478                 }
479                 if (testonly)
480                         goto out;
481
482                 jme->phylink = phylink;
483
484                 /*
485                  * The speed/duplex bits of jme->reg_ghc have already been
486                  * cleared by jme_reset_mac_processor()
487                  */
488                 switch (phylink & PHY_LINK_SPEED_MASK) {
489                 case PHY_LINK_SPEED_10M:
490                         jme->reg_ghc |= GHC_SPEED_10M;
491                         strcat(linkmsg, "10 Mbps, ");
492                         break;
493                 case PHY_LINK_SPEED_100M:
494                         jme->reg_ghc |= GHC_SPEED_100M;
495                         strcat(linkmsg, "100 Mbps, ");
496                         break;
497                 case PHY_LINK_SPEED_1000M:
498                         jme->reg_ghc |= GHC_SPEED_1000M;
499                         strcat(linkmsg, "1000 Mbps, ");
500                         break;
501                 default:
502                         break;
503                 }
504
505                 if (phylink & PHY_LINK_DUPLEX) {
506                         jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
507                         jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
508                         jme->reg_ghc |= GHC_DPX;
509                 } else {
510                         jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
511                                                 TXMCS_BACKOFF |
512                                                 TXMCS_CARRIERSENSE |
513                                                 TXMCS_COLLISION);
514                         jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
515                 }
516
517                 jwrite32(jme, JME_GHC, jme->reg_ghc);
518
519                 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
520                         jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
521                                              GPREG1_RSSPATCH);
522                         if (!(phylink & PHY_LINK_DUPLEX))
523                                 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
524                         switch (phylink & PHY_LINK_SPEED_MASK) {
525                         case PHY_LINK_SPEED_10M:
526                                 jme_set_phyfifo_8level(jme);
527                                 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
528                                 break;
529                         case PHY_LINK_SPEED_100M:
530                                 jme_set_phyfifo_5level(jme);
531                                 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
532                                 break;
533                         case PHY_LINK_SPEED_1000M:
534                                 jme_set_phyfifo_8level(jme);
535                                 break;
536                         default:
537                                 break;
538                         }
539                 }
540                 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
541
542                 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
543                                         "Full-Duplex, " :
544                                         "Half-Duplex, ");
545                 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
546                                         "MDI-X" :
547                                         "MDI");
548                 netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
549                 netif_carrier_on(netdev);
550         } else {
551                 if (testonly)
552                         goto out;
553
554                 netif_info(jme, link, jme->dev, "Link is down\n");
555                 jme->phylink = 0;
556                 netif_carrier_off(netdev);
557         }
558
559 out:
560         return rc;
561 }
562
563 static int
564 jme_setup_tx_resources(struct jme_adapter *jme)
565 {
566         struct jme_ring *txring = &(jme->txring[0]);
567
568         txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
569                                    TX_RING_ALLOC_SIZE(jme->tx_ring_size),
570                                    &(txring->dmaalloc),
571                                    GFP_ATOMIC);
572
573         if (!txring->alloc)
574                 goto err_set_null;
575
576         /*
577          * 16-byte alignment
578          */
579         txring->desc            = (void *)ALIGN((unsigned long)(txring->alloc),
580                                                 RING_DESC_ALIGN);
581         txring->dma             = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
582         txring->next_to_use     = 0;
583         atomic_set(&txring->next_to_clean, 0);
584         atomic_set(&txring->nr_free, jme->tx_ring_size);
585
586         txring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
587                                         jme->tx_ring_size, GFP_ATOMIC);
588         if (unlikely(!(txring->bufinf)))
589                 goto err_free_txring;
590
591         /*
592          * Initialize Transmit Descriptors
593          */
594         memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
595         memset(txring->bufinf, 0,
596                 sizeof(struct jme_buffer_info) * jme->tx_ring_size);
597
598         return 0;
599
600 err_free_txring:
601         dma_free_coherent(&(jme->pdev->dev),
602                           TX_RING_ALLOC_SIZE(jme->tx_ring_size),
603                           txring->alloc,
604                           txring->dmaalloc);
605
606 err_set_null:
607         txring->desc = NULL;
608         txring->dmaalloc = 0;
609         txring->dma = 0;
610         txring->bufinf = NULL;
611
612         return -ENOMEM;
613 }
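
/*
 * Allocation note (assumption): TX_RING_ALLOC_SIZE() is taken to include
 * slack for the RING_DESC_ALIGN (16-byte) round-up performed above, so
 * aligning desc/dma inside the coherent buffer cannot overrun it.
 */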
614
615 static void
616 jme_free_tx_resources(struct jme_adapter *jme)
617 {
618         int i;
619         struct jme_ring *txring = &(jme->txring[0]);
620         struct jme_buffer_info *txbi;
621
622         if (txring->alloc) {
623                 if (txring->bufinf) {
624                         for (i = 0 ; i < jme->tx_ring_size ; ++i) {
625                                 txbi = txring->bufinf + i;
626                                 if (txbi->skb) {
627                                         dev_kfree_skb(txbi->skb);
628                                         txbi->skb = NULL;
629                                 }
630                                 txbi->mapping           = 0;
631                                 txbi->len               = 0;
632                                 txbi->nr_desc           = 0;
633                                 txbi->start_xmit        = 0;
634                         }
635                         kfree(txring->bufinf);
636                 }
637
638                 dma_free_coherent(&(jme->pdev->dev),
639                                   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
640                                   txring->alloc,
641                                   txring->dmaalloc);
642
643                 txring->alloc           = NULL;
644                 txring->desc            = NULL;
645                 txring->dmaalloc        = 0;
646                 txring->dma             = 0;
647                 txring->bufinf          = NULL;
648         }
649         txring->next_to_use     = 0;
650         atomic_set(&txring->next_to_clean, 0);
651         atomic_set(&txring->nr_free, 0);
652 }
653
654 static inline void
655 jme_enable_tx_engine(struct jme_adapter *jme)
656 {
657         /*
658          * Select Queue 0
659          */
660         jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
661         wmb();
662
663         /*
664          * Setup TX Queue 0 DMA Base Address
665          */
666         jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
667         jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
668         jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
669
670         /*
671          * Setup TX Descriptor Count
672          */
673         jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
674
675         /*
676          * Enable TX Engine
677          */
678         wmb();
679         jwrite32f(jme, JME_TXCS, jme->reg_txcs |
680                                 TXCS_SELECT_QUEUE0 |
681                                 TXCS_ENABLE);
682
683         /*
684          * Start clock for TX MAC Processor
685          */
686         jme_mac_txclk_on(jme);
687 }
688
689 static inline void
690 jme_restart_tx_engine(struct jme_adapter *jme)
691 {
692         /*
693          * Restart TX Engine
694          */
695         jwrite32(jme, JME_TXCS, jme->reg_txcs |
696                                 TXCS_SELECT_QUEUE0 |
697                                 TXCS_ENABLE);
698 }
699
700 static inline void
701 jme_disable_tx_engine(struct jme_adapter *jme)
702 {
703         int i;
704         u32 val;
705
706         /*
707          * Disable TX Engine
708          */
709         jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
710         wmb();
711
712         val = jread32(jme, JME_TXCS);
713         for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
714                 mdelay(1);
715                 val = jread32(jme, JME_TXCS);
716                 rmb();
717         }
718
719         if (!i)
720                 pr_err("Disable TX engine timeout\n");
721
722         /*
723          * Stop clock for TX MAC Processor
724          */
725         jme_mac_txclk_off(jme);
726 }
727
728 static void
729 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
730 {
731         struct jme_ring *rxring = &(jme->rxring[0]);
732         register struct rxdesc *rxdesc = rxring->desc;
733         struct jme_buffer_info *rxbi = rxring->bufinf;
734         rxdesc += i;
735         rxbi += i;
736
737         rxdesc->dw[0] = 0;
738         rxdesc->dw[1] = 0;
739         rxdesc->desc1.bufaddrh  = cpu_to_le32((__u64)rxbi->mapping >> 32);
740         rxdesc->desc1.bufaddrl  = cpu_to_le32(
741                                         (__u64)rxbi->mapping & 0xFFFFFFFFUL);
742         rxdesc->desc1.datalen   = cpu_to_le16(rxbi->len);
743         if (jme->dev->features & NETIF_F_HIGHDMA)
744                 rxdesc->desc1.flags = RXFLAG_64BIT;
745         wmb();
746         rxdesc->desc1.flags     |= RXFLAG_OWN | RXFLAG_INT;
747 }
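
/*
 * Ownership handoff: the descriptor is rewritten completely first, then
 * the wmb() makes those stores visible before RXFLAG_OWN is set, the
 * point at which the NIC may start DMAing into the buffer.
 */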
748
749 static int
750 jme_make_new_rx_buf(struct jme_adapter *jme, int i)
751 {
752         struct jme_ring *rxring = &(jme->rxring[0]);
753         struct jme_buffer_info *rxbi = rxring->bufinf + i;
754         struct sk_buff *skb;
755         dma_addr_t mapping;
756
757         skb = netdev_alloc_skb(jme->dev,
758                 jme->dev->mtu + RX_EXTRA_LEN);
759         if (unlikely(!skb))
760                 return -ENOMEM;
761
762         mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
763                                offset_in_page(skb->data), skb_tailroom(skb),
764                                PCI_DMA_FROMDEVICE);
765         if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
766                 dev_kfree_skb(skb);
767                 return -ENOMEM;
768         }
769
770         if (likely(rxbi->mapping))
771                 pci_unmap_page(jme->pdev, rxbi->mapping,
772                                rxbi->len, PCI_DMA_FROMDEVICE);
773
774         rxbi->skb = skb;
775         rxbi->len = skb_tailroom(skb);
776         rxbi->mapping = mapping;
777         return 0;
778 }
779
780 static void
781 jme_free_rx_buf(struct jme_adapter *jme, int i)
782 {
783         struct jme_ring *rxring = &(jme->rxring[0]);
784         struct jme_buffer_info *rxbi = rxring->bufinf;
785         rxbi += i;
786
787         if (rxbi->skb) {
788                 pci_unmap_page(jme->pdev,
789                                  rxbi->mapping,
790                                  rxbi->len,
791                                  PCI_DMA_FROMDEVICE);
792                 dev_kfree_skb(rxbi->skb);
793                 rxbi->skb = NULL;
794                 rxbi->mapping = 0;
795                 rxbi->len = 0;
796         }
797 }
798
799 static void
800 jme_free_rx_resources(struct jme_adapter *jme)
801 {
802         int i;
803         struct jme_ring *rxring = &(jme->rxring[0]);
804
805         if (rxring->alloc) {
806                 if (rxring->bufinf) {
807                         for (i = 0 ; i < jme->rx_ring_size ; ++i)
808                                 jme_free_rx_buf(jme, i);
809                         kfree(rxring->bufinf);
810                 }
811
812                 dma_free_coherent(&(jme->pdev->dev),
813                                   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
814                                   rxring->alloc,
815                                   rxring->dmaalloc);
816                 rxring->alloc    = NULL;
817                 rxring->desc     = NULL;
818                 rxring->dmaalloc = 0;
819                 rxring->dma      = 0;
820                 rxring->bufinf   = NULL;
821         }
822         rxring->next_to_use   = 0;
823         atomic_set(&rxring->next_to_clean, 0);
824 }
825
826 static int
827 jme_setup_rx_resources(struct jme_adapter *jme)
828 {
829         int i;
830         struct jme_ring *rxring = &(jme->rxring[0]);
831
832         rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
833                                    RX_RING_ALLOC_SIZE(jme->rx_ring_size),
834                                    &(rxring->dmaalloc),
835                                    GFP_ATOMIC);
836         if (!rxring->alloc)
837                 goto err_set_null;
838
839         /*
840          * 16-byte alignment
841          */
842         rxring->desc            = (void *)ALIGN((unsigned long)(rxring->alloc),
843                                                 RING_DESC_ALIGN);
844         rxring->dma             = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
845         rxring->next_to_use     = 0;
846         atomic_set(&rxring->next_to_clean, 0);
847
848         rxring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
849                                         jme->rx_ring_size, GFP_ATOMIC);
850         if (unlikely(!(rxring->bufinf)))
851                 goto err_free_rxring;
852
853         /*
854          * Initialize Receive Descriptors
855          */
856         memset(rxring->bufinf, 0,
857                 sizeof(struct jme_buffer_info) * jme->rx_ring_size);
858         for (i = 0 ; i < jme->rx_ring_size ; ++i) {
859                 if (unlikely(jme_make_new_rx_buf(jme, i))) {
860                         jme_free_rx_resources(jme);
861                         return -ENOMEM;
862                 }
863
864                 jme_set_clean_rxdesc(jme, i);
865         }
866
867         return 0;
868
869 err_free_rxring:
870         dma_free_coherent(&(jme->pdev->dev),
871                           RX_RING_ALLOC_SIZE(jme->rx_ring_size),
872                           rxring->alloc,
873                           rxring->dmaalloc);
874 err_set_null:
875         rxring->desc = NULL;
876         rxring->dmaalloc = 0;
877         rxring->dma = 0;
878         rxring->bufinf = NULL;
879
880         return -ENOMEM;
881 }
882
883 static inline void
884 jme_enable_rx_engine(struct jme_adapter *jme)
885 {
886         /*
887          * Select Queue 0
888          */
889         jwrite32(jme, JME_RXCS, jme->reg_rxcs |
890                                 RXCS_QUEUESEL_Q0);
891         wmb();
892
893         /*
894          * Setup RX DMA Base Address
895          */
896         jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
897         jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
898         jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
899
900         /*
901          * Setup RX Descriptor Count
902          */
903         jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
904
905         /*
906          * Setup Unicast Filter
907          */
908         jme_set_unicastaddr(jme->dev);
909         jme_set_multi(jme->dev);
910
911         /*
912          * Enable RX Engine
913          */
914         wmb();
915         jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
916                                 RXCS_QUEUESEL_Q0 |
917                                 RXCS_ENABLE |
918                                 RXCS_QST);
919
920         /*
921          * Start clock for RX MAC Processor
922          */
923         jme_mac_rxclk_on(jme);
924 }
925
926 static inline void
927 jme_restart_rx_engine(struct jme_adapter *jme)
928 {
929         /*
930          * Start RX Engine
931          */
932         jwrite32(jme, JME_RXCS, jme->reg_rxcs |
933                                 RXCS_QUEUESEL_Q0 |
934                                 RXCS_ENABLE |
935                                 RXCS_QST);
936 }
937
938 static inline void
939 jme_disable_rx_engine(struct jme_adapter *jme)
940 {
941         int i;
942         u32 val;
943
944         /*
945          * Disable RX Engine
946          */
947         jwrite32(jme, JME_RXCS, jme->reg_rxcs);
948         wmb();
949
950         val = jread32(jme, JME_RXCS);
951         for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
952                 mdelay(1);
953                 val = jread32(jme, JME_RXCS);
954                 rmb();
955         }
956
957         if (!i)
958                 pr_err("Disable RX engine timeout\n");
959
960         /*
961          * Stop clock for RX MAC Processor
962          */
963         jme_mac_rxclk_off(jme);
964 }
965
966 static u16
967 jme_udpsum(struct sk_buff *skb)
968 {
969         u16 csum = 0xFFFFu;
970
971         if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
972                 return csum;
973         if (skb->protocol != htons(ETH_P_IP))
974                 return csum;
975         skb_set_network_header(skb, ETH_HLEN);
976         if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
977             (skb->len < (ETH_HLEN +
978                         (ip_hdr(skb)->ihl << 2) +
979                         sizeof(struct udphdr)))) {
980                 skb_reset_network_header(skb);
981                 return csum;
982         }
983         skb_set_transport_header(skb,
984                         ETH_HLEN + (ip_hdr(skb)->ihl << 2));
985         csum = udp_hdr(skb)->check;
986         skb_reset_transport_header(skb);
987         skb_reset_network_header(skb);
988
989         return csum;
990 }
991
992 static int
993 jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
994 {
995         if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
996                 return false;
997
998         if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
999                         == RXWBFLAG_TCPON)) {
1000                 if (flags & RXWBFLAG_IPV4)
1001                         netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
1002                 return false;
1003         }
1004
1005         if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
1006                         == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
1007                 if (flags & RXWBFLAG_IPV4)
1008                         netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
1009                 return false;
1010         }
1011
1012         if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
1013                         == RXWBFLAG_IPV4)) {
1014                 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
1015                 return false;
1016         }
1017
1018         return true;
1019 }
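
/*
 * jme_rxsum_ok() trusts the hardware checksum only when the write-back
 * flags are self-consistent: a TCP/UDP frame must also carry its *CS
 * "checksum good" bit (fragments excepted via RXWBFLAG_MF), and a UDP
 * datagram whose checksum field is zero (jme_udpsum() == 0) is not
 * counted as an error.
 */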
1020
1021 static void
1022 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1023 {
1024         struct jme_ring *rxring = &(jme->rxring[0]);
1025         struct rxdesc *rxdesc = rxring->desc;
1026         struct jme_buffer_info *rxbi = rxring->bufinf;
1027         struct sk_buff *skb;
1028         int framesize;
1029
1030         rxdesc += idx;
1031         rxbi += idx;
1032
1033         skb = rxbi->skb;
1034         pci_dma_sync_single_for_cpu(jme->pdev,
1035                                         rxbi->mapping,
1036                                         rxbi->len,
1037                                         PCI_DMA_FROMDEVICE);
1038
1039         if (unlikely(jme_make_new_rx_buf(jme, idx))) {
1040                 pci_dma_sync_single_for_device(jme->pdev,
1041                                                 rxbi->mapping,
1042                                                 rxbi->len,
1043                                                 PCI_DMA_FROMDEVICE);
1044
1045                 ++(NET_STAT(jme).rx_dropped);
1046         } else {
1047                 framesize = le16_to_cpu(rxdesc->descwb.framesize)
1048                                 - RX_PREPAD_SIZE;
1049
1050                 skb_reserve(skb, RX_PREPAD_SIZE);
1051                 skb_put(skb, framesize);
1052                 skb->protocol = eth_type_trans(skb, jme->dev);
1053
1054                 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
1055                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1056                 else
1057                         skb_checksum_none_assert(skb);
1058
1059                 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
1060                         u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
1061
1062                         __vlan_hwaccel_put_tag(skb, vid);
1063                         NET_STAT(jme).rx_bytes += 4;
1064                 }
1065                 jme->jme_rx(skb);
1066
1067                 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
1068                     cpu_to_le16(RXWBFLAG_DEST_MUL))
1069                         ++(NET_STAT(jme).multicast);
1070
1071                 NET_STAT(jme).rx_bytes += framesize;
1072                 ++(NET_STAT(jme).rx_packets);
1073         }
1074
1075         jme_set_clean_rxdesc(jme, idx);
1076
1077 }
1078
1079 static int
1080 jme_process_receive(struct jme_adapter *jme, int limit)
1081 {
1082         struct jme_ring *rxring = &(jme->rxring[0]);
1083         struct rxdesc *rxdesc = rxring->desc;
1084         int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
1085
1086         if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
1087                 goto out_inc;
1088
1089         if (unlikely(atomic_read(&jme->link_changing) != 1))
1090                 goto out_inc;
1091
1092         if (unlikely(!netif_carrier_ok(jme->dev)))
1093                 goto out_inc;
1094
1095         i = atomic_read(&rxring->next_to_clean);
1096         while (limit > 0) {
1097                 rxdesc = rxring->desc;
1098                 rxdesc += i;
1099
1100                 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
1101                 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
1102                         goto out;
1103                 --limit;
1104
1105                 rmb();
1106                 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
1107
1108                 if (unlikely(desccnt > 1 ||
1109                 rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
1110
1111                         if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
1112                                 ++(NET_STAT(jme).rx_crc_errors);
1113                         else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
1114                                 ++(NET_STAT(jme).rx_fifo_errors);
1115                         else
1116                                 ++(NET_STAT(jme).rx_errors);
1117
1118                         if (desccnt > 1)
1119                                 limit -= desccnt - 1;
1120
1121                         for (j = i, ccnt = desccnt ; ccnt-- ; ) {
1122                                 jme_set_clean_rxdesc(jme, j);
1123                                 j = (j + 1) & (mask);
1124                         }
1125
1126                 } else {
1127                         jme_alloc_and_feed_skb(jme, i);
1128                 }
1129
1130                 i = (i + desccnt) & (mask);
1131         }
1132
1133 out:
1134         atomic_set(&rxring->next_to_clean, i);
1135
1136 out_inc:
1137         atomic_inc(&jme->rx_cleaning);
1138
1139         return limit > 0 ? limit : 0;
1140
1141 }
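
/*
 * jme_process_receive() returns how much of "limit" is left, so callers
 * such as jme_poll() can tell whether the ring was drained within budget.
 * The rx_cleaning atomic acts as a non-blocking re-entrancy guard, and
 * link changes or a down carrier abort the pass early.
 */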
1142
1143 static void
1144 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
1145 {
1146         if (likely(atmp == dpi->cur)) {
1147                 dpi->cnt = 0;
1148                 return;
1149         }
1150
1151         if (dpi->attempt == atmp) {
1152                 ++(dpi->cnt);
1153         } else {
1154                 dpi->attempt = atmp;
1155                 dpi->cnt = 0;
1156         }
1157
1158 }
1159
1160 static void
1161 jme_dynamic_pcc(struct jme_adapter *jme)
1162 {
1163         register struct dynpcc_info *dpi = &(jme->dpi);
1164
1165         if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1166                 jme_attempt_pcc(dpi, PCC_P3);
1167         else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
1168                  dpi->intr_cnt > PCC_INTR_THRESHOLD)
1169                 jme_attempt_pcc(dpi, PCC_P2);
1170         else
1171                 jme_attempt_pcc(dpi, PCC_P1);
1172
1173         if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1174                 if (dpi->attempt < dpi->cur)
1175                         tasklet_schedule(&jme->rxclean_task);
1176                 jme_set_rx_pcc(jme, dpi->attempt);
1177                 dpi->cur = dpi->attempt;
1178                 dpi->cnt = 0;
1179         }
1180 }
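
/*
 * Dynamic coalescing heuristic: on each PCC timer tick the byte/packet
 * deltas select a target level (P3 for bulk throughput, P2 for high
 * packet or interrupt rates, otherwise P1).  The hardware is only
 * reprogrammed after the same target has won several consecutive ticks
 * (dpi->cnt > 5), which keeps the setting from flapping.
 */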
1181
1182 static void
1183 jme_start_pcc_timer(struct jme_adapter *jme)
1184 {
1185         struct dynpcc_info *dpi = &(jme->dpi);
1186         dpi->last_bytes         = NET_STAT(jme).rx_bytes;
1187         dpi->last_pkts          = NET_STAT(jme).rx_packets;
1188         dpi->intr_cnt           = 0;
1189         jwrite32(jme, JME_TMCSR,
1190                 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
1191 }
1192
1193 static inline void
1194 jme_stop_pcc_timer(struct jme_adapter *jme)
1195 {
1196         jwrite32(jme, JME_TMCSR, 0);
1197 }
1198
1199 static void
1200 jme_shutdown_nic(struct jme_adapter *jme)
1201 {
1202         u32 phylink;
1203
1204         phylink = jme_linkstat_from_phy(jme);
1205
1206         if (!(phylink & PHY_LINK_UP)) {
1207                 /*
1208                  * Disable all interrupts before issuing the timer
1209                  */
1210                 jme_stop_irq(jme);
1211                 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
1212         }
1213 }
1214
1215 static void
1216 jme_pcc_tasklet(unsigned long arg)
1217 {
1218         struct jme_adapter *jme = (struct jme_adapter *)arg;
1219         struct net_device *netdev = jme->dev;
1220
1221         if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
1222                 jme_shutdown_nic(jme);
1223                 return;
1224         }
1225
1226         if (unlikely(!netif_carrier_ok(netdev) ||
1227                 (atomic_read(&jme->link_changing) != 1)
1228         )) {
1229                 jme_stop_pcc_timer(jme);
1230                 return;
1231         }
1232
1233         if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
1234                 jme_dynamic_pcc(jme);
1235
1236         jme_start_pcc_timer(jme);
1237 }
1238
1239 static inline void
1240 jme_polling_mode(struct jme_adapter *jme)
1241 {
1242         jme_set_rx_pcc(jme, PCC_OFF);
1243 }
1244
1245 static inline void
1246 jme_interrupt_mode(struct jme_adapter *jme)
1247 {
1248         jme_set_rx_pcc(jme, PCC_P1);
1249 }
1250
1251 static inline int
1252 jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
1253 {
1254         u32 apmc;
1255         apmc = jread32(jme, JME_APMC);
1256         return apmc & JME_APMC_PSEUDO_HP_EN;
1257 }
1258
1259 static void
1260 jme_start_shutdown_timer(struct jme_adapter *jme)
1261 {
1262         u32 apmc;
1263
1264         apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
1265         apmc &= ~JME_APMC_EPIEN_CTRL;
1266         if (!no_extplug) {
1267                 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
1268                 wmb();
1269         }
1270         jwrite32f(jme, JME_APMC, apmc);
1271
1272         jwrite32f(jme, JME_TIMER2, 0);
1273         set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
1274         jwrite32(jme, JME_TMCSR,
1275                 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
1276 }
1277
1278 static void
1279 jme_stop_shutdown_timer(struct jme_adapter *jme)
1280 {
1281         u32 apmc;
1282
1283         jwrite32f(jme, JME_TMCSR, 0);
1284         jwrite32f(jme, JME_TIMER2, 0);
1285         clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
1286
1287         apmc = jread32(jme, JME_APMC);
1288         apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
1289         jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
1290         wmb();
1291         jwrite32f(jme, JME_APMC, apmc);
1292 }
1293
1294 static void
1295 jme_link_change_tasklet(unsigned long arg)
1296 {
1297         struct jme_adapter *jme = (struct jme_adapter *)arg;
1298         struct net_device *netdev = jme->dev;
1299         int rc;
1300
1301         while (!atomic_dec_and_test(&jme->link_changing)) {
1302                 atomic_inc(&jme->link_changing);
1303                 netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
1304                 while (atomic_read(&jme->link_changing) != 1)
1305                         netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
1306         }
1307
1308         if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
1309                 goto out;
1310
1311         jme->old_mtu = netdev->mtu;
1312         netif_stop_queue(netdev);
1313         if (jme_pseudo_hotplug_enabled(jme))
1314                 jme_stop_shutdown_timer(jme);
1315
1316         jme_stop_pcc_timer(jme);
1317         tasklet_disable(&jme->txclean_task);
1318         tasklet_disable(&jme->rxclean_task);
1319         tasklet_disable(&jme->rxempty_task);
1320
1321         if (netif_carrier_ok(netdev)) {
1322                 jme_disable_rx_engine(jme);
1323                 jme_disable_tx_engine(jme);
1324                 jme_reset_mac_processor(jme);
1325                 jme_free_rx_resources(jme);
1326                 jme_free_tx_resources(jme);
1327
1328                 if (test_bit(JME_FLAG_POLL, &jme->flags))
1329                         jme_polling_mode(jme);
1330
1331                 netif_carrier_off(netdev);
1332         }
1333
1334         jme_check_link(netdev, 0);
1335         if (netif_carrier_ok(netdev)) {
1336                 rc = jme_setup_rx_resources(jme);
1337                 if (rc) {
1338                         pr_err("Failed to allocate RX resources, device stopped!\n");
1339                         goto out_enable_tasklet;
1340                 }
1341
1342                 rc = jme_setup_tx_resources(jme);
1343                 if (rc) {
1344                         pr_err("Failed to allocate TX resources, device stopped!\n");
1345                         goto err_out_free_rx_resources;
1346                 }
1347
1348                 jme_enable_rx_engine(jme);
1349                 jme_enable_tx_engine(jme);
1350
1351                 netif_start_queue(netdev);
1352
1353                 if (test_bit(JME_FLAG_POLL, &jme->flags))
1354                         jme_interrupt_mode(jme);
1355
1356                 jme_start_pcc_timer(jme);
1357         } else if (jme_pseudo_hotplug_enabled(jme)) {
1358                 jme_start_shutdown_timer(jme);
1359         }
1360
1361         goto out_enable_tasklet;
1362
1363 err_out_free_rx_resources:
1364         jme_free_rx_resources(jme);
1365 out_enable_tasklet:
1366         tasklet_enable(&jme->txclean_task);
1367         tasklet_hi_enable(&jme->rxclean_task);
1368         tasklet_hi_enable(&jme->rxempty_task);
1369 out:
1370         atomic_inc(&jme->link_changing);
1371 }
1372
1373 static void
1374 jme_rx_clean_tasklet(unsigned long arg)
1375 {
1376         struct jme_adapter *jme = (struct jme_adapter *)arg;
1377         struct dynpcc_info *dpi = &(jme->dpi);
1378
1379         jme_process_receive(jme, jme->rx_ring_size);
1380         ++(dpi->intr_cnt);
1381
1382 }
1383
1384 static int
1385 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
1386 {
1387         struct jme_adapter *jme = jme_napi_priv(holder);
1388         int rest;
1389
1390         rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
1391
1392         while (atomic_read(&jme->rx_empty) > 0) {
1393                 atomic_dec(&jme->rx_empty);
1394                 ++(NET_STAT(jme).rx_dropped);
1395                 jme_restart_rx_engine(jme);
1396         }
1397         atomic_inc(&jme->rx_empty);
1398
1399         if (rest) {
1400                 JME_RX_COMPLETE(netdev, holder);
1401                 jme_interrupt_mode(jme);
1402         }
1403
1404         JME_NAPI_WEIGHT_SET(budget, rest);
1405         return JME_NAPI_WEIGHT_VAL(budget) - rest;
1406 }
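
/*
 * NAPI contract (sketch): jme_poll() returns the number of packets
 * consumed (budget minus the remaining limit).  When the budget was not
 * exhausted, RX completion is signalled through JME_RX_COMPLETE() and the
 * adapter drops back to interrupt mode via jme_interrupt_mode().
 */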
1407
1408 static void
1409 jme_rx_empty_tasklet(unsigned long arg)
1410 {
1411         struct jme_adapter *jme = (struct jme_adapter *)arg;
1412
1413         if (unlikely(atomic_read(&jme->link_changing) != 1))
1414                 return;
1415
1416         if (unlikely(!netif_carrier_ok(jme->dev)))
1417                 return;
1418
1419         netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
1420
1421         jme_rx_clean_tasklet(arg);
1422
1423         while (atomic_read(&jme->rx_empty) > 0) {
1424                 atomic_dec(&jme->rx_empty);
1425                 ++(NET_STAT(jme).rx_dropped);
1426                 jme_restart_rx_engine(jme);
1427         }
1428         atomic_inc(&jme->rx_empty);
1429 }
1430
1431 static void
1432 jme_wake_queue_if_stopped(struct jme_adapter *jme)
1433 {
1434         struct jme_ring *txring = &(jme->txring[0]);
1435
1436         smp_wmb();
1437         if (unlikely(netif_queue_stopped(jme->dev) &&
1438         atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1439                 netif_info(jme, tx_done, jme->dev, "TX queue woken\n");
1440                 netif_wake_queue(jme->dev);
1441         }
1442
1443 }
1444
1445 static void
1446 jme_tx_clean_tasklet(unsigned long arg)
1447 {
1448         struct jme_adapter *jme = (struct jme_adapter *)arg;
1449         struct jme_ring *txring = &(jme->txring[0]);
1450         struct txdesc *txdesc = txring->desc;
1451         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1452         int i, j, cnt = 0, max, err, mask;
1453
1454         tx_dbg(jme, "Into txclean\n");
1455
1456         if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
1457                 goto out;
1458
1459         if (unlikely(atomic_read(&jme->link_changing) != 1))
1460                 goto out;
1461
1462         if (unlikely(!netif_carrier_ok(jme->dev)))
1463                 goto out;
1464
1465         max = jme->tx_ring_size - atomic_read(&txring->nr_free);
1466         mask = jme->tx_ring_mask;
1467
1468         for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
1469
1470                 ctxbi = txbi + i;
1471
1472                 if (likely(ctxbi->skb &&
1473                 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1474
1475                         tx_dbg(jme, "txclean: %d+%d@%lu\n",
1476                                i, ctxbi->nr_desc, jiffies);
1477
1478                         err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1479
1480                         for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
1481                                 ttxbi = txbi + ((i + j) & (mask));
1482                                 txdesc[(i + j) & (mask)].dw[0] = 0;
1483
1484                                 pci_unmap_page(jme->pdev,
1485                                                  ttxbi->mapping,
1486                                                  ttxbi->len,
1487                                                  PCI_DMA_TODEVICE);
1488
1489                                 ttxbi->mapping = 0;
1490                                 ttxbi->len = 0;
1491                         }
1492
1493                         dev_kfree_skb(ctxbi->skb);
1494
1495                         cnt += ctxbi->nr_desc;
1496
1497                         if (unlikely(err)) {
1498                                 ++(NET_STAT(jme).tx_carrier_errors);
1499                         } else {
1500                                 ++(NET_STAT(jme).tx_packets);
1501                                 NET_STAT(jme).tx_bytes += ctxbi->len;
1502                         }
1503
1504                         ctxbi->skb = NULL;
1505                         ctxbi->len = 0;
1506                         ctxbi->start_xmit = 0;
1507
1508                 } else {
1509                         break;
1510                 }
1511
1512                 i = (i + ctxbi->nr_desc) & mask;
1513
1514                 ctxbi->nr_desc = 0;
1515         }
1516
1517         tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
1518         atomic_set(&txring->next_to_clean, i);
1519         atomic_add(cnt, &txring->nr_free);
1520
1521         jme_wake_queue_if_stopped(jme);
1522
1523 out:
1524         atomic_inc(&jme->tx_cleaning);
1525 }
1526
1527 static void
1528 jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
1529 {
1530         /*
1531          * Disable interrupt
1532          */
1533         jwrite32f(jme, JME_IENC, INTR_ENABLE);
1534
1535         if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1536                 /*
1537                  * Link change event is critical
1538                  * all other events are ignored
1539                  */
1540                 jwrite32(jme, JME_IEVE, intrstat);
1541                 tasklet_schedule(&jme->linkch_task);
1542                 goto out_reenable;
1543         }
1544
1545         if (intrstat & INTR_TMINTR) {
1546                 jwrite32(jme, JME_IEVE, INTR_TMINTR);
1547                 tasklet_schedule(&jme->pcc_task);
1548         }
1549
1550         if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1551                 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1552                 tasklet_schedule(&jme->txclean_task);
1553         }
1554
1555         if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1556                 jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1557                                                      INTR_PCCRX0 |
1558                                                      INTR_RX0EMP)) |
1559                                         INTR_RX0);
1560         }
1561
1562         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1563                 if (intrstat & INTR_RX0EMP)
1564                         atomic_inc(&jme->rx_empty);
1565
1566                 if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1567                         if (likely(JME_RX_SCHEDULE_PREP(jme))) {
1568                                 jme_polling_mode(jme);
1569                                 JME_RX_SCHEDULE(jme);
1570                         }
1571                 }
1572         } else {
1573                 if (intrstat & INTR_RX0EMP) {
1574                         atomic_inc(&jme->rx_empty);
1575                         tasklet_hi_schedule(&jme->rxempty_task);
1576                 } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
1577                         tasklet_hi_schedule(&jme->rxclean_task);
1578                 }
1579         }
1580
1581 out_reenable:
1582         /*
1583          * Re-enable interrupt
1584          */
1585         jwrite32f(jme, JME_IENS, INTR_ENABLE);
1586 }
1587
1588 static irqreturn_t
1589 jme_intr(int irq, void *dev_id)
1590 {
1591         struct net_device *netdev = dev_id;
1592         struct jme_adapter *jme = netdev_priv(netdev);
1593         u32 intrstat;
1594
1595         intrstat = jread32(jme, JME_IEVE);
1596
1597         /*
1598          * Check if it's really an interrupt for us
1599          */
1600         if (unlikely((intrstat & INTR_ENABLE) == 0))
1601                 return IRQ_NONE;
1602
1603         /*
1604          * Check if the device still exists
1605          */
1606         if (unlikely(intrstat == ~((typeof(intrstat))0)))
1607                 return IRQ_NONE;
1608
1609         jme_intr_msi(jme, intrstat);
1610
1611         return IRQ_HANDLED;
1612 }
1613
1614 static irqreturn_t
1615 jme_msi(int irq, void *dev_id)
1616 {
1617         struct net_device *netdev = dev_id;
1618         struct jme_adapter *jme = netdev_priv(netdev);
1619         u32 intrstat;
1620
1621         intrstat = jread32(jme, JME_IEVE);
1622
1623         jme_intr_msi(jme, intrstat);
1624
1625         return IRQ_HANDLED;
1626 }
1627
1628 static void
1629 jme_reset_link(struct jme_adapter *jme)
1630 {
1631         jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1632 }
1633
1634 static void
1635 jme_restart_an(struct jme_adapter *jme)
1636 {
1637         u32 bmcr;
1638
1639         spin_lock_bh(&jme->phy_lock);
1640         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1641         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1642         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1643         spin_unlock_bh(&jme->phy_lock);
1644 }
1645
1646 static int
1647 jme_request_irq(struct jme_adapter *jme)
1648 {
1649         int rc;
1650         struct net_device *netdev = jme->dev;
1651         irq_handler_t handler = jme_intr;
1652         int irq_flags = IRQF_SHARED;
1653
1654         if (!pci_enable_msi(jme->pdev)) {
1655                 set_bit(JME_FLAG_MSI, &jme->flags);
1656                 handler = jme_msi;
1657                 irq_flags = 0;
1658         }
1659
1660         rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1661                           netdev);
1662         if (rc) {
1663                 netdev_err(netdev,
1664                            "Unable to request %s interrupt (return: %d)\n",
1665                            test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1666                            rc);
1667
1668                 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1669                         pci_disable_msi(jme->pdev);
1670                         clear_bit(JME_FLAG_MSI, &jme->flags);
1671                 }
1672         } else {
1673                 netdev->irq = jme->pdev->irq;
1674         }
1675
1676         return rc;
1677 }
1678
1679 static void
1680 jme_free_irq(struct jme_adapter *jme)
1681 {
1682         free_irq(jme->pdev->irq, jme->dev);
1683         if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1684                 pci_disable_msi(jme->pdev);
1685                 clear_bit(JME_FLAG_MSI, &jme->flags);
1686                 jme->dev->irq = jme->pdev->irq;
1687         }
1688 }
1689
1690 static inline void
1691 jme_new_phy_on(struct jme_adapter *jme)
1692 {
1693         u32 reg;
1694
1695         reg = jread32(jme, JME_PHY_PWR);
1696         reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1697                  PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1698         jwrite32(jme, JME_PHY_PWR, reg);
1699
1700         pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1701         reg &= ~PE1_GPREG0_PBG;
1702         reg |= PE1_GPREG0_ENBG;
1703         pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1704 }
1705
1706 static inline void
1707 jme_new_phy_off(struct jme_adapter *jme)
1708 {
1709         u32 reg;
1710
1711         reg = jread32(jme, JME_PHY_PWR);
1712         reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1713                PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1714         jwrite32(jme, JME_PHY_PWR, reg);
1715
1716         pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1717         reg &= ~PE1_GPREG0_PBG;
1718         reg |= PE1_GPREG0_PDD3COLD;
1719         pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1720 }
1721
1722 static inline void
1723 jme_phy_on(struct jme_adapter *jme)
1724 {
1725         u32 bmcr;
1726
1727         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1728         bmcr &= ~BMCR_PDOWN;
1729         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1730
1731         if (new_phy_power_ctrl(jme->chip_main_rev))
1732                 jme_new_phy_on(jme);
1733 }
1734
1735 static inline void
1736 jme_phy_off(struct jme_adapter *jme)
1737 {
1738         u32 bmcr;
1739
1740         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1741         bmcr |= BMCR_PDOWN;
1742         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1743
1744         if (new_phy_power_ctrl(jme->chip_main_rev))
1745                 jme_new_phy_off(jme);
1746 }
1747
1748 static int
1749 jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1750 {
1751         u32 phy_addr;
1752
1753         phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1754         jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1755                         phy_addr);
1756         return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1757                         JM_PHY_SPEC_DATA_REG);
1758 }
1759
1760 static void
1761 jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1762 {
1763         u32 phy_addr;
1764
1765         phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1766         jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1767                         phy_data);
1768         jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1769                         phy_addr);
1770 }
1771
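/*
 * jme_phy_specreg_read()/jme_phy_specreg_write() above use an indirect
 * scheme: the target "specification register" index plus a read/write
 * opcode goes into JM_PHY_SPEC_ADDR_REG over MDIO, and the payload
 * moves through JM_PHY_SPEC_DATA_REG (the data is written before the
 * address/opcode on a write).  A generic sketch of the same
 * address-register/data-register pattern, with hypothetical reg_read()
 * and reg_write() accessors (illustrative only):
 *
 *      static u32 indirect_read(u32 index)
 *      {
 *              reg_write(ADDR_REG, READ_OP | index);
 *              return reg_read(DATA_REG);
 *      }
 *
 *      static void indirect_write(u32 index, u32 val)
 *      {
 *              reg_write(DATA_REG, val);
 *              reg_write(ADDR_REG, WRITE_OP | index);
 *      }
 */
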
1772 static int
1773 jme_phy_calibration(struct jme_adapter *jme)
1774 {
1775         u32 ctrl1000, phy_data;
1776
1777         jme_phy_off(jme);
1778         jme_phy_on(jme);
1779         /* Enable PHY test mode 1 */
1780         ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1781         ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1782         ctrl1000 |= PHY_GAD_TEST_MODE_1;
1783         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1784
1785         phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1786         phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1787         phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1788                         JM_PHY_EXT_COMM_2_CALI_ENABLE;
1789         jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1790         msleep(20);
1791         phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1792         phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1793                         JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1794                         JM_PHY_EXT_COMM_2_CALI_LATCH);
1795         jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1796
1797         /*  Disable PHY test mode */
1798         ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1799         ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1800         jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1801         return 0;
1802 }
1803
1804 static int
1805 jme_phy_setEA(struct jme_adapter *jme)
1806 {
1807         u32 phy_comm0 = 0, phy_comm1 = 0;
1808         u8 nic_ctrl;
1809
1810         pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1811         if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1812                 return 0;
1813
1814         switch (jme->pdev->device) {
1815         case PCI_DEVICE_ID_JMICRON_JMC250:
1816                 if (((jme->chip_main_rev == 5) &&
1817                         ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1818                         (jme->chip_sub_rev == 3))) ||
1819                         (jme->chip_main_rev >= 6)) {
1820                         phy_comm0 = 0x008A;
1821                         phy_comm1 = 0x4109;
1822                 }
1823                 if ((jme->chip_main_rev == 3) &&
1824                         ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1825                         phy_comm0 = 0xE088;
1826                 break;
1827         case PCI_DEVICE_ID_JMICRON_JMC260:
1828                 if (((jme->chip_main_rev == 5) &&
1829                         ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1830                         (jme->chip_sub_rev == 3))) ||
1831                         (jme->chip_main_rev >= 6)) {
1832                         phy_comm0 = 0x008A;
1833                         phy_comm1 = 0x4109;
1834                 }
1835                 if ((jme->chip_main_rev == 3) &&
1836                         ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1837                         phy_comm0 = 0xE088;
1838                 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1839                         phy_comm0 = 0x608A;
1840                 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1841                         phy_comm0 = 0x408A;
1842                 break;
1843         default:
1844                 return -ENODEV;
1845         }
1846         if (phy_comm0)
1847                 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1848         if (phy_comm1)
1849                 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1850
1851         return 0;
1852 }
1853
1854 static int
1855 jme_open(struct net_device *netdev)
1856 {
1857         struct jme_adapter *jme = netdev_priv(netdev);
1858         int rc;
1859
1860         jme_clear_pm(jme);
1861         JME_NAPI_ENABLE(jme);
1862
1863         tasklet_enable(&jme->linkch_task);
1864         tasklet_enable(&jme->txclean_task);
1865         tasklet_hi_enable(&jme->rxclean_task);
1866         tasklet_hi_enable(&jme->rxempty_task);
1867
1868         rc = jme_request_irq(jme);
1869         if (rc)
1870                 goto err_out;
1871
1872         jme_start_irq(jme);
1873
1874         jme_phy_on(jme);
1875         if (test_bit(JME_FLAG_SSET, &jme->flags))
1876                 jme_set_settings(netdev, &jme->old_ecmd);
1877         else
1878                 jme_reset_phy_processor(jme);
1879         jme_phy_calibration(jme);
1880         jme_phy_setEA(jme);
1881         jme_reset_link(jme);
1882
1883         return 0;
1884
1885 err_out:
1886         netif_stop_queue(netdev);
1887         netif_carrier_off(netdev);
1888         return rc;
1889 }
1890
1891 static void
1892 jme_set_100m_half(struct jme_adapter *jme)
1893 {
1894         u32 bmcr, tmp;
1895
1896         jme_phy_on(jme);
1897         bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1898         tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1899                        BMCR_SPEED1000 | BMCR_FULLDPLX);
1900         tmp |= BMCR_SPEED100;
1901
1902         if (bmcr != tmp)
1903                 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1904
1905         if (jme->fpgaver)
1906                 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1907         else
1908                 jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1909 }
1910
1911 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1912 static void
1913 jme_wait_link(struct jme_adapter *jme)
1914 {
1915         u32 phylink, to = JME_WAIT_LINK_TIME;
1916
1917         mdelay(1000);
1918         phylink = jme_linkstat_from_phy(jme);
1919         while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1920                 mdelay(10);
1921                 phylink = jme_linkstat_from_phy(jme);
1922         }
1923 }
1924
1925 static void
1926 jme_powersave_phy(struct jme_adapter *jme)
1927 {
1928         if (jme->reg_pmcs) {
1929                 jme_set_100m_half(jme);
1930                 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1931                         jme_wait_link(jme);
1932                 jme_clear_pm(jme);
1933         } else {
1934                 jme_phy_off(jme);
1935         }
1936 }
1937
1938 static int
1939 jme_close(struct net_device *netdev)
1940 {
1941         struct jme_adapter *jme = netdev_priv(netdev);
1942
1943         netif_stop_queue(netdev);
1944         netif_carrier_off(netdev);
1945
1946         jme_stop_irq(jme);
1947         jme_free_irq(jme);
1948
1949         JME_NAPI_DISABLE(jme);
1950
1951         tasklet_disable(&jme->linkch_task);
1952         tasklet_disable(&jme->txclean_task);
1953         tasklet_disable(&jme->rxclean_task);
1954         tasklet_disable(&jme->rxempty_task);
1955
1956         jme_disable_rx_engine(jme);
1957         jme_disable_tx_engine(jme);
1958         jme_reset_mac_processor(jme);
1959         jme_free_rx_resources(jme);
1960         jme_free_tx_resources(jme);
1961         jme->phylink = 0;
1962         jme_phy_off(jme);
1963
1964         return 0;
1965 }
1966
1967 static int
1968 jme_alloc_txdesc(struct jme_adapter *jme,
1969                         struct sk_buff *skb)
1970 {
1971         struct jme_ring *txring = &(jme->txring[0]);
1972         int idx, nr_alloc, mask = jme->tx_ring_mask;
1973
1974         idx = txring->next_to_use;
1975         nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1976
1977         if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1978                 return -1;
1979
1980         atomic_sub(nr_alloc, &txring->nr_free);
1981
1982         txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1983
1984         return idx;
1985 }
1986
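/*
 * jme_alloc_txdesc() reserves skb_shinfo(skb)->nr_frags + 2 slots per
 * packet: one leading format descriptor (filled last, in
 * jme_fill_tx_desc()), one for the linear part of the skb, and one per
 * page fragment.  The ring size is a power of two, so the producer
 * index wraps with a plain AND of the mask.  A self-contained
 * user-space sketch of the same reservation arithmetic (assumed names,
 * illustrative only):
 *
 *      #include <stdatomic.h>
 *
 *      struct ring {
 *              unsigned int next_to_use;
 *              unsigned int mask;
 *              atomic_int nr_free;
 *      };
 *
 *      static int ring_reserve(struct ring *r, int slots)
 *      {
 *              int idx = r->next_to_use;
 *
 *              if (atomic_load(&r->nr_free) < slots)
 *                      return -1;
 *              atomic_fetch_sub(&r->nr_free, slots);
 *              r->next_to_use = (r->next_to_use + slots) & r->mask;
 *              return idx;
 *      }
 */
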
1987 static void
1988 jme_fill_tx_map(struct pci_dev *pdev,
1989                 struct txdesc *txdesc,
1990                 struct jme_buffer_info *txbi,
1991                 struct page *page,
1992                 u32 page_offset,
1993                 u32 len,
1994                 bool hidma)
1995 {
1996         dma_addr_t dmaaddr;
1997
1998         dmaaddr = pci_map_page(pdev,
1999                                 page,
2000                                 page_offset,
2001                                 len,
2002                                 PCI_DMA_TODEVICE);
2003
2004         pci_dma_sync_single_for_device(pdev,
2005                                        dmaaddr,
2006                                        len,
2007                                        PCI_DMA_TODEVICE);
2008
2009         txdesc->dw[0] = 0;
2010         txdesc->dw[1] = 0;
2011         txdesc->desc2.flags     = TXFLAG_OWN;
2012         txdesc->desc2.flags     |= (hidma) ? TXFLAG_64BIT : 0;
2013         txdesc->desc2.datalen   = cpu_to_le16(len);
2014         txdesc->desc2.bufaddrh  = cpu_to_le32((__u64)dmaaddr >> 32);
2015         txdesc->desc2.bufaddrl  = cpu_to_le32(
2016                                         (__u64)dmaaddr & 0xFFFFFFFFUL);
2017
2018         txbi->mapping = dmaaddr;
2019         txbi->len = len;
2020 }
2021
2022 static void
2023 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2024 {
2025         struct jme_ring *txring = &(jme->txring[0]);
2026         struct txdesc *txdesc = txring->desc, *ctxdesc;
2027         struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2028         bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
2029         int i, nr_frags = skb_shinfo(skb)->nr_frags;
2030         int mask = jme->tx_ring_mask;
2031         const struct skb_frag_struct *frag;
2032         u32 len;
2033
2034         for (i = 0 ; i < nr_frags ; ++i) {
2035                 frag = &skb_shinfo(skb)->frags[i];
2036                 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2037                 ctxbi = txbi + ((idx + i + 2) & (mask));
2038
2039                 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2040                                 skb_frag_page(frag),
2041                                 frag->page_offset, skb_frag_size(frag), hidma);
2042         }
2043
2044         len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2045         ctxdesc = txdesc + ((idx + 1) & (mask));
2046         ctxbi = txbi + ((idx + 1) & (mask));
2047         jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2048                         offset_in_page(skb->data), len, hidma);
2049
2050 }
2051
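/*
 * Slot layout produced by jme_map_tx_skb(): for a packet whose
 * reservation starts at idx, slot idx is left for the format
 * descriptor (written by jme_fill_tx_desc()), slot (idx + 1) & mask
 * holds the linear data, and fragment i lands in slot
 * (idx + i + 2) & mask, which is why nr_frags + 2 entries are
 * reserved.  Worked example with a 1024-entry ring (mask 0x3ff),
 * idx = 1022 and two fragments:
 *
 *      format descriptor : 1022
 *      linear data       : (1022 + 1) & 0x3ff = 1023
 *      fragment 0        : (1022 + 0 + 2) & 0x3ff = 0
 *      fragment 1        : (1022 + 1 + 2) & 0x3ff = 1
 */
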
2052 static int
2053 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2054 {
2055         if (unlikely(skb_shinfo(skb)->gso_size &&
2056                         skb_header_cloned(skb) &&
2057                         pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2058                 dev_kfree_skb(skb);
2059                 return -1;
2060         }
2061
2062         return 0;
2063 }
2064
2065 static int
2066 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2067 {
2068         *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
2069         if (*mss) {
2070                 *flags |= TXFLAG_LSEN;
2071
2072                 if (skb->protocol == htons(ETH_P_IP)) {
2073                         struct iphdr *iph = ip_hdr(skb);
2074
2075                         iph->check = 0;
2076                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2077                                                                 iph->daddr, 0,
2078                                                                 IPPROTO_TCP,
2079                                                                 0);
2080                 } else {
2081                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
2082
2083                         tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
2084                                                                 &ip6h->daddr, 0,
2085                                                                 IPPROTO_TCP,
2086                                                                 0);
2087                 }
2088
2089                 return 0;
2090         }
2091
2092         return 1;
2093 }
2094
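/*
 * For TSO, jme_tx_tso() seeds the TCP checksum field with the
 * pseudo-header sum taken over the addresses and protocol with a zero
 * length (~csum_tcpudp_magic(..., 0, ..., 0) is effectively the
 * un-complemented folded sum), so the hardware only has to add each
 * segment's length and payload and complement the result.  A schematic
 * user-space sketch of that seed for IPv4, host byte order,
 * illustrative only:
 *
 *      #include <stdint.h>
 *
 *      static uint16_t tso_csum_seed(uint32_t saddr, uint32_t daddr,
 *                                    uint8_t proto)
 *      {
 *              uint32_t sum = 0;
 *
 *              sum += (saddr >> 16) + (saddr & 0xffff);
 *              sum += (daddr >> 16) + (daddr & 0xffff);
 *              sum += proto;
 *              while (sum >> 16)
 *                      sum = (sum & 0xffff) + (sum >> 16);
 *              return (uint16_t)sum;
 *      }
 */
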
2095 static void
2096 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2097 {
2098         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2099                 u8 ip_proto;
2100
2101                 switch (skb->protocol) {
2102                 case htons(ETH_P_IP):
2103                         ip_proto = ip_hdr(skb)->protocol;
2104                         break;
2105                 case htons(ETH_P_IPV6):
2106                         ip_proto = ipv6_hdr(skb)->nexthdr;
2107                         break;
2108                 default:
2109                         ip_proto = 0;
2110                         break;
2111                 }
2112
2113                 switch (ip_proto) {
2114                 case IPPROTO_TCP:
2115                         *flags |= TXFLAG_TCPCS;
2116                         break;
2117                 case IPPROTO_UDP:
2118                         *flags |= TXFLAG_UDPCS;
2119                         break;
2120                 default:
2121                         netif_err(jme, tx_err, jme->dev, "Unsupported upper layer protocol\n");
2122                         break;
2123                 }
2124         }
2125 }
2126
2127 static inline void
2128 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2129 {
2130         if (vlan_tx_tag_present(skb)) {
2131                 *flags |= TXFLAG_TAGON;
2132                 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2133         }
2134 }
2135
2136 static int
2137 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2138 {
2139         struct jme_ring *txring = &(jme->txring[0]);
2140         struct txdesc *txdesc;
2141         struct jme_buffer_info *txbi;
2142         u8 flags;
2143
2144         txdesc = (struct txdesc *)txring->desc + idx;
2145         txbi = txring->bufinf + idx;
2146
2147         txdesc->dw[0] = 0;
2148         txdesc->dw[1] = 0;
2149         txdesc->dw[2] = 0;
2150         txdesc->dw[3] = 0;
2151         txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2152         /*
2153          * Set the OWN bit last.
2154          * The kernel may queue packets faster than the NIC sends them,
2155          * and the NIC could try to use this descriptor before we tell
2156          * it to start sending this TX queue.
2157          * All other fields are already filled in correctly.
2158          */
2159         wmb();
2160         flags = TXFLAG_OWN | TXFLAG_INT;
2161         /*
2162          * Set checksum flags when not doing TSO
2163          */
2164         if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2165                 jme_tx_csum(jme, skb, &flags);
2166         jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2167         jme_map_tx_skb(jme, skb, idx);
2168         txdesc->desc1.flags = flags;
2169         /*
2170          * Record the TX buffer info only after telling the NIC to send,
2171          * for more accurate tx_clean timing
2172          */
2173         wmb();
2174         txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2175         txbi->skb = skb;
2176         txbi->len = skb->len;
2177         txbi->start_xmit = jiffies;
2178         if (!txbi->start_xmit)
2179                 txbi->start_xmit = (0UL-1);
2180
2181         return 0;
2182 }
2183
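/*
 * jme_fill_tx_desc() is a producer publish: every other descriptor
 * field is written first and the flags word carrying the OWN bit on
 * the leading descriptor is written last, with write barriers keeping
 * the stores ordered so the NIC never sees OWN before the data it
 * guards.  For an ordinary CPU-to-CPU shared ring the same publish can
 * be written as a C11 release store; a minimal sketch with assumed
 * types (illustrative only, not equivalent to a DMA/MMIO barrier):
 *
 *      #include <stdatomic.h>
 *      #include <stdint.h>
 *
 *      #define FLAG_OWN 0x80u
 *
 *      struct sw_desc {
 *              uint32_t addr;
 *              uint16_t len;
 *              _Atomic uint8_t flags;
 *      };
 *
 *      static void publish_desc(struct sw_desc *d, uint32_t addr,
 *                               uint16_t len)
 *      {
 *              d->addr = addr;
 *              d->len = len;
 *              atomic_store_explicit(&d->flags, FLAG_OWN,
 *                                    memory_order_release);
 *      }
 */
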
2184 static void
2185 jme_stop_queue_if_full(struct jme_adapter *jme)
2186 {
2187         struct jme_ring *txring = &(jme->txring[0]);
2188         struct jme_buffer_info *txbi = txring->bufinf;
2189         int idx = atomic_read(&txring->next_to_clean);
2190
2191         txbi += idx;
2192
2193         smp_wmb();
2194         if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2195                 netif_stop_queue(jme->dev);
2196                 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2197                 smp_wmb();
2198                 if (atomic_read(&txring->nr_free)
2199                         >= (jme->tx_wake_threshold)) {
2200                         netif_wake_queue(jme->dev);
2201                         netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Woken\n");
2202                 }
2203         }
2204
2205         if (unlikely(txbi->start_xmit &&
2206                         (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
2207                         txbi->skb)) {
2208                 netif_stop_queue(jme->dev);
2209                 netif_info(jme, tx_queued, jme->dev,
2210                            "TX Queue Stopped %d@%lu\n", idx, jiffies);
2211         }
2212 }
2213
2214 /*
2215  * This function is already protected by netif_tx_lock()
2216  */
2217
2218 static netdev_tx_t
2219 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2220 {
2221         struct jme_adapter *jme = netdev_priv(netdev);
2222         int idx;
2223
2224         if (unlikely(jme_expand_header(jme, skb))) {
2225                 ++(NET_STAT(jme).tx_dropped);
2226                 return NETDEV_TX_OK;
2227         }
2228
2229         idx = jme_alloc_txdesc(jme, skb);
2230
2231         if (unlikely(idx < 0)) {
2232                 netif_stop_queue(netdev);
2233                 netif_err(jme, tx_err, jme->dev,
2234                           "BUG! Tx ring full when queue awake!\n");
2235
2236                 return NETDEV_TX_BUSY;
2237         }
2238
2239         jme_fill_tx_desc(jme, skb, idx);
2240
2241         jwrite32(jme, JME_TXCS, jme->reg_txcs |
2242                                 TXCS_SELECT_QUEUE0 |
2243                                 TXCS_QUEUE0S |
2244                                 TXCS_ENABLE);
2245
2246         tx_dbg(jme, "xmit: %d+%d@%lu\n",
2247                idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2248         jme_stop_queue_if_full(jme);
2249
2250         return NETDEV_TX_OK;
2251 }
2252
2253 static void
2254 jme_set_unicastaddr(struct net_device *netdev)
2255 {
2256         struct jme_adapter *jme = netdev_priv(netdev);
2257         u32 val;
2258
2259         val = (netdev->dev_addr[3] & 0xff) << 24 |
2260               (netdev->dev_addr[2] & 0xff) << 16 |
2261               (netdev->dev_addr[1] & 0xff) <<  8 |
2262               (netdev->dev_addr[0] & 0xff);
2263         jwrite32(jme, JME_RXUMA_LO, val);
2264         val = (netdev->dev_addr[5] & 0xff) << 8 |
2265               (netdev->dev_addr[4] & 0xff);
2266         jwrite32(jme, JME_RXUMA_HI, val);
2267 }
2268
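/*
 * jme_set_unicastaddr() packs the station address least significant
 * byte first: bytes 0-3 go into RXUMA_LO and bytes 4-5 into the low
 * half of RXUMA_HI.  Worked example for the (arbitrary) address
 * 00:1b:44:11:3a:b7:
 *
 *      RXUMA_LO = 0x11441b00
 *      RXUMA_HI = 0x0000b73a
 */
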
2269 static int
2270 jme_set_macaddr(struct net_device *netdev, void *p)
2271 {
2272         struct jme_adapter *jme = netdev_priv(netdev);
2273         struct sockaddr *addr = p;
2274
2275         if (netif_running(netdev))
2276                 return -EBUSY;
2277
2278         spin_lock_bh(&jme->macaddr_lock);
2279         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2280         jme_set_unicastaddr(netdev);
2281         spin_unlock_bh(&jme->macaddr_lock);
2282
2283         return 0;
2284 }
2285
2286 static void
2287 jme_set_multi(struct net_device *netdev)
2288 {
2289         struct jme_adapter *jme = netdev_priv(netdev);
2290         u32 mc_hash[2] = {};
2291
2292         spin_lock_bh(&jme->rxmcs_lock);
2293
2294         jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2295
2296         if (netdev->flags & IFF_PROMISC) {
2297                 jme->reg_rxmcs |= RXMCS_ALLFRAME;
2298         } else if (netdev->flags & IFF_ALLMULTI) {
2299                 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2300         } else if (netdev->flags & IFF_MULTICAST) {
2301                 struct netdev_hw_addr *ha;
2302                 int bit_nr;
2303
2304                 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2305                 netdev_for_each_mc_addr(ha, netdev) {
2306                         bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2307                         mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2308                 }
2309
2310                 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2311                 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
2312         }
2313
2314         wmb();
2315         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2316
2317         spin_unlock_bh(&jme->rxmcs_lock);
2318 }
2319
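/*
 * The multicast filter above hashes each address with the Ethernet
 * CRC-32 and uses the low six bits to select one of 64 bucket bits
 * spread across the two RXMCHT registers.  A self-contained sketch of
 * the same bucket selection, assuming it mirrors the kernel's bitwise
 * ether_crc() (illustrative only):
 *
 *      #include <stdint.h>
 *
 *      static uint32_t ether_crc_sketch(int len, const uint8_t *p)
 *      {
 *              uint32_t crc = 0xffffffff;
 *              int bit;
 *
 *              while (len-- > 0) {
 *                      uint8_t byte = *p++;
 *
 *                      for (bit = 0; bit < 8; bit++, byte >>= 1)
 *                              crc = (crc << 1) ^
 *                                    ((((crc >> 31) ^ byte) & 1) ?
 *                                     0x04c11db7 : 0);
 *              }
 *              return crc;
 *      }
 *
 *      static void set_mc_bit(uint32_t mc_hash[2], const uint8_t *addr)
 *      {
 *              int bit_nr = ether_crc_sketch(6, addr) & 0x3f;
 *
 *              mc_hash[bit_nr >> 5] |= 1u << (bit_nr & 0x1f);
 *      }
 */
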
2320 static int
2321 jme_change_mtu(struct net_device *netdev, int new_mtu)
2322 {
2323         struct jme_adapter *jme = netdev_priv(netdev);
2324
2325         if (new_mtu == jme->old_mtu)
2326                 return 0;
2327
2328         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2329                 ((new_mtu) < IPV6_MIN_MTU))
2330                 return -EINVAL;
2331
2332
2333         netdev->mtu = new_mtu;
2334         netdev_update_features(netdev);
2335
2336         jme_restart_rx_engine(jme);
2337         jme_reset_link(jme);
2338
2339         return 0;
2340 }
2341
2342 static void
2343 jme_tx_timeout(struct net_device *netdev)
2344 {
2345         struct jme_adapter *jme = netdev_priv(netdev);
2346
2347         jme->phylink = 0;
2348         jme_reset_phy_processor(jme);
2349         if (test_bit(JME_FLAG_SSET, &jme->flags))
2350                 jme_set_settings(netdev, &jme->old_ecmd);
2351
2352         /*
2353          * Force to Reset the link again
2354          */
2355         jme_reset_link(jme);
2356 }
2357
2358 static inline void jme_pause_rx(struct jme_adapter *jme)
2359 {
2360         atomic_dec(&jme->link_changing);
2361
2362         jme_set_rx_pcc(jme, PCC_OFF);
2363         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2364                 JME_NAPI_DISABLE(jme);
2365         } else {
2366                 tasklet_disable(&jme->rxclean_task);
2367                 tasklet_disable(&jme->rxempty_task);
2368         }
2369 }
2370
2371 static inline void jme_resume_rx(struct jme_adapter *jme)
2372 {
2373         struct dynpcc_info *dpi = &(jme->dpi);
2374
2375         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2376                 JME_NAPI_ENABLE(jme);
2377         } else {
2378                 tasklet_hi_enable(&jme->rxclean_task);
2379                 tasklet_hi_enable(&jme->rxempty_task);
2380         }
2381         dpi->cur                = PCC_P1;
2382         dpi->attempt            = PCC_P1;
2383         dpi->cnt                = 0;
2384         jme_set_rx_pcc(jme, PCC_P1);
2385
2386         atomic_inc(&jme->link_changing);
2387 }
2388
2389 static void
2390 jme_get_drvinfo(struct net_device *netdev,
2391                      struct ethtool_drvinfo *info)
2392 {
2393         struct jme_adapter *jme = netdev_priv(netdev);
2394
2395         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2396         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2397         strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
2398 }
2399
2400 static int
2401 jme_get_regs_len(struct net_device *netdev)
2402 {
2403         return JME_REG_LEN;
2404 }
2405
2406 static void
2407 mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2408 {
2409         int i;
2410
2411         for (i = 0 ; i < len ; i += 4)
2412                 p[i >> 2] = jread32(jme, reg + i);
2413 }
2414
2415 static void
2416 mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2417 {
2418         int i;
2419         u16 *p16 = (u16 *)p;
2420
2421         for (i = 0 ; i < reg_nr ; ++i)
2422                 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2423 }
2424
2425 static void
2426 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2427 {
2428         struct jme_adapter *jme = netdev_priv(netdev);
2429         u32 *p32 = (u32 *)p;
2430
2431         memset(p, 0xFF, JME_REG_LEN);
2432
2433         regs->version = 1;
2434         mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2435
2436         p32 += 0x100 >> 2;
2437         mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2438
2439         p32 += 0x100 >> 2;
2440         mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2441
2442         p32 += 0x100 >> 2;
2443         mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2444
2445         p32 += 0x100 >> 2;
2446         mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2447 }
2448
2449 static int
2450 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2451 {
2452         struct jme_adapter *jme = netdev_priv(netdev);
2453
2454         ecmd->tx_coalesce_usecs = PCC_TX_TO;
2455         ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2456
2457         if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2458                 ecmd->use_adaptive_rx_coalesce = false;
2459                 ecmd->rx_coalesce_usecs = 0;
2460                 ecmd->rx_max_coalesced_frames = 0;
2461                 return 0;
2462         }
2463
2464         ecmd->use_adaptive_rx_coalesce = true;
2465
2466         switch (jme->dpi.cur) {
2467         case PCC_P1:
2468                 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2469                 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2470                 break;
2471         case PCC_P2:
2472                 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2473                 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2474                 break;
2475         case PCC_P3:
2476                 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2477                 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2478                 break;
2479         default:
2480                 break;
2481         }
2482
2483         return 0;
2484 }
2485
2486 static int
2487 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2488 {
2489         struct jme_adapter *jme = netdev_priv(netdev);
2490         struct dynpcc_info *dpi = &(jme->dpi);
2491
2492         if (netif_running(netdev))
2493                 return -EBUSY;
2494
2495         if (ecmd->use_adaptive_rx_coalesce &&
2496             test_bit(JME_FLAG_POLL, &jme->flags)) {
2497                 clear_bit(JME_FLAG_POLL, &jme->flags);
2498                 jme->jme_rx = netif_rx;
2499                 dpi->cur                = PCC_P1;
2500                 dpi->attempt            = PCC_P1;
2501                 dpi->cnt                = 0;
2502                 jme_set_rx_pcc(jme, PCC_P1);
2503                 jme_interrupt_mode(jme);
2504         } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2505                    !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2506                 set_bit(JME_FLAG_POLL, &jme->flags);
2507                 jme->jme_rx = netif_receive_skb;
2508                 jme_interrupt_mode(jme);
2509         }
2510
2511         return 0;
2512 }
2513
2514 static void
2515 jme_get_pauseparam(struct net_device *netdev,
2516                         struct ethtool_pauseparam *ecmd)
2517 {
2518         struct jme_adapter *jme = netdev_priv(netdev);
2519         u32 val;
2520
2521         ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2522         ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2523
2524         spin_lock_bh(&jme->phy_lock);
2525         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2526         spin_unlock_bh(&jme->phy_lock);
2527
2528         ecmd->autoneg =
2529                 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2530 }
2531
2532 static int
2533 jme_set_pauseparam(struct net_device *netdev,
2534                         struct ethtool_pauseparam *ecmd)
2535 {
2536         struct jme_adapter *jme = netdev_priv(netdev);
2537         u32 val;
2538
2539         if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2540                 (ecmd->tx_pause != 0)) {
2541
2542                 if (ecmd->tx_pause)
2543                         jme->reg_txpfc |= TXPFC_PF_EN;
2544                 else
2545                         jme->reg_txpfc &= ~TXPFC_PF_EN;
2546
2547                 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2548         }
2549
2550         spin_lock_bh(&jme->rxmcs_lock);
2551         if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2552                 (ecmd->rx_pause != 0)) {
2553
2554                 if (ecmd->rx_pause)
2555                         jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2556                 else
2557                         jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2558
2559                 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2560         }
2561         spin_unlock_bh(&jme->rxmcs_lock);
2562
2563         spin_lock_bh(&jme->phy_lock);
2564         val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2565         if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2566                 (ecmd->autoneg != 0)) {
2567
2568                 if (ecmd->autoneg)
2569                         val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2570                 else
2571                         val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2572
2573                 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2574                                 MII_ADVERTISE, val);
2575         }
2576         spin_unlock_bh(&jme->phy_lock);
2577
2578         return 0;
2579 }
2580
2581 static void
2582 jme_get_wol(struct net_device *netdev,
2583                 struct ethtool_wolinfo *wol)
2584 {
2585         struct jme_adapter *jme = netdev_priv(netdev);
2586
2587         wol->supported = WAKE_MAGIC | WAKE_PHY;
2588
2589         wol->wolopts = 0;
2590
2591         if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2592                 wol->wolopts |= WAKE_PHY;
2593
2594         if (jme->reg_pmcs & PMCS_MFEN)
2595                 wol->wolopts |= WAKE_MAGIC;
2596
2597 }
2598
2599 static int
2600 jme_set_wol(struct net_device *netdev,
2601                 struct ethtool_wolinfo *wol)
2602 {
2603         struct jme_adapter *jme = netdev_priv(netdev);
2604
2605         if (wol->wolopts & (WAKE_MAGICSECURE |
2606                                 WAKE_UCAST |
2607                                 WAKE_MCAST |
2608                                 WAKE_BCAST |
2609                                 WAKE_ARP))
2610                 return -EOPNOTSUPP;
2611
2612         jme->reg_pmcs = 0;
2613
2614         if (wol->wolopts & WAKE_PHY)
2615                 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2616
2617         if (wol->wolopts & WAKE_MAGIC)
2618                 jme->reg_pmcs |= PMCS_MFEN;
2619
2620         jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2621         device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2622
2623         return 0;
2624 }
2625
2626 static int
2627 jme_get_settings(struct net_device *netdev,
2628                      struct ethtool_cmd *ecmd)
2629 {
2630         struct jme_adapter *jme = netdev_priv(netdev);
2631         int rc;
2632
2633         spin_lock_bh(&jme->phy_lock);
2634         rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2635         spin_unlock_bh(&jme->phy_lock);
2636         return rc;
2637 }
2638
2639 static int
2640 jme_set_settings(struct net_device *netdev,
2641                      struct ethtool_cmd *ecmd)
2642 {
2643         struct jme_adapter *jme = netdev_priv(netdev);
2644         int rc, fdc = 0;
2645
2646         if (ethtool_cmd_speed(ecmd) == SPEED_1000
2647             && ecmd->autoneg != AUTONEG_ENABLE)
2648                 return -EINVAL;
2649
2650         /*
2651          * Check if the user changed only the duplex while media is forced.
2652          * The hardware would not generate a link change interrupt then.
2653          */
2654         if (jme->mii_if.force_media &&
2655         ecmd->autoneg != AUTONEG_ENABLE &&
2656         (jme->mii_if.full_duplex != ecmd->duplex))
2657                 fdc = 1;
2658
2659         spin_lock_bh(&jme->phy_lock);
2660         rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2661         spin_unlock_bh(&jme->phy_lock);
2662
2663         if (!rc) {
2664                 if (fdc)
2665                         jme_reset_link(jme);
2666                 jme->old_ecmd = *ecmd;
2667                 set_bit(JME_FLAG_SSET, &jme->flags);
2668         }
2669
2670         return rc;
2671 }
2672
2673 static int
2674 jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2675 {
2676         int rc;
2677         struct jme_adapter *jme = netdev_priv(netdev);
2678         struct mii_ioctl_data *mii_data = if_mii(rq);
2679         unsigned int duplex_chg;
2680
2681         if (cmd == SIOCSMIIREG) {
2682                 u16 val = mii_data->val_in;
2683                 if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2684                     (val & BMCR_SPEED1000))
2685                         return -EINVAL;
2686         }
2687
2688         spin_lock_bh(&jme->phy_lock);
2689         rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2690         spin_unlock_bh(&jme->phy_lock);
2691
2692         if (!rc && (cmd == SIOCSMIIREG)) {
2693                 if (duplex_chg)
2694                         jme_reset_link(jme);
2695                 jme_get_settings(netdev, &jme->old_ecmd);
2696                 set_bit(JME_FLAG_SSET, &jme->flags);
2697         }
2698
2699         return rc;
2700 }
2701
2702 static u32
2703 jme_get_link(struct net_device *netdev)
2704 {
2705         struct jme_adapter *jme = netdev_priv(netdev);
2706         return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2707 }
2708
2709 static u32
2710 jme_get_msglevel(struct net_device *netdev)
2711 {
2712         struct jme_adapter *jme = netdev_priv(netdev);
2713         return jme->msg_enable;
2714 }
2715
2716 static void
2717 jme_set_msglevel(struct net_device *netdev, u32 value)
2718 {
2719         struct jme_adapter *jme = netdev_priv(netdev);
2720         jme->msg_enable = value;
2721 }
2722
2723 static netdev_features_t
2724 jme_fix_features(struct net_device *netdev, netdev_features_t features)
2725 {
2726         if (netdev->mtu > 1900)
2727                 features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
2728         return features;
2729 }
2730
2731 static int
2732 jme_set_features(struct net_device *netdev, netdev_features_t features)
2733 {
2734         struct jme_adapter *jme = netdev_priv(netdev);
2735
2736         spin_lock_bh(&jme->rxmcs_lock);
2737         if (features & NETIF_F_RXCSUM)
2738                 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2739         else
2740                 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2741         jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2742         spin_unlock_bh(&jme->rxmcs_lock);
2743
2744         return 0;
2745 }
2746
2747 #ifdef CONFIG_NET_POLL_CONTROLLER
2748 static void jme_netpoll(struct net_device *dev)
2749 {
2750         unsigned long flags;
2751
2752         local_irq_save(flags);
2753         jme_intr(dev->irq, dev);
2754         local_irq_restore(flags);
2755 }
2756 #endif
2757
2758 static int
2759 jme_nway_reset(struct net_device *netdev)
2760 {
2761         struct jme_adapter *jme = netdev_priv(netdev);
2762         jme_restart_an(jme);
2763         return 0;
2764 }
2765
2766 static u8
2767 jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2768 {
2769         u32 val;
2770         int to;
2771
2772         val = jread32(jme, JME_SMBCSR);
2773         to = JME_SMB_BUSY_TIMEOUT;
2774         while ((val & SMBCSR_BUSY) && --to) {
2775                 msleep(1);
2776                 val = jread32(jme, JME_SMBCSR);
2777         }
2778         if (!to) {
2779                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2780                 return 0xFF;
2781         }
2782
2783         jwrite32(jme, JME_SMBINTF,
2784                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2785                 SMBINTF_HWRWN_READ |
2786                 SMBINTF_HWCMD);
2787
2788         val = jread32(jme, JME_SMBINTF);
2789         to = JME_SMB_BUSY_TIMEOUT;
2790         while ((val & SMBINTF_HWCMD) && --to) {
2791                 msleep(1);
2792                 val = jread32(jme, JME_SMBINTF);
2793         }
2794         if (!to) {
2795                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2796                 return 0xFF;
2797         }
2798
2799         return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2800 }
2801
2802 static void
2803 jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2804 {
2805         u32 val;
2806         int to;
2807
2808         val = jread32(jme, JME_SMBCSR);
2809         to = JME_SMB_BUSY_TIMEOUT;
2810         while ((val & SMBCSR_BUSY) && --to) {
2811                 msleep(1);
2812                 val = jread32(jme, JME_SMBCSR);
2813         }
2814         if (!to) {
2815                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2816                 return;
2817         }
2818
2819         jwrite32(jme, JME_SMBINTF,
2820                 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2821                 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2822                 SMBINTF_HWRWN_WRITE |
2823                 SMBINTF_HWCMD);
2824
2825         val = jread32(jme, JME_SMBINTF);
2826         to = JME_SMB_BUSY_TIMEOUT;
2827         while ((val & SMBINTF_HWCMD) && --to) {
2828                 msleep(1);
2829                 val = jread32(jme, JME_SMBINTF);
2830         }
2831         if (!to) {
2832                 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2833                 return;
2834         }
2835
2836         mdelay(2);
2837 }
2838
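/*
 * Both SMBus helpers above share one bounded-polling shape: read the
 * status register, sleep one millisecond, and give up after
 * JME_SMB_BUSY_TIMEOUT attempts instead of spinning forever.  A
 * minimal sketch of that pattern with a hypothetical read_status()
 * accessor (illustrative only):
 *
 *      static int wait_while_set(u32 (*read_status)(void), u32 mask,
 *                                int tries)
 *      {
 *              while ((read_status() & mask) && --tries)
 *                      msleep(1);
 *              return tries ? 0 : -ETIMEDOUT;
 *      }
 */
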
2839 static int
2840 jme_get_eeprom_len(struct net_device *netdev)
2841 {
2842         struct jme_adapter *jme = netdev_priv(netdev);
2843         u32 val;
2844         val = jread32(jme, JME_SMBCSR);
2845         return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2846 }
2847
2848 static int
2849 jme_get_eeprom(struct net_device *netdev,
2850                 struct ethtool_eeprom *eeprom, u8 *data)
2851 {
2852         struct jme_adapter *jme = netdev_priv(netdev);
2853         int i, offset = eeprom->offset, len = eeprom->len;
2854
2855         /*
2856          * ethtool will check the boundary for us
2857          */
2858         eeprom->magic = JME_EEPROM_MAGIC;
2859         for (i = 0 ; i < len ; ++i)
2860                 data[i] = jme_smb_read(jme, i + offset);
2861
2862         return 0;
2863 }
2864
2865 static int
2866 jme_set_eeprom(struct net_device *netdev,
2867                 struct ethtool_eeprom *eeprom, u8 *data)
2868 {
2869         struct jme_adapter *jme = netdev_priv(netdev);
2870         int i, offset = eeprom->offset, len = eeprom->len;
2871
2872         if (eeprom->magic != JME_EEPROM_MAGIC)
2873                 return -EINVAL;
2874
2875         /*
2876          * ethtool will check the boundary for us
2877          */
2878         for (i = 0 ; i < len ; ++i)
2879                 jme_smb_write(jme, i + offset, data[i]);
2880
2881         return 0;
2882 }
2883
2884 static const struct ethtool_ops jme_ethtool_ops = {
2885         .get_drvinfo            = jme_get_drvinfo,
2886         .get_regs_len           = jme_get_regs_len,
2887         .get_regs               = jme_get_regs,
2888         .get_coalesce           = jme_get_coalesce,
2889         .set_coalesce           = jme_set_coalesce,
2890         .get_pauseparam         = jme_get_pauseparam,
2891         .set_pauseparam         = jme_set_pauseparam,
2892         .get_wol                = jme_get_wol,
2893         .set_wol                = jme_set_wol,
2894         .get_settings           = jme_get_settings,
2895         .set_settings           = jme_set_settings,
2896         .get_link               = jme_get_link,
2897         .get_msglevel           = jme_get_msglevel,
2898         .set_msglevel           = jme_set_msglevel,
2899         .nway_reset             = jme_nway_reset,
2900         .get_eeprom_len         = jme_get_eeprom_len,
2901         .get_eeprom             = jme_get_eeprom,
2902         .set_eeprom             = jme_set_eeprom,
2903 };
2904
2905 static int
2906 jme_pci_dma64(struct pci_dev *pdev)
2907 {
2908         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2909             !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
2910                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2911                         return 1;
2912
2913         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
2914             !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2915                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
2916                         return 1;
2917
2918         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
2919                 if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2920                         return 0;
2921
2922         return -1;
2923 }
2924
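/*
 * jme_pci_dma64() walks the DMA mask down from 64 to 40 to 32 bits,
 * setting the streaming and coherent masks together, and reports
 * whether addresses above 4 GiB may be used (which later enables
 * NETIF_F_HIGHDMA).  On newer kernels the same negotiation is usually
 * written with dma_set_mask_and_coherent(); a rough sketch of that
 * form, not used by this driver:
 *
 *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *          dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */
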
2925 static inline void
2926 jme_phy_init(struct jme_adapter *jme)
2927 {
2928         u16 reg26;
2929
2930         reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
2931         jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
2932 }
2933
2934 static inline void
2935 jme_check_hw_ver(struct jme_adapter *jme)
2936 {
2937         u32 chipmode;
2938
2939         chipmode = jread32(jme, JME_CHIPMODE);
2940
2941         jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2942         jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2943         jme->chip_main_rev = jme->chiprev & 0xF;
2944         jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
2945 }
2946
2947 static const struct net_device_ops jme_netdev_ops = {
2948         .ndo_open               = jme_open,
2949         .ndo_stop               = jme_close,
2950         .ndo_validate_addr      = eth_validate_addr,
2951         .ndo_do_ioctl           = jme_ioctl,
2952         .ndo_start_xmit         = jme_start_xmit,
2953         .ndo_set_mac_address    = jme_set_macaddr,
2954         .ndo_set_rx_mode        = jme_set_multi,
2955         .ndo_change_mtu         = jme_change_mtu,
2956         .ndo_tx_timeout         = jme_tx_timeout,
2957         .ndo_fix_features       = jme_fix_features,
2958         .ndo_set_features       = jme_set_features,
2959 #ifdef CONFIG_NET_POLL_CONTROLLER
2960         .ndo_poll_controller    = jme_netpoll,
2961 #endif
2962 };
2963
2964 static int __devinit
2965 jme_init_one(struct pci_dev *pdev,
2966              const struct pci_device_id *ent)
2967 {
2968         int rc = 0, using_dac, i;
2969         struct net_device *netdev;
2970         struct jme_adapter *jme;
2971         u16 bmcr, bmsr;
2972         u32 apmc;
2973
2974         /*
2975          * set up PCI device basics
2976          */
2977         pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2978                                PCIE_LINK_STATE_CLKPM);
2979
2980         rc = pci_enable_device(pdev);
2981         if (rc) {
2982                 pr_err("Cannot enable PCI device\n");
2983                 goto err_out;
2984         }
2985
2986         using_dac = jme_pci_dma64(pdev);
2987         if (using_dac < 0) {
2988                 pr_err("Cannot set PCI DMA Mask\n");
2989                 rc = -EIO;
2990                 goto err_out_disable_pdev;
2991         }
2992
2993         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2994                 pr_err("No PCI resource region found\n");
2995                 rc = -ENOMEM;
2996                 goto err_out_disable_pdev;
2997         }
2998
2999         rc = pci_request_regions(pdev, DRV_NAME);
3000         if (rc) {
3001                 pr_err("Cannot obtain PCI resource region\n");
3002                 goto err_out_disable_pdev;
3003         }
3004
3005         pci_set_master(pdev);
3006
3007         /*
3008          * alloc and init net device
3009          */
3010         netdev = alloc_etherdev(sizeof(*jme));
3011         if (!netdev) {
3012                 rc = -ENOMEM;
3013                 goto err_out_release_regions;
3014         }
3015         netdev->netdev_ops = &jme_netdev_ops;
3016         netdev->ethtool_ops             = &jme_ethtool_ops;
3017         netdev->watchdog_timeo          = TX_TIMEOUT;
3018         netdev->hw_features             =       NETIF_F_IP_CSUM |
3019                                                 NETIF_F_IPV6_CSUM |
3020                                                 NETIF_F_SG |
3021                                                 NETIF_F_TSO |
3022                                                 NETIF_F_TSO6 |
3023                                                 NETIF_F_RXCSUM;
3024         netdev->features                =       NETIF_F_IP_CSUM |
3025                                                 NETIF_F_IPV6_CSUM |
3026                                                 NETIF_F_SG |
3027                                                 NETIF_F_TSO |
3028                                                 NETIF_F_TSO6 |
3029                                                 NETIF_F_HW_VLAN_TX |
3030                                                 NETIF_F_HW_VLAN_RX;
3031         if (using_dac)
3032                 netdev->features        |=      NETIF_F_HIGHDMA;
3033
3034         SET_NETDEV_DEV(netdev, &pdev->dev);
3035         pci_set_drvdata(pdev, netdev);
3036
3037         /*
3038          * init adapter info
3039          */
3040         jme = netdev_priv(netdev);
3041         jme->pdev = pdev;
3042         jme->dev = netdev;
3043         jme->jme_rx = netif_rx;
3044         jme->old_mtu = netdev->mtu = 1500;
3045         jme->phylink = 0;
3046         jme->tx_ring_size = 1 << 10;
3047         jme->tx_ring_mask = jme->tx_ring_size - 1;
3048         jme->tx_wake_threshold = 1 << 9;
3049         jme->rx_ring_size = 1 << 9;
3050         jme->rx_ring_mask = jme->rx_ring_size - 1;
3051         jme->msg_enable = JME_DEF_MSG_ENABLE;
3052         jme->regs = ioremap(pci_resource_start(pdev, 0),
3053                              pci_resource_len(pdev, 0));
3054         if (!(jme->regs)) {
3055                 pr_err("Failed to map PCI resource region\n");
3056                 rc = -ENOMEM;
3057                 goto err_out_free_netdev;
3058         }
3059
3060         if (no_pseudohp) {
3061                 apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
3062                 jwrite32(jme, JME_APMC, apmc);
3063         } else if (force_pseudohp) {
3064                 apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
3065                 jwrite32(jme, JME_APMC, apmc);
3066         }
3067
3068         NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
3069
3070         spin_lock_init(&jme->phy_lock);
3071         spin_lock_init(&jme->macaddr_lock);
3072         spin_lock_init(&jme->rxmcs_lock);
3073
3074         atomic_set(&jme->link_changing, 1);
3075         atomic_set(&jme->rx_cleaning, 1);
3076         atomic_set(&jme->tx_cleaning, 1);
3077         atomic_set(&jme->rx_empty, 1);
3078
3079         tasklet_init(&jme->pcc_task,
3080                      jme_pcc_tasklet,
3081                      (unsigned long) jme);
3082         tasklet_init(&jme->linkch_task,
3083                      jme_link_change_tasklet,
3084                      (unsigned long) jme);
3085         tasklet_init(&jme->txclean_task,
3086                      jme_tx_clean_tasklet,
3087                      (unsigned long) jme);
3088         tasklet_init(&jme->rxclean_task,
3089                      jme_rx_clean_tasklet,
3090                      (unsigned long) jme);
3091         tasklet_init(&jme->rxempty_task,
3092                      jme_rx_empty_tasklet,
3093                      (unsigned long) jme);
3094         tasklet_disable_nosync(&jme->linkch_task);
3095         tasklet_disable_nosync(&jme->txclean_task);
3096         tasklet_disable_nosync(&jme->rxclean_task);
3097         tasklet_disable_nosync(&jme->rxempty_task);
3098         jme->dpi.cur = PCC_P1;
3099
3100         jme->reg_ghc = 0;
3101         jme->reg_rxcs = RXCS_DEFAULT;
3102         jme->reg_rxmcs = RXMCS_DEFAULT;
3103         jme->reg_txpfc = 0;
3104         jme->reg_pmcs = PMCS_MFEN;
3105         jme->reg_gpreg1 = GPREG1_DEFAULT;
3106
3107         if (jme->reg_rxmcs & RXMCS_CHECKSUM)
3108                 netdev->features |= NETIF_F_RXCSUM;
3109
3110         /*
3111          * Get Max Read Req Size from PCI Config Space
3112          */
3113         pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
3114         jme->mrrs &= PCI_DCSR_MRRS_MASK;
3115         switch (jme->mrrs) {
3116         case MRRS_128B:
3117                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
3118                 break;
3119         case MRRS_256B:
3120                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
3121                 break;
3122         default:
3123                 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
3124                 break;
3125         }
3126
3127         /*
3128          * Must check before reset_mac_processor
3129          */
3130         jme_check_hw_ver(jme);
3131         jme->mii_if.dev = netdev;
3132         if (jme->fpgaver) {
3133                 jme->mii_if.phy_id = 0;
3134                 for (i = 1 ; i < 32 ; ++i) {
3135                         bmcr = jme_mdio_read(netdev, i, MII_BMCR);
3136                         bmsr = jme_mdio_read(netdev, i, MII_BMSR);
3137                         if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3138                                 jme->mii_if.phy_id = i;
3139                                 break;
3140                         }
3141                 }
3142
3143                 if (!jme->mii_if.phy_id) {
3144                         rc = -EIO;
3145                         pr_err("Cannot find phy_id\n");
3146                         goto err_out_unmap;
3147                 }
3148
3149                 jme->reg_ghc |= GHC_LINK_POLL;
3150         } else {
3151                 jme->mii_if.phy_id = 1;
3152         }
3153         if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
3154                 jme->mii_if.supports_gmii = true;
3155         else
3156                 jme->mii_if.supports_gmii = false;
3157         jme->mii_if.phy_id_mask = 0x1F;
3158         jme->mii_if.reg_num_mask = 0x1F;
3159         jme->mii_if.mdio_read = jme_mdio_read;
3160         jme->mii_if.mdio_write = jme_mdio_write;
3161
3162         jme_clear_pm(jme);
3163         pci_set_power_state(jme->pdev, PCI_D0);
3164         device_set_wakeup_enable(&pdev->dev, true);
3165
3166         jme_set_phyfifo_5level(jme);
3167         jme->pcirev = pdev->revision;
3168         if (!jme->fpgaver)
3169                 jme_phy_init(jme);
3170         jme_phy_off(jme);
3171
3172         /*
3173          * Reset MAC processor and reload EEPROM for MAC Address
3174          */
3175         jme_reset_mac_processor(jme);
3176         rc = jme_reload_eeprom(jme);
3177         if (rc) {
3178                 pr_err("Failed to reload EEPROM for reading MAC address\n");
3179                 goto err_out_unmap;
3180         }
3181         jme_load_macaddr(netdev);
3182
3183         /*
3184          * Tell stack that we are not ready to work until open()
3185          */
3186         netif_carrier_off(netdev);
3187
3188         rc = register_netdev(netdev);
3189         if (rc) {
3190                 pr_err("Cannot register net device\n");
3191                 goto err_out_unmap;
3192         }
3193
3194         netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3195                    (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
3196                    "JMC250 Gigabit Ethernet" :
3197                    (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
3198                    "JMC260 Fast Ethernet" : "Unknown",
3199                    (jme->fpgaver != 0) ? " (FPGA)" : "",
3200                    (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
3201                    jme->pcirev, netdev->dev_addr);
3202
3203         return 0;
3204
3205 err_out_unmap:
3206         iounmap(jme->regs);
3207 err_out_free_netdev:
3208         pci_set_drvdata(pdev, NULL);
3209         free_netdev(netdev);
3210 err_out_release_regions:
3211         pci_release_regions(pdev);
3212 err_out_disable_pdev:
3213         pci_disable_device(pdev);
3214 err_out:
3215         return rc;
3216 }
3217
3218 static void __devexit
3219 jme_remove_one(struct pci_dev *pdev)
3220 {
3221         struct net_device *netdev = pci_get_drvdata(pdev);
3222         struct jme_adapter *jme = netdev_priv(netdev);