2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
8 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/ethtool.h>
33 #include <linux/mii.h>
34 #include <linux/crc32.h>
35 #include <linux/delay.h>
36 #include <linux/spinlock.h>
39 #include <linux/ipv6.h>
40 #include <linux/tcp.h>
41 #include <linux/udp.h>
42 #include <linux/if_vlan.h>
43 #include <linux/slab.h>
44 #include <net/ip6_checksum.h>
47 static int force_pseudohp = -1;
48 static int no_pseudohp = -1;
49 static int no_extplug = -1;
50 module_param(force_pseudohp, int, 0);
51 MODULE_PARM_DESC(force_pseudohp,
52 "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
53 module_param(no_pseudohp, int, 0);
54 MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
55 module_param(no_extplug, int, 0);
56 MODULE_PARM_DESC(no_extplug,
57 "Do not use external plug signal for pseudo hot-plug.");
60 jme_mdio_read(struct net_device *netdev, int phy, int reg)
62 struct jme_adapter *jme = netdev_priv(netdev);
63 int i, val, again = (reg == MII_BMSR) ? 1 : 0;
66 jwrite32(jme, JME_SMI, SMI_OP_REQ |
71 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
73 val = jread32(jme, JME_SMI);
74 if ((val & SMI_OP_REQ) == 0)
79 pr_err("phy(%d) read timeout : %d\n", phy, reg);
86 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
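/*
 * SMI read handshake as implemented above: post SMI_OP_REQ together
 * with the phy/reg address, poll until the hardware clears
 * SMI_OP_REQ, then extract the 16-bit result from the SMI_DATA field.
 */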
90 jme_mdio_write(struct net_device *netdev,
91 int phy, int reg, int val)
93 struct jme_adapter *jme = netdev_priv(netdev);
96 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
97 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
98 smi_phy_addr(phy) | smi_reg_addr(reg));
101 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
103 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
108 pr_err("phy(%d) write timeout : %d\n", phy, reg);
112 jme_reset_phy_processor(struct jme_adapter *jme)
116 jme_mdio_write(jme->dev,
118 MII_ADVERTISE, ADVERTISE_ALL |
119 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
121 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
122 jme_mdio_write(jme->dev,
125 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
127 val = jme_mdio_read(jme->dev,
131 jme_mdio_write(jme->dev,
133 MII_BMCR, val | BMCR_RESET);
137 jme_setup_wakeup_frame(struct jme_adapter *jme,
138 const u32 *mask, u32 crc, int fnr)
145 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
147 jwrite32(jme, JME_WFODP, crc);
153 for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
154 jwrite32(jme, JME_WFOI,
155 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
156 (fnr & WFOI_FRAME_SEL));
158 jwrite32(jme, JME_WFODP, mask[i]);
164 jme_mac_rxclk_off(struct jme_adapter *jme)
166 jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
167 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
171 jme_mac_rxclk_on(struct jme_adapter *jme)
173 jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
174 jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
178 jme_mac_txclk_off(struct jme_adapter *jme)
180 jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
181 jwrite32f(jme, JME_GHC, jme->reg_ghc);
185 jme_mac_txclk_on(struct jme_adapter *jme)
187 u32 speed = jme->reg_ghc & GHC_SPEED;
188 if (speed == GHC_SPEED_1000M)
189 jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
191 jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
192 jwrite32f(jme, JME_GHC, jme->reg_ghc);
196 jme_reset_ghc_speed(struct jme_adapter *jme)
198 jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
199 jwrite32f(jme, JME_GHC, jme->reg_ghc);
203 jme_reset_250A2_workaround(struct jme_adapter *jme)
205 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
207 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
211 jme_assert_ghc_reset(struct jme_adapter *jme)
213 jme->reg_ghc |= GHC_SWRST;
214 jwrite32f(jme, JME_GHC, jme->reg_ghc);
218 jme_clear_ghc_reset(struct jme_adapter *jme)
220 jme->reg_ghc &= ~GHC_SWRST;
221 jwrite32f(jme, JME_GHC, jme->reg_ghc);
225 jme_reset_mac_processor(struct jme_adapter *jme)
227 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
228 u32 crc = 0xCDCDCDCD;
232 jme_reset_ghc_speed(jme);
233 jme_reset_250A2_workaround(jme);
235 jme_mac_rxclk_on(jme);
236 jme_mac_txclk_on(jme);
238 jme_assert_ghc_reset(jme);
240 jme_mac_rxclk_off(jme);
241 jme_mac_txclk_off(jme);
243 jme_clear_ghc_reset(jme);
245 jme_mac_rxclk_on(jme);
246 jme_mac_txclk_on(jme);
248 jme_mac_rxclk_off(jme);
249 jme_mac_txclk_off(jme);
251 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
252 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
253 jwrite32(jme, JME_RXQDC, 0x00000000);
254 jwrite32(jme, JME_RXNDA, 0x00000000);
255 jwrite32(jme, JME_TXDBA_LO, 0x00000000);
256 jwrite32(jme, JME_TXDBA_HI, 0x00000000);
257 jwrite32(jme, JME_TXQDC, 0x00000000);
258 jwrite32(jme, JME_TXNDA, 0x00000000);
260 jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
261 jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
262 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
263 jme_setup_wakeup_frame(jme, mask, crc, i);
265 gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
267 gpreg0 = GPREG0_DEFAULT;
268 jwrite32(jme, JME_GPREG0, gpreg0);
272 jme_clear_pm_enable_wol(struct jme_adapter *jme)
274 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
278 jme_clear_pm_disable_wol(struct jme_adapter *jme)
280 jwrite32(jme, JME_PMCS, PMCS_STMASK);
284 jme_reload_eeprom(struct jme_adapter *jme)
289 val = jread32(jme, JME_SMBCSR);
291 if (val & SMBCSR_EEPROMD) {
293 jwrite32(jme, JME_SMBCSR, val);
294 val |= SMBCSR_RELOAD;
295 jwrite32(jme, JME_SMBCSR, val);
298 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
300 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
305 pr_err("eeprom reload timeout\n");
314 jme_load_macaddr(struct net_device *netdev)
316 struct jme_adapter *jme = netdev_priv(netdev);
317 unsigned char macaddr[6];
320 spin_lock_bh(&jme->macaddr_lock);
321 val = jread32(jme, JME_RXUMA_LO);
322 macaddr[0] = (val >> 0) & 0xFF;
323 macaddr[1] = (val >> 8) & 0xFF;
324 macaddr[2] = (val >> 16) & 0xFF;
325 macaddr[3] = (val >> 24) & 0xFF;
326 val = jread32(jme, JME_RXUMA_HI);
327 macaddr[4] = (val >> 0) & 0xFF;
328 macaddr[5] = (val >> 8) & 0xFF;
329 memcpy(netdev->dev_addr, macaddr, 6);
330 spin_unlock_bh(&jme->macaddr_lock);
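/*
 * The unicast address registers hold the MAC in little-endian byte
 * order.  Illustrative values only: RXUMA_LO == 0xDDCCBBAA and
 * RXUMA_HI == 0x0000FFEE decode to the station address
 * AA:BB:CC:DD:EE:FF.
 */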
334 jme_set_rx_pcc(struct jme_adapter *jme, int p)
338 jwrite32(jme, JME_PCCRX0,
339 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
340 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
343 jwrite32(jme, JME_PCCRX0,
344 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
345 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
348 jwrite32(jme, JME_PCCRX0,
349 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
350 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
353 jwrite32(jme, JME_PCCRX0,
354 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
355 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
362 if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
363 netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
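/*
 * Each PCC preset pairs a timeout (PCC_Px_TO) with a packet count
 * (PCC_Px_CNT); the RX interrupt is expected to fire when either
 * threshold is reached, trading latency against interrupt rate.
 * PCC_OFF is used for NAPI polling mode, where no coalescing is
 * wanted.
 */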
367 jme_start_irq(struct jme_adapter *jme)
369 register struct dynpcc_info *dpi = &(jme->dpi);
371 jme_set_rx_pcc(jme, PCC_P1);
373 dpi->attempt = PCC_P1;
376 jwrite32(jme, JME_PCCTX,
377 ((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
378 ((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
385 jwrite32(jme, JME_IENS, INTR_ENABLE);
389 jme_stop_irq(struct jme_adapter *jme)
394 jwrite32f(jme, JME_IENC, INTR_ENABLE);
398 jme_linkstat_from_phy(struct jme_adapter *jme)
402 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
403 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
404 if (bmsr & BMSR_ANCOMP)
405 phylink |= PHY_LINK_AUTONEG_COMPLETE;
411 jme_set_phyfifo_5level(struct jme_adapter *jme)
413 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
417 jme_set_phyfifo_8level(struct jme_adapter *jme)
419 jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
423 jme_check_link(struct net_device *netdev, int testonly)
425 struct jme_adapter *jme = netdev_priv(netdev);
426 u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
433 phylink = jme_linkstat_from_phy(jme);
435 phylink = jread32(jme, JME_PHY_LINK);
437 if (phylink & PHY_LINK_UP) {
438 if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
440 * If autonegotiation was not enabled,
441 * speed/duplex info must be obtained from the SMI registers
443 phylink = PHY_LINK_UP;
445 bmcr = jme_mdio_read(jme->dev,
449 phylink |= ((bmcr & BMCR_SPEED1000) &&
450 (bmcr & BMCR_SPEED100) == 0) ?
451 PHY_LINK_SPEED_1000M :
452 (bmcr & BMCR_SPEED100) ?
453 PHY_LINK_SPEED_100M :
456 phylink |= (bmcr & BMCR_FULLDPLX) ?
459 strcat(linkmsg, "Forced: ");
462 * Keep polling until speed/duplex resolution completes
464 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
470 phylink = jme_linkstat_from_phy(jme);
472 phylink = jread32(jme, JME_PHY_LINK);
475 pr_err("Waiting speed resolve timeout\n");
477 strcat(linkmsg, "ANed: ");
480 if (jme->phylink == phylink) {
487 jme->phylink = phylink;
490 * The speed/duplex bits of jme->reg_ghc were already cleared
491 * by jme_reset_mac_processor()
493 switch (phylink & PHY_LINK_SPEED_MASK) {
494 case PHY_LINK_SPEED_10M:
495 jme->reg_ghc |= GHC_SPEED_10M;
496 strcat(linkmsg, "10 Mbps, ");
498 case PHY_LINK_SPEED_100M:
499 jme->reg_ghc |= GHC_SPEED_100M;
500 strcat(linkmsg, "100 Mbps, ");
502 case PHY_LINK_SPEED_1000M:
503 jme->reg_ghc |= GHC_SPEED_1000M;
504 strcat(linkmsg, "1000 Mbps, ");
510 if (phylink & PHY_LINK_DUPLEX) {
511 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
512 jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
513 jme->reg_ghc |= GHC_DPX;
515 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
519 jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
522 jwrite32(jme, JME_GHC, jme->reg_ghc);
524 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
525 jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
527 if (!(phylink & PHY_LINK_DUPLEX))
528 jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
529 switch (phylink & PHY_LINK_SPEED_MASK) {
530 case PHY_LINK_SPEED_10M:
531 jme_set_phyfifo_8level(jme);
532 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
534 case PHY_LINK_SPEED_100M:
535 jme_set_phyfifo_5level(jme);
536 jme->reg_gpreg1 |= GPREG1_RSSPATCH;
538 case PHY_LINK_SPEED_1000M:
539 jme_set_phyfifo_8level(jme);
545 jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
547 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
550 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
553 netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
554 netif_carrier_on(netdev);
559 netif_info(jme, link, jme->dev, "Link is down\n");
561 netif_carrier_off(netdev);
569 jme_setup_tx_resources(struct jme_adapter *jme)
571 struct jme_ring *txring = &(jme->txring[0]);
573 txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
574 TX_RING_ALLOC_SIZE(jme->tx_ring_size),
584 txring->desc = (void *)ALIGN((unsigned long)(txring->alloc),
586 txring->dma = ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
587 txring->next_to_use = 0;
588 atomic_set(&txring->next_to_clean, 0);
589 atomic_set(&txring->nr_free, jme->tx_ring_size);
591 txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
592 jme->tx_ring_size, GFP_ATOMIC);
593 if (unlikely(!(txring->bufinf)))
594 goto err_free_txring;
597 * Initialize Transmit Descriptors
599 memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
600 memset(txring->bufinf, 0,
601 sizeof(struct jme_buffer_info) * jme->tx_ring_size);
606 dma_free_coherent(&(jme->pdev->dev),
607 TX_RING_ALLOC_SIZE(jme->tx_ring_size),
613 txring->dmaalloc = 0;
615 txring->bufinf = NULL;
621 jme_free_tx_resources(struct jme_adapter *jme)
624 struct jme_ring *txring = &(jme->txring[0]);
625 struct jme_buffer_info *txbi;
628 if (txring->bufinf) {
629 for (i = 0 ; i < jme->tx_ring_size ; ++i) {
630 txbi = txring->bufinf + i;
632 dev_kfree_skb(txbi->skb);
638 txbi->start_xmit = 0;
640 kfree(txring->bufinf);
643 dma_free_coherent(&(jme->pdev->dev),
644 TX_RING_ALLOC_SIZE(jme->tx_ring_size),
648 txring->alloc = NULL;
650 txring->dmaalloc = 0;
652 txring->bufinf = NULL;
654 txring->next_to_use = 0;
655 atomic_set(&txring->next_to_clean, 0);
656 atomic_set(&txring->nr_free, 0);
660 jme_enable_tx_engine(struct jme_adapter *jme)
665 jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
669 * Setup TX Queue 0 DMA Base Address
671 jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
672 jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
673 jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
676 * Setup TX Descriptor Count
678 jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
684 jwrite32f(jme, JME_TXCS, jme->reg_txcs |
689 * Start clock for TX MAC Processor
691 jme_mac_txclk_on(jme);
695 jme_restart_tx_engine(struct jme_adapter *jme)
700 jwrite32(jme, JME_TXCS, jme->reg_txcs |
706 jme_disable_tx_engine(struct jme_adapter *jme)
714 jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
717 val = jread32(jme, JME_TXCS);
718 for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
720 val = jread32(jme, JME_TXCS);
725 pr_err("Disable TX engine timeout\n");
728 * Stop clock for TX MAC Processor
730 jme_mac_txclk_off(jme);
734 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
736 struct jme_ring *rxring = &(jme->rxring[0]);
737 register struct rxdesc *rxdesc = rxring->desc;
738 struct jme_buffer_info *rxbi = rxring->bufinf;
744 rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32);
745 rxdesc->desc1.bufaddrl = cpu_to_le32(
746 (__u64)rxbi->mapping & 0xFFFFFFFFUL);
747 rxdesc->desc1.datalen = cpu_to_le16(rxbi->len);
748 if (jme->dev->features & NETIF_F_HIGHDMA)
749 rxdesc->desc1.flags = RXFLAG_64BIT;
751 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
755 jme_make_new_rx_buf(struct jme_adapter *jme, int i)
757 struct jme_ring *rxring = &(jme->rxring[0]);
758 struct jme_buffer_info *rxbi = rxring->bufinf + i;
762 skb = netdev_alloc_skb(jme->dev,
763 jme->dev->mtu + RX_EXTRA_LEN);
767 mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
768 offset_in_page(skb->data), skb_tailroom(skb),
770 if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
775 if (likely(rxbi->mapping))
776 pci_unmap_page(jme->pdev, rxbi->mapping,
777 rxbi->len, PCI_DMA_FROMDEVICE);
780 rxbi->len = skb_tailroom(skb);
781 rxbi->mapping = mapping;
786 jme_free_rx_buf(struct jme_adapter *jme, int i)
788 struct jme_ring *rxring = &(jme->rxring[0]);
789 struct jme_buffer_info *rxbi = rxring->bufinf;
793 pci_unmap_page(jme->pdev,
797 dev_kfree_skb(rxbi->skb);
805 jme_free_rx_resources(struct jme_adapter *jme)
808 struct jme_ring *rxring = &(jme->rxring[0]);
811 if (rxring->bufinf) {
812 for (i = 0 ; i < jme->rx_ring_size ; ++i)
813 jme_free_rx_buf(jme, i);
814 kfree(rxring->bufinf);
817 dma_free_coherent(&(jme->pdev->dev),
818 RX_RING_ALLOC_SIZE(jme->rx_ring_size),
821 rxring->alloc = NULL;
823 rxring->dmaalloc = 0;
825 rxring->bufinf = NULL;
827 rxring->next_to_use = 0;
828 atomic_set(&rxring->next_to_clean, 0);
832 jme_setup_rx_resources(struct jme_adapter *jme)
835 struct jme_ring *rxring = &(jme->rxring[0]);
837 rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
838 RX_RING_ALLOC_SIZE(jme->rx_ring_size),
847 rxring->desc = (void *)ALIGN((unsigned long)(rxring->alloc),
849 rxring->dma = ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
850 rxring->next_to_use = 0;
851 atomic_set(&rxring->next_to_clean, 0);
853 rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
854 jme->rx_ring_size, GFP_ATOMIC);
855 if (unlikely(!(rxring->bufinf)))
856 goto err_free_rxring;
859 * Initialize Receive Descriptors
861 memset(rxring->bufinf, 0,
862 sizeof(struct jme_buffer_info) * jme->rx_ring_size);
863 for (i = 0 ; i < jme->rx_ring_size ; ++i) {
864 if (unlikely(jme_make_new_rx_buf(jme, i))) {
865 jme_free_rx_resources(jme);
869 jme_set_clean_rxdesc(jme, i);
875 dma_free_coherent(&(jme->pdev->dev),
876 RX_RING_ALLOC_SIZE(jme->rx_ring_size),
881 rxring->dmaalloc = 0;
883 rxring->bufinf = NULL;
889 jme_enable_rx_engine(struct jme_adapter *jme)
894 jwrite32(jme, JME_RXCS, jme->reg_rxcs |
899 * Setup RX DMA Base Address
901 jwrite32(jme, JME_RXDBA_LO, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
902 jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
903 jwrite32(jme, JME_RXNDA, (__u64)(jme->rxring[0].dma) & 0xFFFFFFFFUL);
906 * Setup RX Descriptor Count
908 jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
911 * Setup Unicast Filter
913 jme_set_unicastaddr(jme->dev);
914 jme_set_multi(jme->dev);
920 jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
926 * Start clock for RX MAC Processor
928 jme_mac_rxclk_on(jme);
932 jme_restart_rx_engine(struct jme_adapter *jme)
937 jwrite32(jme, JME_RXCS, jme->reg_rxcs |
944 jme_disable_rx_engine(struct jme_adapter *jme)
952 jwrite32(jme, JME_RXCS, jme->reg_rxcs);
955 val = jread32(jme, JME_RXCS);
956 for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
958 val = jread32(jme, JME_RXCS);
963 pr_err("Disable RX engine timeout\n");
966 * Stop clock for RX MAC Processor
968 jme_mac_rxclk_off(jme);
972 jme_udpsum(struct sk_buff *skb)
976 if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
978 if (skb->protocol != htons(ETH_P_IP))
980 skb_set_network_header(skb, ETH_HLEN);
981 if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
982 (skb->len < (ETH_HLEN +
983 (ip_hdr(skb)->ihl << 2) +
984 sizeof(struct udphdr)))) {
985 skb_reset_network_header(skb);
988 skb_set_transport_header(skb,
989 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
990 csum = udp_hdr(skb)->check;
991 skb_reset_transport_header(skb);
992 skb_reset_network_header(skb);
998 jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
1000 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
1003 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
1004 == RXWBFLAG_TCPON)) {
1005 if (flags & RXWBFLAG_IPV4)
1006 netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
1010 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
1011 == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
1012 if (flags & RXWBFLAG_IPV4)
1013 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
1017 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
1018 == RXWBFLAG_IPV4)) {
1019 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
1027 jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
1029 struct jme_ring *rxring = &(jme->rxring[0]);
1030 struct rxdesc *rxdesc = rxring->desc;
1031 struct jme_buffer_info *rxbi = rxring->bufinf;
1032 struct sk_buff *skb;
1039 pci_dma_sync_single_for_cpu(jme->pdev,
1042 PCI_DMA_FROMDEVICE);
1044 if (unlikely(jme_make_new_rx_buf(jme, idx))) {
1045 pci_dma_sync_single_for_device(jme->pdev,
1048 PCI_DMA_FROMDEVICE);
1050 ++(NET_STAT(jme).rx_dropped);
1052 framesize = le16_to_cpu(rxdesc->descwb.framesize)
1055 skb_reserve(skb, RX_PREPAD_SIZE);
1056 skb_put(skb, framesize);
1057 skb->protocol = eth_type_trans(skb, jme->dev);
1059 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
1060 skb->ip_summed = CHECKSUM_UNNECESSARY;
1062 skb_checksum_none_assert(skb);
1064 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
1065 u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
1067 __vlan_hwaccel_put_tag(skb, vid);
1068 NET_STAT(jme).rx_bytes += 4;
1072 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
1073 cpu_to_le16(RXWBFLAG_DEST_MUL))
1074 ++(NET_STAT(jme).multicast);
1076 NET_STAT(jme).rx_bytes += framesize;
1077 ++(NET_STAT(jme).rx_packets);
1080 jme_set_clean_rxdesc(jme, idx);
1085 jme_process_receive(struct jme_adapter *jme, int limit)
1087 struct jme_ring *rxring = &(jme->rxring[0]);
1088 struct rxdesc *rxdesc = rxring->desc;
1089 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
1091 if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
1094 if (unlikely(atomic_read(&jme->link_changing) != 1))
1097 if (unlikely(!netif_carrier_ok(jme->dev)))
1100 i = atomic_read(&rxring->next_to_clean);
1102 rxdesc = rxring->desc;
1105 if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
1106 !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
1111 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
1113 if (unlikely(desccnt > 1 ||
1114 rxdesc->descwb.errstat & RXWBERR_ALLERR)) {
1116 if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
1117 ++(NET_STAT(jme).rx_crc_errors);
1118 else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
1119 ++(NET_STAT(jme).rx_fifo_errors);
1121 ++(NET_STAT(jme).rx_errors);
1124 limit -= desccnt - 1;
1126 for (j = i, ccnt = desccnt ; ccnt-- ; ) {
1127 jme_set_clean_rxdesc(jme, j);
1128 j = (j + 1) & (mask);
1132 jme_alloc_and_feed_skb(jme, i);
1135 i = (i + desccnt) & (mask);
1139 atomic_set(&rxring->next_to_clean, i);
1142 atomic_inc(&jme->rx_cleaning);
1144 return limit > 0 ? limit : 0;
1149 jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
1151 if (likely(atmp == dpi->cur)) {
1156 if (dpi->attempt == atmp) {
1159 dpi->attempt = atmp;
1166 jme_dynamic_pcc(struct jme_adapter *jme)
1168 register struct dynpcc_info *dpi = &(jme->dpi);
1170 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1171 jme_attempt_pcc(dpi, PCC_P3);
1172 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD ||
1173 dpi->intr_cnt > PCC_INTR_THRESHOLD)
1174 jme_attempt_pcc(dpi, PCC_P2);
1176 jme_attempt_pcc(dpi, PCC_P1);
1178 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1179 if (dpi->attempt < dpi->cur)
1180 tasklet_schedule(&jme->rxclean_task);
1181 jme_set_rx_pcc(jme, dpi->attempt);
1182 dpi->cur = dpi->attempt;
1188 jme_start_pcc_timer(struct jme_adapter *jme)
1190 struct dynpcc_info *dpi = &(jme->dpi);
1191 dpi->last_bytes = NET_STAT(jme).rx_bytes;
1192 dpi->last_pkts = NET_STAT(jme).rx_packets;
1194 jwrite32(jme, JME_TMCSR,
1195 TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
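/*
 * TMCSR drives a 24-bit up-counter: preloading it with
 * 0xFFFFFF - PCC_INTERVAL_US makes it overflow (and interrupt)
 * after roughly PCC_INTERVAL_US ticks -- assuming a microsecond
 * tick, one PCC evaluation interval.
 */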
1199 jme_stop_pcc_timer(struct jme_adapter *jme)
1201 jwrite32(jme, JME_TMCSR, 0);
1205 jme_shutdown_nic(struct jme_adapter *jme)
1209 phylink = jme_linkstat_from_phy(jme);
1211 if (!(phylink & PHY_LINK_UP)) {
1213 * Disable all interrupts before issuing the timer
1216 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
1221 jme_pcc_tasklet(unsigned long arg)
1223 struct jme_adapter *jme = (struct jme_adapter *)arg;
1224 struct net_device *netdev = jme->dev;
1226 if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
1227 jme_shutdown_nic(jme);
1231 if (unlikely(!netif_carrier_ok(netdev) ||
1232 (atomic_read(&jme->link_changing) != 1)
1234 jme_stop_pcc_timer(jme);
1238 if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
1239 jme_dynamic_pcc(jme);
1241 jme_start_pcc_timer(jme);
1245 jme_polling_mode(struct jme_adapter *jme)
1247 jme_set_rx_pcc(jme, PCC_OFF);
1251 jme_interrupt_mode(struct jme_adapter *jme)
1253 jme_set_rx_pcc(jme, PCC_P1);
1257 jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
1260 apmc = jread32(jme, JME_APMC);
1261 return apmc & JME_APMC_PSEUDO_HP_EN;
1265 jme_start_shutdown_timer(struct jme_adapter *jme)
1269 apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
1270 apmc &= ~JME_APMC_EPIEN_CTRL;
1272 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
1275 jwrite32f(jme, JME_APMC, apmc);
1277 jwrite32f(jme, JME_TIMER2, 0);
1278 set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
1279 jwrite32(jme, JME_TMCSR,
1280 TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
1284 jme_stop_shutdown_timer(struct jme_adapter *jme)
1288 jwrite32f(jme, JME_TMCSR, 0);
1289 jwrite32f(jme, JME_TIMER2, 0);
1290 clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);
1292 apmc = jread32(jme, JME_APMC);
1293 apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
1294 jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
1296 jwrite32f(jme, JME_APMC, apmc);
1300 jme_link_change_tasklet(unsigned long arg)
1302 struct jme_adapter *jme = (struct jme_adapter *)arg;
1303 struct net_device *netdev = jme->dev;
1306 while (!atomic_dec_and_test(&jme->link_changing)) {
1307 atomic_inc(&jme->link_changing);
1308 netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
1309 while (atomic_read(&jme->link_changing) != 1)
1310 netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
1313 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
1316 jme->old_mtu = netdev->mtu;
1317 netif_stop_queue(netdev);
1318 if (jme_pseudo_hotplug_enabled(jme))
1319 jme_stop_shutdown_timer(jme);
1321 jme_stop_pcc_timer(jme);
1322 tasklet_disable(&jme->txclean_task);
1323 tasklet_disable(&jme->rxclean_task);
1324 tasklet_disable(&jme->rxempty_task);
1326 if (netif_carrier_ok(netdev)) {
1327 jme_disable_rx_engine(jme);
1328 jme_disable_tx_engine(jme);
1329 jme_reset_mac_processor(jme);
1330 jme_free_rx_resources(jme);
1331 jme_free_tx_resources(jme);
1333 if (test_bit(JME_FLAG_POLL, &jme->flags))
1334 jme_polling_mode(jme);
1336 netif_carrier_off(netdev);
1339 jme_check_link(netdev, 0);
1340 if (netif_carrier_ok(netdev)) {
1341 rc = jme_setup_rx_resources(jme);
1343 pr_err("Allocating resources for RX error, Device STOPPED!\n");
1344 goto out_enable_tasklet;
1347 rc = jme_setup_tx_resources(jme);
1349 pr_err("Allocating resources for TX error, Device STOPPED!\n");
1350 goto err_out_free_rx_resources;
1353 jme_enable_rx_engine(jme);
1354 jme_enable_tx_engine(jme);
1356 netif_start_queue(netdev);
1358 if (test_bit(JME_FLAG_POLL, &jme->flags))
1359 jme_interrupt_mode(jme);
1361 jme_start_pcc_timer(jme);
1362 } else if (jme_pseudo_hotplug_enabled(jme)) {
1363 jme_start_shutdown_timer(jme);
1366 goto out_enable_tasklet;
1368 err_out_free_rx_resources:
1369 jme_free_rx_resources(jme);
1371 tasklet_enable(&jme->txclean_task);
1372 tasklet_hi_enable(&jme->rxclean_task);
1373 tasklet_hi_enable(&jme->rxempty_task);
1375 atomic_inc(&jme->link_changing);
1379 jme_rx_clean_tasklet(unsigned long arg)
1381 struct jme_adapter *jme = (struct jme_adapter *)arg;
1382 struct dynpcc_info *dpi = &(jme->dpi);
1384 jme_process_receive(jme, jme->rx_ring_size);
1390 jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
1392 struct jme_adapter *jme = jme_napi_priv(holder);
1395 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
1397 while (atomic_read(&jme->rx_empty) > 0) {
1398 atomic_dec(&jme->rx_empty);
1399 ++(NET_STAT(jme).rx_dropped);
1400 jme_restart_rx_engine(jme);
1402 atomic_inc(&jme->rx_empty);
1405 JME_RX_COMPLETE(netdev, holder);
1406 jme_interrupt_mode(jme);
1409 JME_NAPI_WEIGHT_SET(budget, rest);
1410 return JME_NAPI_WEIGHT_VAL(budget) - rest;
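/*
 * NAPI accounting: jme_process_receive() returns the unused part of
 * the budget, so budget - rest is the number of packets processed in
 * this poll.  When budget remains, the poll is completed
 * (JME_RX_COMPLETE) and the chip is switched back to interrupt mode.
 */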
1414 jme_rx_empty_tasklet(unsigned long arg)
1416 struct jme_adapter *jme = (struct jme_adapter *)arg;
1418 if (unlikely(atomic_read(&jme->link_changing) != 1))
1421 if (unlikely(!netif_carrier_ok(jme->dev)))
1424 netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
1426 jme_rx_clean_tasklet(arg);
1428 while (atomic_read(&jme->rx_empty) > 0) {
1429 atomic_dec(&jme->rx_empty);
1430 ++(NET_STAT(jme).rx_dropped);
1431 jme_restart_rx_engine(jme);
1433 atomic_inc(&jme->rx_empty);
1437 jme_wake_queue_if_stopped(struct jme_adapter *jme)
1439 struct jme_ring *txring = &(jme->txring[0]);
1442 if (unlikely(netif_queue_stopped(jme->dev) &&
1443 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1444 netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
1445 netif_wake_queue(jme->dev);
1451 jme_tx_clean_tasklet(unsigned long arg)
1453 struct jme_adapter *jme = (struct jme_adapter *)arg;
1454 struct jme_ring *txring = &(jme->txring[0]);
1455 struct txdesc *txdesc = txring->desc;
1456 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1457 int i, j, cnt = 0, max, err, mask;
1459 tx_dbg(jme, "Into txclean\n");
1461 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
1464 if (unlikely(atomic_read(&jme->link_changing) != 1))
1467 if (unlikely(!netif_carrier_ok(jme->dev)))
1470 max = jme->tx_ring_size - atomic_read(&txring->nr_free);
1471 mask = jme->tx_ring_mask;
1473 for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {
1477 if (likely(ctxbi->skb &&
1478 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1480 tx_dbg(jme, "txclean: %d+%d@%lu\n",
1481 i, ctxbi->nr_desc, jiffies);
1483 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1485 for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
1486 ttxbi = txbi + ((i + j) & (mask));
1487 txdesc[(i + j) & (mask)].dw[0] = 0;
1489 pci_unmap_page(jme->pdev,
1498 dev_kfree_skb(ctxbi->skb);
1500 cnt += ctxbi->nr_desc;
1502 if (unlikely(err)) {
1503 ++(NET_STAT(jme).tx_carrier_errors);
1505 ++(NET_STAT(jme).tx_packets);
1506 NET_STAT(jme).tx_bytes += ctxbi->len;
1511 ctxbi->start_xmit = 0;
1517 i = (i + ctxbi->nr_desc) & mask;
1522 tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
1523 atomic_set(&txring->next_to_clean, i);
1524 atomic_add(cnt, &txring->nr_free);
1526 jme_wake_queue_if_stopped(jme);
1529 atomic_inc(&jme->tx_cleaning);
1533 jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
1538 jwrite32f(jme, JME_IENC, INTR_ENABLE);
1540 if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1542 * Link change event is critical;
1543 * all other events are ignored
1545 jwrite32(jme, JME_IEVE, intrstat);
1546 tasklet_schedule(&jme->linkch_task);
1550 if (intrstat & INTR_TMINTR) {
1551 jwrite32(jme, JME_IEVE, INTR_TMINTR);
1552 tasklet_schedule(&jme->pcc_task);
1555 if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
1556 jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
1557 tasklet_schedule(&jme->txclean_task);
1560 if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1561 jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
1567 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
1568 if (intrstat & INTR_RX0EMP)
1569 atomic_inc(&jme->rx_empty);
1571 if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
1572 if (likely(JME_RX_SCHEDULE_PREP(jme))) {
1573 jme_polling_mode(jme);
1574 JME_RX_SCHEDULE(jme);
1578 if (intrstat & INTR_RX0EMP) {
1579 atomic_inc(&jme->rx_empty);
1580 tasklet_hi_schedule(&jme->rxempty_task);
1581 } else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
1582 tasklet_hi_schedule(&jme->rxclean_task);
1588 * Re-enable interrupts
1590 jwrite32f(jme, JME_IENS, INTR_ENABLE);
1594 jme_intr(int irq, void *dev_id)
1596 struct net_device *netdev = dev_id;
1597 struct jme_adapter *jme = netdev_priv(netdev);
1600 intrstat = jread32(jme, JME_IEVE);
1603 * Check if it's really an interrupt for us
1605 if (unlikely((intrstat & INTR_ENABLE) == 0))
1609 * Check if the device still exists
1611 if (unlikely(intrstat == ~((typeof(intrstat))0)))
1614 jme_intr_msi(jme, intrstat);
1620 jme_msi(int irq, void *dev_id)
1622 struct net_device *netdev = dev_id;
1623 struct jme_adapter *jme = netdev_priv(netdev);
1626 intrstat = jread32(jme, JME_IEVE);
1628 jme_intr_msi(jme, intrstat);
1634 jme_reset_link(struct jme_adapter *jme)
1636 jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
1640 jme_restart_an(struct jme_adapter *jme)
1644 spin_lock_bh(&jme->phy_lock);
1645 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1646 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1647 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1648 spin_unlock_bh(&jme->phy_lock);
1652 jme_request_irq(struct jme_adapter *jme)
1655 struct net_device *netdev = jme->dev;
1656 irq_handler_t handler = jme_intr;
1657 int irq_flags = IRQF_SHARED;
1659 if (!pci_enable_msi(jme->pdev)) {
1660 set_bit(JME_FLAG_MSI, &jme->flags);
1665 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1669 "Unable to request %s interrupt (return: %d)\n",
1670 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1673 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1674 pci_disable_msi(jme->pdev);
1675 clear_bit(JME_FLAG_MSI, &jme->flags);
1678 netdev->irq = jme->pdev->irq;
1685 jme_free_irq(struct jme_adapter *jme)
1687 free_irq(jme->pdev->irq, jme->dev);
1688 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1689 pci_disable_msi(jme->pdev);
1690 clear_bit(JME_FLAG_MSI, &jme->flags);
1691 jme->dev->irq = jme->pdev->irq;
1696 jme_new_phy_on(struct jme_adapter *jme)
1700 reg = jread32(jme, JME_PHY_PWR);
1701 reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1702 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
1703 jwrite32(jme, JME_PHY_PWR, reg);
1705 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1706 reg &= ~PE1_GPREG0_PBG;
1707 reg |= PE1_GPREG0_ENBG;
1708 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1712 jme_new_phy_off(struct jme_adapter *jme)
1716 reg = jread32(jme, JME_PHY_PWR);
1717 reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
1718 PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
1719 jwrite32(jme, JME_PHY_PWR, reg);
1721 pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
1722 reg &= ~PE1_GPREG0_PBG;
1723 reg |= PE1_GPREG0_PDD3COLD;
1724 pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
1728 jme_phy_on(struct jme_adapter *jme)
1732 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1733 bmcr &= ~BMCR_PDOWN;
1734 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1736 if (new_phy_power_ctrl(jme->chip_main_rev))
1737 jme_new_phy_on(jme);
1741 jme_phy_off(struct jme_adapter *jme)
1745 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1747 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1749 if (new_phy_power_ctrl(jme->chip_main_rev))
1750 jme_new_phy_off(jme);
1754 jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
1758 phy_addr = JM_PHY_SPEC_REG_READ | specreg;
1759 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1761 return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
1762 JM_PHY_SPEC_DATA_REG);
1766 jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
1770 phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
1771 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
1773 jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
1778 jme_phy_calibration(struct jme_adapter *jme)
1780 u32 ctrl1000, phy_data;
1784 /* Enable PHY test mode 1 */
1785 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1786 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1787 ctrl1000 |= PHY_GAD_TEST_MODE_1;
1788 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
1790 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1791 phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
1792 phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
1793 JM_PHY_EXT_COMM_2_CALI_ENABLE;
1794 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1796 phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
1797 phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
1798 JM_PHY_EXT_COMM_2_CALI_MODE_0 |
1799 JM_PHY_EXT_COMM_2_CALI_LATCH);
1800 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
1802 /* Disable PHY test mode */
1803 ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
1804 ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
1805 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
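/*
 * Calibration sequence as implemented above: enter gigabit PHY test
 * mode 1, pulse the calibration enable/latch bits in extended
 * communication register 2, then clear them and leave test mode.
 */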
1810 jme_phy_setEA(struct jme_adapter *jme)
1812 u32 phy_comm0 = 0, phy_comm1 = 0;
1815 pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
1816 if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
1819 switch (jme->pdev->device) {
1820 case PCI_DEVICE_ID_JMICRON_JMC250:
1821 if (((jme->chip_main_rev == 5) &&
1822 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1823 (jme->chip_sub_rev == 3))) ||
1824 (jme->chip_main_rev >= 6)) {
1828 if ((jme->chip_main_rev == 3) &&
1829 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1832 case PCI_DEVICE_ID_JMICRON_JMC260:
1833 if (((jme->chip_main_rev == 5) &&
1834 ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
1835 (jme->chip_sub_rev == 3))) ||
1836 (jme->chip_main_rev >= 6)) {
1840 if ((jme->chip_main_rev == 3) &&
1841 ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
1843 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
1845 if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
1852 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
1854 jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
1860 jme_open(struct net_device *netdev)
1862 struct jme_adapter *jme = netdev_priv(netdev);
1865 jme_clear_pm_disable_wol(jme);
1866 JME_NAPI_ENABLE(jme);
1868 tasklet_enable(&jme->linkch_task);
1869 tasklet_enable(&jme->txclean_task);
1870 tasklet_hi_enable(&jme->rxclean_task);
1871 tasklet_hi_enable(&jme->rxempty_task);
1873 rc = jme_request_irq(jme);
1880 if (test_bit(JME_FLAG_SSET, &jme->flags))
1881 jme_set_settings(netdev, &jme->old_ecmd);
1883 jme_reset_phy_processor(jme);
1884 jme_phy_calibration(jme);
1886 jme_reset_link(jme);
1891 netif_stop_queue(netdev);
1892 netif_carrier_off(netdev);
1897 jme_set_100m_half(struct jme_adapter *jme)
1902 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1903 tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
1904 BMCR_SPEED1000 | BMCR_FULLDPLX);
1905 tmp |= BMCR_SPEED100;
1908 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);
1911 jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
1913 jwrite32(jme, JME_GHC, GHC_SPEED_100M);
1916 #define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1918 jme_wait_link(struct jme_adapter *jme)
1920 u32 phylink, to = JME_WAIT_LINK_TIME;
1923 phylink = jme_linkstat_from_phy(jme);
1924 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1926 phylink = jme_linkstat_from_phy(jme);
1931 jme_powersave_phy(struct jme_adapter *jme)
1933 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
1934 jme_set_100m_half(jme);
1935 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1937 jme_clear_pm_enable_wol(jme);
1944 jme_close(struct net_device *netdev)
1946 struct jme_adapter *jme = netdev_priv(netdev);
1948 netif_stop_queue(netdev);
1949 netif_carrier_off(netdev);
1954 JME_NAPI_DISABLE(jme);
1956 tasklet_disable(&jme->linkch_task);
1957 tasklet_disable(&jme->txclean_task);
1958 tasklet_disable(&jme->rxclean_task);
1959 tasklet_disable(&jme->rxempty_task);
1961 jme_disable_rx_engine(jme);
1962 jme_disable_tx_engine(jme);
1963 jme_reset_mac_processor(jme);
1964 jme_free_rx_resources(jme);
1965 jme_free_tx_resources(jme);
1973 jme_alloc_txdesc(struct jme_adapter *jme,
1974 struct sk_buff *skb)
1976 struct jme_ring *txring = &(jme->txring[0]);
1977 int idx, nr_alloc, mask = jme->tx_ring_mask;
1979 idx = txring->next_to_use;
1980 nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1982 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1985 atomic_sub(nr_alloc, &txring->nr_free);
1987 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
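/*
 * Descriptor budget per skb is nr_frags + 2: one command descriptor
 * at idx, one for the linear header at idx + 1, and one per page
 * fragment from idx + 2 onwards (see jme_map_tx_skb()).
 */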
1993 jme_fill_tx_map(struct pci_dev *pdev,
1994 struct txdesc *txdesc,
1995 struct jme_buffer_info *txbi,
2003 dmaaddr = pci_map_page(pdev,
2009 pci_dma_sync_single_for_device(pdev,
2016 txdesc->desc2.flags = TXFLAG_OWN;
2017 txdesc->desc2.flags |= (hidma) ? TXFLAG_64BIT : 0;
2018 txdesc->desc2.datalen = cpu_to_le16(len);
2019 txdesc->desc2.bufaddrh = cpu_to_le32((__u64)dmaaddr >> 32);
2020 txdesc->desc2.bufaddrl = cpu_to_le32(
2021 (__u64)dmaaddr & 0xFFFFFFFFUL);
2023 txbi->mapping = dmaaddr;
2028 jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2030 struct jme_ring *txring = &(jme->txring[0]);
2031 struct txdesc *txdesc = txring->desc, *ctxdesc;
2032 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2033 u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
2034 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2035 int mask = jme->tx_ring_mask;
2036 const struct skb_frag_struct *frag;
2039 for (i = 0 ; i < nr_frags ; ++i) {
2040 frag = &skb_shinfo(skb)->frags[i];
2041 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2042 ctxbi = txbi + ((idx + i + 2) & (mask));
2044 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2045 skb_frag_page(frag),
2046 frag->page_offset, skb_frag_size(frag), hidma);
2049 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2050 ctxdesc = txdesc + ((idx + 1) & (mask));
2051 ctxbi = txbi + ((idx + 1) & (mask));
2052 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2053 offset_in_page(skb->data), len, hidma);
2058 jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2060 if (unlikely(skb_shinfo(skb)->gso_size &&
2061 skb_header_cloned(skb) &&
2062 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2071 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2073 *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
2075 *flags |= TXFLAG_LSEN;
2077 if (skb->protocol == htons(ETH_P_IP)) {
2078 struct iphdr *iph = ip_hdr(skb);
2081 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2086 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2088 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
2101 jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
2103 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2106 switch (skb->protocol) {
2107 case htons(ETH_P_IP):
2108 ip_proto = ip_hdr(skb)->protocol;
2110 case htons(ETH_P_IPV6):
2111 ip_proto = ipv6_hdr(skb)->nexthdr;
2120 *flags |= TXFLAG_TCPCS;
2123 *flags |= TXFLAG_UDPCS;
2126 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
2133 jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
2135 if (vlan_tx_tag_present(skb)) {
2136 *flags |= TXFLAG_TAGON;
2137 *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2142 jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2144 struct jme_ring *txring = &(jme->txring[0]);
2145 struct txdesc *txdesc;
2146 struct jme_buffer_info *txbi;
2149 txdesc = (struct txdesc *)txring->desc + idx;
2150 txbi = txring->bufinf + idx;
2156 txdesc->desc1.pktsize = cpu_to_le16(skb->len);
2158 * Set the OWN bit last:
2159 * the kernel may queue packets faster than the NIC sends them,
2160 * and the NIC could otherwise start on this descriptor before
2161 * we tell it to start sending this TX queue.
2162 * All other fields are already filled in correctly.
2165 flags = TXFLAG_OWN | TXFLAG_INT;
2167 * Set checksum flags when not doing TSO
2169 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2170 jme_tx_csum(jme, skb, &flags);
2171 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2172 jme_map_tx_skb(jme, skb, idx);
2173 txdesc->desc1.flags = flags;
2175 * Set TX buffer info after telling the NIC to send,
2176 * for better tx_clean timing
2179 txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
2181 txbi->len = skb->len;
2182 txbi->start_xmit = jiffies;
2183 if (!txbi->start_xmit)
2184 txbi->start_xmit = (0UL-1);
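/*
 * A start_xmit timestamp of 0 means "slot unused" to the stall check
 * in jme_stop_queue_if_full(), so if jiffies happens to be 0 the
 * timestamp is forced to ~0UL instead of being lost.
 */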
2190 jme_stop_queue_if_full(struct jme_adapter *jme)
2192 struct jme_ring *txring = &(jme->txring[0]);
2193 struct jme_buffer_info *txbi = txring->bufinf;
2194 int idx = atomic_read(&txring->next_to_clean);
2199 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
2200 netif_stop_queue(jme->dev);
2201 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
2203 if (atomic_read(&txring->nr_free)
2204 >= (jme->tx_wake_threshold)) {
2205 netif_wake_queue(jme->dev);
2206 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
2210 if (unlikely(txbi->start_xmit &&
2211 (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
2213 netif_stop_queue(jme->dev);
2214 netif_info(jme, tx_queued, jme->dev,
2215 "TX Queue Stopped %d@%lu\n", idx, jiffies);
2220 * This function is already protected by netif_tx_lock()
2224 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2226 struct jme_adapter *jme = netdev_priv(netdev);
2229 if (unlikely(jme_expand_header(jme, skb))) {
2230 ++(NET_STAT(jme).tx_dropped);
2231 return NETDEV_TX_OK;
2234 idx = jme_alloc_txdesc(jme, skb);
2236 if (unlikely(idx < 0)) {
2237 netif_stop_queue(netdev);
2238 netif_err(jme, tx_err, jme->dev,
2239 "BUG! Tx ring full when queue awake!\n");
2241 return NETDEV_TX_BUSY;
2244 jme_fill_tx_desc(jme, skb, idx);
2246 jwrite32(jme, JME_TXCS, jme->reg_txcs |
2247 TXCS_SELECT_QUEUE0 |
2251 tx_dbg(jme, "xmit: %d+%d@%lu\n",
2252 idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2253 jme_stop_queue_if_full(jme);
2255 return NETDEV_TX_OK;
2259 jme_set_unicastaddr(struct net_device *netdev)
2261 struct jme_adapter *jme = netdev_priv(netdev);
2264 val = (netdev->dev_addr[3] & 0xff) << 24 |
2265 (netdev->dev_addr[2] & 0xff) << 16 |
2266 (netdev->dev_addr[1] & 0xff) << 8 |
2267 (netdev->dev_addr[0] & 0xff);
2268 jwrite32(jme, JME_RXUMA_LO, val);
2269 val = (netdev->dev_addr[5] & 0xff) << 8 |
2270 (netdev->dev_addr[4] & 0xff);
2271 jwrite32(jme, JME_RXUMA_HI, val);
2275 jme_set_macaddr(struct net_device *netdev, void *p)
2277 struct jme_adapter *jme = netdev_priv(netdev);
2278 struct sockaddr *addr = p;
2280 if (netif_running(netdev))
2283 spin_lock_bh(&jme->macaddr_lock);
2284 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2285 jme_set_unicastaddr(netdev);
2286 spin_unlock_bh(&jme->macaddr_lock);
2292 jme_set_multi(struct net_device *netdev)
2294 struct jme_adapter *jme = netdev_priv(netdev);
2295 u32 mc_hash[2] = {};
2297 spin_lock_bh(&jme->rxmcs_lock);
2299 jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;
2301 if (netdev->flags & IFF_PROMISC) {
2302 jme->reg_rxmcs |= RXMCS_ALLFRAME;
2303 } else if (netdev->flags & IFF_ALLMULTI) {
2304 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2305 } else if (netdev->flags & IFF_MULTICAST) {
2306 struct netdev_hw_addr *ha;
2309 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2310 netdev_for_each_mc_addr(ha, netdev) {
2311 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2312 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2315 jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
2316 jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
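/*
 * The hash above uses the low 6 bits of the address CRC to select
 * one of 64 filter bits split across RXMCHT_LO/HI.  Example:
 * bit_nr 0x2A sets bit 10 of mc_hash[1], i.e. of RXMCHT_HI.
 */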
2320 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2322 spin_unlock_bh(&jme->rxmcs_lock);
2326 jme_change_mtu(struct net_device *netdev, int new_mtu)
2328 struct jme_adapter *jme = netdev_priv(netdev);
2330 if (new_mtu == jme->old_mtu)
2333 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2334 ((new_mtu) < IPV6_MIN_MTU))
2338 netdev->mtu = new_mtu;
2339 netdev_update_features(netdev);
2341 jme_restart_rx_engine(jme);
2342 jme_reset_link(jme);
2348 jme_tx_timeout(struct net_device *netdev)
2350 struct jme_adapter *jme = netdev_priv(netdev);
2353 jme_reset_phy_processor(jme);
2354 if (test_bit(JME_FLAG_SSET, &jme->flags))
2355 jme_set_settings(netdev, &jme->old_ecmd);
2358 * Force the link to reset again
2360 jme_reset_link(jme);
2363 static inline void jme_pause_rx(struct jme_adapter *jme)
2365 atomic_dec(&jme->link_changing);
2367 jme_set_rx_pcc(jme, PCC_OFF);
2368 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2369 JME_NAPI_DISABLE(jme);
2371 tasklet_disable(&jme->rxclean_task);
2372 tasklet_disable(&jme->rxempty_task);
2376 static inline void jme_resume_rx(struct jme_adapter *jme)
2378 struct dynpcc_info *dpi = &(jme->dpi);
2380 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2381 JME_NAPI_ENABLE(jme);
2383 tasklet_hi_enable(&jme->rxclean_task);
2384 tasklet_hi_enable(&jme->rxempty_task);
2387 dpi->attempt = PCC_P1;
2389 jme_set_rx_pcc(jme, PCC_P1);
2391 atomic_inc(&jme->link_changing);
2395 jme_get_drvinfo(struct net_device *netdev,
2396 struct ethtool_drvinfo *info)
2398 struct jme_adapter *jme = netdev_priv(netdev);
2400 strcpy(info->driver, DRV_NAME);
2401 strcpy(info->version, DRV_VERSION);
2402 strcpy(info->bus_info, pci_name(jme->pdev));
2406 jme_get_regs_len(struct net_device *netdev)
2412 mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2416 for (i = 0 ; i < len ; i += 4)
2417 p[i >> 2] = jread32(jme, reg + i);
2421 mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2424 u16 *p16 = (u16 *)p;
2426 for (i = 0 ; i < reg_nr ; ++i)
2427 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2431 jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
2433 struct jme_adapter *jme = netdev_priv(netdev);
2434 u32 *p32 = (u32 *)p;
2436 memset(p, 0xFF, JME_REG_LEN);
2439 mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);
2442 mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);
2445 mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);
2448 mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);
2451 mdio_memcpy(jme, p32, JME_PHY_REG_NR);
2455 jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2457 struct jme_adapter *jme = netdev_priv(netdev);
2459 ecmd->tx_coalesce_usecs = PCC_TX_TO;
2460 ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2462 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2463 ecmd->use_adaptive_rx_coalesce = false;
2464 ecmd->rx_coalesce_usecs = 0;
2465 ecmd->rx_max_coalesced_frames = 0;
2469 ecmd->use_adaptive_rx_coalesce = true;
2471 switch (jme->dpi.cur) {
2473 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2474 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2477 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2478 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2481 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2482 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2492 jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2494 struct jme_adapter *jme = netdev_priv(netdev);
2495 struct dynpcc_info *dpi = &(jme->dpi);
2497 if (netif_running(netdev))
2500 if (ecmd->use_adaptive_rx_coalesce &&
2501 test_bit(JME_FLAG_POLL, &jme->flags)) {
2502 clear_bit(JME_FLAG_POLL, &jme->flags);
2503 jme->jme_rx = netif_rx;
2505 dpi->attempt = PCC_P1;
2507 jme_set_rx_pcc(jme, PCC_P1);
2508 jme_interrupt_mode(jme);
2509 } else if (!(ecmd->use_adaptive_rx_coalesce) &&
2510 !(test_bit(JME_FLAG_POLL, &jme->flags))) {
2511 set_bit(JME_FLAG_POLL, &jme->flags);
2512 jme->jme_rx = netif_receive_skb;
2513 jme_interrupt_mode(jme);
2520 jme_get_pauseparam(struct net_device *netdev,
2521 struct ethtool_pauseparam *ecmd)
2523 struct jme_adapter *jme = netdev_priv(netdev);
2526 ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
2527 ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;
2529 spin_lock_bh(&jme->phy_lock);
2530 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2531 spin_unlock_bh(&jme->phy_lock);
2534 (val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
2538 jme_set_pauseparam(struct net_device *netdev,
2539 struct ethtool_pauseparam *ecmd)
2541 struct jme_adapter *jme = netdev_priv(netdev);
2544 if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
2545 (ecmd->tx_pause != 0)) {
2548 jme->reg_txpfc |= TXPFC_PF_EN;
2550 jme->reg_txpfc &= ~TXPFC_PF_EN;
2552 jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
2555 spin_lock_bh(&jme->rxmcs_lock);
2556 if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
2557 (ecmd->rx_pause != 0)) {
2560 jme->reg_rxmcs |= RXMCS_FLOWCTRL;
2562 jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;
2564 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2566 spin_unlock_bh(&jme->rxmcs_lock);
2568 spin_lock_bh(&jme->phy_lock);
2569 val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
2570 if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
2571 (ecmd->autoneg != 0)) {
2574 val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2576 val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2578 jme_mdio_write(jme->dev, jme->mii_if.phy_id,
2579 MII_ADVERTISE, val);
2581 spin_unlock_bh(&jme->phy_lock);
2587 jme_get_wol(struct net_device *netdev,
2588 struct ethtool_wolinfo *wol)
2590 struct jme_adapter *jme = netdev_priv(netdev);
2592 wol->supported = WAKE_MAGIC | WAKE_PHY;
2596 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2597 wol->wolopts |= WAKE_PHY;
2599 if (jme->reg_pmcs & PMCS_MFEN)
2600 wol->wolopts |= WAKE_MAGIC;
2605 jme_set_wol(struct net_device *netdev,
2606 struct ethtool_wolinfo *wol)
2608 struct jme_adapter *jme = netdev_priv(netdev);
2610 if (wol->wolopts & (WAKE_MAGICSECURE |
2619 if (wol->wolopts & WAKE_PHY)
2620 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2622 if (wol->wolopts & WAKE_MAGIC)
2623 jme->reg_pmcs |= PMCS_MFEN;
2629 jme_get_settings(struct net_device *netdev,
2630 struct ethtool_cmd *ecmd)
2632 struct jme_adapter *jme = netdev_priv(netdev);
2635 spin_lock_bh(&jme->phy_lock);
2636 rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2637 spin_unlock_bh(&jme->phy_lock);
2642 jme_set_settings(struct net_device *netdev,
2643 struct ethtool_cmd *ecmd)
2645 struct jme_adapter *jme = netdev_priv(netdev);
2648 if (ethtool_cmd_speed(ecmd) == SPEED_1000
2649 && ecmd->autoneg != AUTONEG_ENABLE)
2653 * Check if the user changed only the duplex while force_media is set.
2654 * The hardware would not generate a link change interrupt.
2656 if (jme->mii_if.force_media &&
2657 ecmd->autoneg != AUTONEG_ENABLE &&
2658 (jme->mii_if.full_duplex != ecmd->duplex))
2661 spin_lock_bh(&jme->phy_lock);
2662 rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2663 spin_unlock_bh(&jme->phy_lock);
2667 jme_reset_link(jme);
2668 jme->old_ecmd = *ecmd;
2669 set_bit(JME_FLAG_SSET, &jme->flags);
2676 jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2679 struct jme_adapter *jme = netdev_priv(netdev);
2680 struct mii_ioctl_data *mii_data = if_mii(rq);
2681 unsigned int duplex_chg;
2683 if (cmd == SIOCSMIIREG) {
2684 u16 val = mii_data->val_in;
2685 if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2686 (val & BMCR_SPEED1000))
2690 spin_lock_bh(&jme->phy_lock);
2691 rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2692 spin_unlock_bh(&jme->phy_lock);
2694 if (!rc && (cmd == SIOCSMIIREG)) {
2696 jme_reset_link(jme);
2697 jme_get_settings(netdev, &jme->old_ecmd);
2698 set_bit(JME_FLAG_SSET, &jme->flags);
2705 jme_get_link(struct net_device *netdev)
2707 struct jme_adapter *jme = netdev_priv(netdev);
2708 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2712 jme_get_msglevel(struct net_device *netdev)
2714 struct jme_adapter *jme = netdev_priv(netdev);
2715 return jme->msg_enable;
2719 jme_set_msglevel(struct net_device *netdev, u32 value)
2721 struct jme_adapter *jme = netdev_priv(netdev);
2722 jme->msg_enable = value;
2726 jme_fix_features(struct net_device *netdev, u32 features)
2728 if (netdev->mtu > 1900)
2729 features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
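/*
 * For MTUs above 1900 bytes the TSO and checksum offload flags are
 * masked out, presumably because the hardware offloads cannot handle
 * jumbo frames; the stack then falls back to software checksumming.
 */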
2734 jme_set_features(struct net_device *netdev, u32 features)
2736 struct jme_adapter *jme = netdev_priv(netdev);
2738 spin_lock_bh(&jme->rxmcs_lock);
2739 if (features & NETIF_F_RXCSUM)
2740 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2742 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2743 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2744 spin_unlock_bh(&jme->rxmcs_lock);
2750 jme_nway_reset(struct net_device *netdev)
2752 struct jme_adapter *jme = netdev_priv(netdev);
2753 jme_restart_an(jme);
2758 jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2763 val = jread32(jme, JME_SMBCSR);
2764 to = JME_SMB_BUSY_TIMEOUT;
2765 while ((val & SMBCSR_BUSY) && --to) {
2767 val = jread32(jme, JME_SMBCSR);
2770 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2774 jwrite32(jme, JME_SMBINTF,
2775 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2776 SMBINTF_HWRWN_READ |
2779 val = jread32(jme, JME_SMBINTF);
2780 to = JME_SMB_BUSY_TIMEOUT;
2781 while ((val & SMBINTF_HWCMD) && --to) {
2783 val = jread32(jme, JME_SMBINTF);
2786 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2790 return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
2794 jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2799 val = jread32(jme, JME_SMBCSR);
2800 to = JME_SMB_BUSY_TIMEOUT;
2801 while ((val & SMBCSR_BUSY) && --to) {
2803 val = jread32(jme, JME_SMBCSR);
2806 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2810 jwrite32(jme, JME_SMBINTF,
2811 ((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
2812 ((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
2813 SMBINTF_HWRWN_WRITE |
2816 val = jread32(jme, JME_SMBINTF);
2817 to = JME_SMB_BUSY_TIMEOUT;
2818 while ((val & SMBINTF_HWCMD) && --to) {
2820 val = jread32(jme, JME_SMBINTF);
2823 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2831 jme_get_eeprom_len(struct net_device *netdev)
2833 struct jme_adapter *jme = netdev_priv(netdev);
2835 val = jread32(jme, JME_SMBCSR);
2836 return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2840 jme_get_eeprom(struct net_device *netdev,
2841 struct ethtool_eeprom *eeprom, u8 *data)
2843 struct jme_adapter *jme = netdev_priv(netdev);
2844 int i, offset = eeprom->offset, len = eeprom->len;
2847 * ethtool will check the boundary for us
2849 eeprom->magic = JME_EEPROM_MAGIC;
2850 for (i = 0 ; i < len ; ++i)
2851 data[i] = jme_smb_read(jme, i + offset);
2857 jme_set_eeprom(struct net_device *netdev,
2858 struct ethtool_eeprom *eeprom, u8 *data)
2860 struct jme_adapter *jme = netdev_priv(netdev);
2861 int i, offset = eeprom->offset, len = eeprom->len;
2863 if (eeprom->magic != JME_EEPROM_MAGIC)
2867 * ethtool will check the boundary for us
2869 for (i = 0 ; i < len ; ++i)
2870 jme_smb_write(jme, i + offset, data[i]);
static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo		= jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel		= jme_get_msglevel,
	.set_msglevel		= jme_set_msglevel,
	.nway_reset		= jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};
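
/*
 * Pick the widest DMA mask the device supports: the gigabit JMC250 can
 * try 64-bit and then 40-bit addressing, everything else falls back to
 * 32-bit.  Returns 1 when a >32-bit mask was accepted (the caller then
 * advertises NETIF_F_HIGHDMA), 0 for plain 32-bit, and a negative
 * value when no mask could be set at all.
 */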
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			return 1;

	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return 0;

	return -1;
}
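
/*
 * PHY register 26 is vendor-specific; setting bit 12 here looks like a
 * JMicron PHY tweak whose exact meaning is not documented in this
 * source, so treat that description as an assumption.
 */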
static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}

static inline void
jme_check_hw_ver(struct jme_adapter *jme)
{
	u32 chipmode;

	chipmode = jread32(jme, JME_CHIPMODE);

	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
	jme->chip_main_rev = jme->chiprev & 0xF;
	jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}

static const struct net_device_ops jme_netdev_ops = {
	.ndo_open		= jme_open,
	.ndo_stop		= jme_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= jme_ioctl,
	.ndo_start_xmit		= jme_start_xmit,
	.ndo_set_mac_address	= jme_set_macaddr,
	.ndo_set_rx_mode	= jme_set_multi,
	.ndo_change_mtu		= jme_change_mtu,
	.ndo_tx_timeout		= jme_tx_timeout,
	.ndo_fix_features	= jme_fix_features,
	.ndo_set_features	= jme_set_features,
};
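
/*
 * Probe path: enable the PCI device, choose a DMA mask, claim BAR 0,
 * allocate and populate the netdev, map the registers, set up NAPI,
 * locks and tasklets, discover the PHY, pull the MAC address out of
 * the EEPROM, and finally register with the network stack.
 */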
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		pr_err("Cannot set PCI DMA Mask\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("No PCI resource region found\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pr_err("Cannot obtain PCI resource region\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		pr_err("Cannot allocate netdev structure\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->netdev_ops = &jme_netdev_ops;
	netdev->ethtool_ops = &jme_ethtool_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->hw_features = NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;
	netdev->features = NETIF_F_IP_CSUM |
			   NETIF_F_IPV6_CSUM |
			   NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	jme->jme_rx = netif_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		pr_err("Mapping PCI resource region error\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}

	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	/* NETIF_NAPI_SET() is a driver wrapper around netif_napi_add()
	 * and supplies its own trailing semicolon. */
	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_init(&jme->pcc_task,
		     jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     jme_rx_empty_tasklet,
		     (unsigned long) jme);
	/*
	 * Leave the service tasklets disabled for now; they are set up
	 * again when the interface is brought up.
	 */
	tasklet_disable_nosync(&jme->linkch_task);
	tasklet_disable_nosync(&jme->txclean_task);
	tasklet_disable_nosync(&jme->rxclean_task);
	tasklet_disable_nosync(&jme->rxempty_task);
	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	jme->reg_gpreg1 = GPREG1_DEFAULT;

	if (jme->reg_rxmcs & RXMCS_CHECKSUM)
		netdev->features |= NETIF_F_RXCSUM;

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	}
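
	/*
	 * The switch above clamps the TX DMA burst size to the PCIe Max
	 * Read Request Size reported by config space, so the MAC never
	 * issues a larger read request than the host will service.
	 */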

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			pr_err("Cannot find phy_id\n");
			goto err_out_unmap;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.phy_id_mask = 0x1F;
	jme->mii_if.reg_num_mask = 0x1F;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm_disable_wol(jme);
	pci_set_power_state(jme->pdev, PCI_D0);
	device_init_wakeup(&pdev->dev, true);

	jme_set_phyfifo_5level(jme);
	jme->pcirev = pdev->revision;
	if (!jme->fpgaver)
		jme_phy_init(jme);
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		pr_err("Reload eeprom for reading MAC Address error\n");
		goto err_out_unmap;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		pr_err("Cannot register net device\n");
		goto err_out_unmap;
	}

	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
		   "JMC250 Gigabit Ethernet" :
		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
		   "JMC260 Fast Ethernet" : "Unknown",
		   (jme->fpgaver != 0) ? " (FPGA)" : "",
		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		   jme->pcirev, netdev->dev_addr);

	return 0;

err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}
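
/*
 * Teardown mirrors the probe path in reverse: unregister from the
 * stack first so no new I/O can start, then release the mapped
 * registers and the PCI resources.
 */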
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
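
/*
 * On shutdown, drop the PHY into its power-save state and leave PME
 * armed so Wake-on-LAN can still bring the box back up.
 */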
static void
jme_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_powersave_phy(jme);
	pci_pme_active(pdev, true);
}

#ifdef CONFIG_PM_SLEEP
static int
jme_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	jme_powersave_phy(jme);

	return 0;
}
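
/*
 * Resume restores what suspend tore down: re-arm PM/WoL state,
 * reprogram the PHY (either from the cached ethtool settings or via a
 * full PHY reset), reattach the netdev and kick a link check.
 */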
static int
jme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	jme_clear_pm_disable_wol(jme);
	jme_phy_on(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);
	jme_phy_calibration(jme);
	jme_phy_setEA(jme);
	netif_device_attach(netdev);

	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	jme_start_irq(jme);

	return 0;
}

static SIMPLE_DEV_PM_OPS(jme_pm_ops, jme_suspend, jme_resume);
#define JME_PM_OPS (&jme_pm_ops)

#else

#define JME_PM_OPS NULL
#endif

static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};

static struct pci_driver jme_driver = {
	.name		= DRV_NAME,
	.id_table	= jme_pci_tbl,
	.probe		= jme_init_one,
	.remove		= __devexit_p(jme_remove_one),
	.shutdown	= jme_shutdown,
	.driver.pm	= JME_PM_OPS,
};

static int __init
jme_init_module(void)
{
	pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}

static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}

module_init(jme_init_module);
module_exit(jme_cleanup_module);

MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);