/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014  Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK | \
                NETIF_MSG_TIMER | \
                NETIF_MSG_RX_ERR | \
                NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
        [EDRRR]         = 0x0410,
        [EESR]          = 0x0428,
        [EESIPR]        = 0x0430,
        [TDLAR]         = 0x0010,
        [TDFAR]         = 0x0014,
        [TDFXR]         = 0x0018,
        [TDFFR]         = 0x001c,
        [RDLAR]         = 0x0030,
        [RDFAR]         = 0x0034,
        [RDFXR]         = 0x0038,
        [RDFFR]         = 0x003c,
        [TRSCER]        = 0x0438,
        [RMFCR]         = 0x0440,
        [TFTR]          = 0x0448,
        [FDR]           = 0x0450,
        [RMCR]          = 0x0458,
        [RPADIR]        = 0x0460,
        [FCFTR]         = 0x0468,
        [CSMR]          = 0x04E4,

        [ECMR]          = 0x0500,
        [ECSR]          = 0x0510,
        [ECSIPR]        = 0x0518,
        [PIR]           = 0x0520,
        [PSR]           = 0x0528,
        [PIPR]          = 0x052c,
        [RFLR]          = 0x0508,
        [APR]           = 0x0554,
        [MPR]           = 0x0558,
        [PFTCR]         = 0x055c,
        [PFRCR]         = 0x0560,
        [TPAUSER]       = 0x0564,
        [GECMR]         = 0x05b0,
        [BCULR]         = 0x05b4,
        [MAHR]          = 0x05c0,
        [MALR]          = 0x05c8,
        [TROCR]         = 0x0700,
        [CDCR]          = 0x0708,
        [LCCR]          = 0x0710,
        [CEFCR]         = 0x0740,
        [FRECR]         = 0x0748,
        [TSFRCR]        = 0x0750,
        [TLFRCR]        = 0x0758,
        [RFCR]          = 0x0760,
        [CERCR]         = 0x0768,
        [CEECR]         = 0x0770,
        [MAFCR]         = 0x0778,
        [RMII_MII]      = 0x0790,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAG0]     = 0x0040,
        [TSU_QTAG1]     = 0x0044,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_VTAG0]     = 0x0058,
        [TSU_VTAG1]     = 0x005c,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRH31]    = 0x01f8,
        [TSU_ADRL31]    = 0x01fc,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
        [EDSR]          = 0x0000,
        [EDMR]          = 0x0400,
        [EDTRR]         = 0x0408,
        [EDRRR]         = 0x0410,
        [EESR]          = 0x0428,
        [EESIPR]        = 0x0430,
        [TDLAR]         = 0x0010,
        [TDFAR]         = 0x0014,
        [TDFXR]         = 0x0018,
        [TDFFR]         = 0x001c,
        [RDLAR]         = 0x0030,
        [RDFAR]         = 0x0034,
        [RDFXR]         = 0x0038,
        [RDFFR]         = 0x003c,
        [TRSCER]        = 0x0438,
        [RMFCR]         = 0x0440,
        [TFTR]          = 0x0448,
        [FDR]           = 0x0450,
        [RMCR]          = 0x0458,
        [RPADIR]        = 0x0460,
        [FCFTR]         = 0x0468,
        [CSMR]          = 0x04E4,

        [ECMR]          = 0x0500,
        [RFLR]          = 0x0508,
        [ECSR]          = 0x0510,
        [ECSIPR]        = 0x0518,
        [PIR]           = 0x0520,
        [APR]           = 0x0554,
        [MPR]           = 0x0558,
        [PFTCR]         = 0x055c,
        [PFRCR]         = 0x0560,
        [TPAUSER]       = 0x0564,
        [MAHR]          = 0x05c0,
        [MALR]          = 0x05c8,
        [CEFCR]         = 0x0740,
        [FRECR]         = 0x0748,
        [TSFRCR]        = 0x0750,
        [TLFRCR]        = 0x0758,
        [RFCR]          = 0x0760,
        [MAFCR]         = 0x0778,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_VTAG0]     = 0x0058,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRH31]    = 0x01f8,
        [TSU_ADRL31]    = 0x01fc,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
        [ECMR]          = 0x0300,
        [RFLR]          = 0x0308,
        [ECSR]          = 0x0310,
        [ECSIPR]        = 0x0318,
        [PIR]           = 0x0320,
        [PSR]           = 0x0328,
        [RDMLR]         = 0x0340,
        [IPGR]          = 0x0350,
        [APR]           = 0x0354,
        [MPR]           = 0x0358,
        [RFCF]          = 0x0360,
        [TPAUSER]       = 0x0364,
        [TPAUSECR]      = 0x0368,
        [MAHR]          = 0x03c0,
        [MALR]          = 0x03c8,
        [TROCR]         = 0x03d0,
        [CDCR]          = 0x03d4,
        [LCCR]          = 0x03d8,
        [CNDCR]         = 0x03dc,
        [CEFCR]         = 0x03e4,
        [FRECR]         = 0x03e8,
        [TSFRCR]        = 0x03ec,
        [TLFRCR]        = 0x03f0,
        [RFCR]          = 0x03f4,
        [MAFCR]         = 0x03f8,

        [EDMR]          = 0x0200,
        [EDTRR]         = 0x0208,
        [EDRRR]         = 0x0210,
        [TDLAR]         = 0x0218,
        [RDLAR]         = 0x0220,
        [EESR]          = 0x0228,
        [EESIPR]        = 0x0230,
        [TRSCER]        = 0x0238,
        [RMFCR]         = 0x0240,
        [TFTR]          = 0x0248,
        [FDR]           = 0x0250,
        [RMCR]          = 0x0258,
        [TFUCR]         = 0x0264,
        [RFOCR]         = 0x0268,
        [RMIIMODE]      = 0x026c,
        [FCFTR]         = 0x0270,
        [TRIMD]         = 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
        [ECMR]          = 0x0100,
        [RFLR]          = 0x0108,
        [ECSR]          = 0x0110,
        [ECSIPR]        = 0x0118,
        [PIR]           = 0x0120,
        [PSR]           = 0x0128,
        [RDMLR]         = 0x0140,
        [IPGR]          = 0x0150,
        [APR]           = 0x0154,
        [MPR]           = 0x0158,
        [TPAUSER]       = 0x0164,
        [RFCF]          = 0x0160,
        [TPAUSECR]      = 0x0168,
        [BCFRR]         = 0x016c,
        [MAHR]          = 0x01c0,
        [MALR]          = 0x01c8,
        [TROCR]         = 0x01d0,
        [CDCR]          = 0x01d4,
        [LCCR]          = 0x01d8,
        [CNDCR]         = 0x01dc,
        [CEFCR]         = 0x01e4,
        [FRECR]         = 0x01e8,
        [TSFRCR]        = 0x01ec,
        [TLFRCR]        = 0x01f0,
        [RFCR]          = 0x01f4,
        [MAFCR]         = 0x01f8,
        [RTRATE]        = 0x01fc,

        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0008,
        [EDRRR]         = 0x0010,
        [TDLAR]         = 0x0018,
        [RDLAR]         = 0x0020,
        [EESR]          = 0x0028,
        [EESIPR]        = 0x0030,
        [TRSCER]        = 0x0038,
        [RMFCR]         = 0x0040,
        [TFTR]          = 0x0048,
        [FDR]           = 0x0050,
        [RMCR]          = 0x0058,
        [TFUCR]         = 0x0064,
        [RFOCR]         = 0x0068,
        [FCFTR]         = 0x0070,
        [RPADIR]        = 0x0078,
        [TRIMD]         = 0x007c,
        [RBWAR]         = 0x00c8,
        [RDFAR]         = 0x00cc,
        [TBRAR]         = 0x00d4,
        [TDFAR]         = 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [EDMR]          = 0x0000,
        [EDTRR]         = 0x0004,
        [EDRRR]         = 0x0008,
        [TDLAR]         = 0x000c,
        [RDLAR]         = 0x0010,
        [EESR]          = 0x0014,
        [EESIPR]        = 0x0018,
        [TRSCER]        = 0x001c,
        [RMFCR]         = 0x0020,
        [TFTR]          = 0x0024,
        [FDR]           = 0x0028,
        [RMCR]          = 0x002c,
        [EDOCR]         = 0x0030,
        [FCFTR]         = 0x0034,
        [RPADIR]        = 0x0038,
        [TRIMD]         = 0x003c,
        [RBWAR]         = 0x0040,
        [RDFAR]         = 0x0044,
        [TBRAR]         = 0x004c,
        [TDFAR]         = 0x0050,

        [ECMR]          = 0x0160,
        [ECSR]          = 0x0164,
        [ECSIPR]        = 0x0168,
        [PIR]           = 0x016c,
        [MAHR]          = 0x0170,
        [MALR]          = 0x0174,
        [RFLR]          = 0x0178,
        [PSR]           = 0x017c,
        [TROCR]         = 0x0180,
        [CDCR]          = 0x0184,
        [LCCR]          = 0x0188,
        [CNDCR]         = 0x018c,
        [CEFCR]         = 0x0194,
        [FRECR]         = 0x0198,
        [TSFRCR]        = 0x019c,
        [TLFRCR]        = 0x01a0,
        [RFCR]          = 0x01a4,
        [MAFCR]         = 0x01a8,
        [IPGR]          = 0x01b4,
        [APR]           = 0x01b8,
        [MPR]           = 0x01bc,
        [TPAUSER]       = 0x01c4,
        [BCFR]          = 0x01cc,

        [ARSTR]         = 0x0000,
        [TSU_CTRST]     = 0x0004,
        [TSU_FWEN0]     = 0x0010,
        [TSU_FWEN1]     = 0x0014,
        [TSU_FCM]       = 0x0018,
        [TSU_BSYSL0]    = 0x0020,
        [TSU_BSYSL1]    = 0x0024,
        [TSU_PRISL0]    = 0x0028,
        [TSU_PRISL1]    = 0x002c,
        [TSU_FWSL0]     = 0x0030,
        [TSU_FWSL1]     = 0x0034,
        [TSU_FWSLC]     = 0x0038,
        [TSU_QTAGM0]    = 0x0040,
        [TSU_QTAGM1]    = 0x0044,
        [TSU_ADQT0]     = 0x0048,
        [TSU_ADQT1]     = 0x004c,
        [TSU_FWSR]      = 0x0050,
        [TSU_FWINMK]    = 0x0054,
        [TSU_ADSBSY]    = 0x0060,
        [TSU_TEN]       = 0x0064,
        [TSU_POST1]     = 0x0070,
        [TSU_POST2]     = 0x0074,
        [TSU_POST3]     = 0x0078,
        [TSU_POST4]     = 0x007c,

        [TXNLCR0]       = 0x0080,
        [TXALCR0]       = 0x0084,
        [RXNLCR0]       = 0x0088,
        [RXALCR0]       = 0x008c,
        [FWNLCR0]       = 0x0090,
        [FWALCR0]       = 0x0094,
        [TXNLCR1]       = 0x00a0,
        [TXALCR1]       = 0x00a4,
        [RXNLCR1]       = 0x00a8,
        [RXALCR1]       = 0x00ac,
        [FWNLCR1]       = 0x00b0,
        [FWALCR1]       = 0x00b4,

        [TSU_ADRH0]     = 0x0100,
        [TSU_ADRL0]     = 0x0104,
        [TSU_ADRL31]    = 0x01fc,
};

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
        return mdp->reg_offset == sh_eth_offset_fast_rz;
}

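/* Select the E-MAC interface mode (GMII/MII/RMII) via the RMII_MII register */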
static void sh_eth_select_mii(struct net_device *ndev)
{
        u32 value = 0x0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->phy_interface) {
        case PHY_INTERFACE_MODE_GMII:
                value = 0x2;
                break;
        case PHY_INTERFACE_MODE_MII:
                value = 0x1;
                break;
        case PHY_INTERFACE_MODE_RMII:
                value = 0x0;
                break;
        default:
                netdev_warn(ndev,
                            "PHY interface mode was not set up; defaulting to MII\n");
                value = 0x1;
                break;
        }

        sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
                break;
        default:
                break;
        }
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
        .fdr_value      = 0x00000f0f,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_r8a777x,

        .register_type  = SH_ETH_REG_FAST_RCAR,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
        .fdr_value      = 0x00000f0f,

        .trscer_err_mask = DESC_I_RINT8,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
        .shift_rd0      = 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
                break;
        default:
                break;
        }
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7724,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
        default:
                break;
        }
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_sh7757,

        .register_type  = SH_ETH_REG_FAST_SH4,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
};

#define SH_GIGA_ETH_BASE        0xfee00000UL
#define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
        int i;
        unsigned long mahr[2], malr[2];

        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
                malr[i] = ioread32((void *)GIGA_MALR(i));
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }

        /* reset device */
        iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
        mdelay(1);

        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
                iowrite32(malr[i], (void *)GIGA_MALR(i));
                iowrite32(mahr[i], (void *)GIGA_MAHR(i));
        }
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0x00000000, GECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, 0x00000010, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
        default:
                break;
        }
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_giga,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000072f,

        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100:/* 100BASE */
                sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        default:
                break;
        }
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .hw_crc         = 1,
        .select_mii     = 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .irq_flags      = IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);

        sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
        .chip_reset     = sh_eth_chip_reset_r8a7740,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate_gether,

        .register_type  = SH_ETH_REG_GIGABIT,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000070f,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .shift_rd0      = 1,
};

/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,

        .register_type  = SH_ETH_REG_FAST_RZ,

        .ecsr_value     = ECSR_ICD,
        .ecsipr_value   = ECSIPR_ICDIP,
        .eesipr_value   = 0xff7f009f,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000070f,

        .no_psr         = 1,
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .hw_crc         = 1,
        .tsu            = 1,
        .shift_rd0      = 1,
};

static struct sh_eth_cpu_data sh7619_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};

static struct sh_eth_cpu_data sh771x_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .tsu            = 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
                                  DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

        if (!cd->trscer_err_mask)
                cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}

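/* Poll until the EDMR software-reset bits self-clear (up to ~100 ms) */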
static int sh_eth_check_reset(struct net_device *ndev)
{
        int ret = 0;
        int cnt = 100;

        while (cnt > 0) {
                if (!(sh_eth_read(ndev, EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt <= 0) {
                netdev_err(ndev, "Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;

        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
                             EDMR);

                ret = sh_eth_check_reset(ndev);
                if (ret)
                        return ret;

                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
                sh_eth_write(ndev, 0x0, TDFAR);
                sh_eth_write(ndev, 0x0, TDFXR);
                sh_eth_write(ndev, 0x0, TDFFR);
                sh_eth_write(ndev, 0x0, RDLAR);
                sh_eth_write(ndev, 0x0, RDFAR);
                sh_eth_write(ndev, 0x0, RDFXR);
                sh_eth_write(ndev, 0x0, RDFFR);

                /* Reset HW CRC register */
                if (mdp->cd->hw_crc)
                        sh_eth_write(ndev, 0x0, CSMR);

                /* Select MII mode */
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
                             EDMR);
                mdelay(3);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
                             EDMR);
        }

        return ret;
}

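/* Reserve skb headroom so the Rx buffer start is SH_ETH_RX_ALIGN-aligned */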
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

        if (reserve)
                skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
        sh_eth_write(ndev,
                     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
        sh_eth_write(ndev,
                     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

/* Get MAC address from the SuperH MAC address registers
 *
 * SuperH's Ethernet device has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). When you want to use this device, you must set
 * the MAC address in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
                ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
        }
}

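/* The EDTRR transmit-request value differs between GEther and Ether cores */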
static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
                return EDTRR_TRNS_GETHER;
        else
                return EDTRR_TRNS_ETHER;
}

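/* Bit-banged MDIO state: the masks select the MDIO-direction (mmd),
 * data-out (mdo), data-in (mdi), and clock (mdc) bits at 'addr'.
 */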
struct bb_info {
        void (*set_gate)(void *addr);
        struct mdiobb_ctrl ctrl;
        void *addr;
        u32 mmd_msk; /* MMD */
        u32 mdo_msk;
        u32 mdi_msk;
        u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
        iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
        iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
        return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < mdp->num_rx_ring; i++)
                        dev_kfree_skb(mdp->rx_skbuff[i]);
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < mdp->num_tx_ring; i++)
                        dev_kfree_skb(mdp->tx_skbuff[i]);
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
        dma_addr_t dma_addr;

        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
        mdp->dirty_rx = 0;
        mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < mdp->num_rx_ring; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                /* The size of the buffer is a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                dma_addr = dma_map_single(&ndev->dev, skb->data,
                                          rxdesc->buffer_length,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(&ndev->dev, dma_addr)) {
                        kfree_skb(skb);
                        break;
                }
                mdp->rx_skbuff[i] = skb;
                rxdesc->addr = dma_addr;
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }

        mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < mdp->num_tx_ring; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
                        if (sh_eth_is_gether(mdp) ||
                            sh_eth_is_rz_fast_ether(mdp))
                                sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
                                       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
                                       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->rx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                          GFP_KERNEL);
        if (!mdp->tx_ring) {
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);
        mdp->tx_ring = NULL;
        mdp->rx_ring = NULL;

        return ret;
}

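/* Free the coherent DMA descriptor rings, if they were allocated */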
static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
        int ringsize;

        if (mdp->rx_ring) {
                ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
                dma_free_coherent(NULL, ringsize, mdp->rx_ring,
                                  mdp->rx_desc_dma);
                mdp->rx_ring = NULL;
        }

        if (mdp->tx_ring) {
                ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
                dma_free_coherent(NULL, ringsize, mdp->tx_ring,
                                  mdp->tx_desc_dma);
                mdp->tx_ring = NULL;
        }
}

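/* Bring the hardware up: soft reset, descriptor rings, FIFO/interrupt
 * setup and E-MAC configuration; if 'start', also enable interrupts,
 * kick the Rx engine, and start the Tx queue.
 */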
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 val;

        /* Soft Reset */
        ret = sh_eth_reset(ndev);
        if (ret)
                return ret;

        if (mdp->cd->rmiimode)
                sh_eth_write(ndev, 0x1, RMIIMODE);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
        if (mdp->cd->hw_swap)
                sh_eth_write(ndev, EDMR_EL, EDMR);
        else
#endif
                sh_eth_write(ndev, 0, EDMR);

        /* FIFO size set */
        sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
        sh_eth_write(ndev, 0, TFTR);

        /* Frame recv control (enable multiple-packets per rx irq) */
        sh_eth_write(ndev, RMCR_RNC, RMCR);

        sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */

        sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

        if (!mdp->cd->no_trimd)
                sh_eth_write(ndev, 0, TRIMD);

        /* Recv frame limit set register */
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);

        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
        if (start) {
                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
        }

        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        sh_eth_write(ndev, val, ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

        /* E-MAC Interrupt Enable register */
        if (start)
                sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
                sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

        if (start) {
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);

                netif_start_queue(ndev);
        }

        return ret;
}

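/* Quiesce the hardware (inverse of sh_eth_dev_init): stop Tx/Rx DMA and
 * the E-MAC, then soft-reset the controller.
 */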
static void sh_eth_dev_exit(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Deactivate all TX descriptors, so DMA should stop at next
         * packet boundary if it's currently running
         */
        for (i = 0; i < mdp->num_tx_ring; i++)
                mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);

        /* Disable TX FIFO egress to MAC */
        sh_eth_rcv_snd_disable(ndev);

        /* Stop RX DMA at next packet boundary */
        sh_eth_write(ndev, 0, EDRRR);

        /* Aside from TX DMA, we can't tell when the hardware is
         * really stopped, so we need to reset to make sure.
         * Before doing that, wait for long enough to *probably*
         * finish transmitting the last packet and poll stats.
         */
        msleep(2); /* max frame time at 10 Mbps < 1250 us */
        sh_eth_get_stats(ndev);
        sh_eth_reset(ndev);
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % mdp->num_tx_ring;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
                                         txdesc->buffer_length, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= mdp->num_tx_ring - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += txdesc->buffer_length;
        }
        return free_num;
}

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % mdp->num_rx_ring;
        int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
        int limit;
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
        dma_addr_t dma_addr;

        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;

                if (--boguscnt < 0)
                        break;

                if (!(desc_status & RDFEND))
                        ndev->stats.rx_length_errors++;
                /* On almost all GEther/Ether cores, the Receive Frame State
                 * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
                 * On the R8A7740, R8A779x, and R7S72100, however, the RFS
                 * bits occupy bits 25 to 16, so the driver must shift the
                 * status right by 16.
                 */
1462                 if (mdp->cd->shift_rd0)
1463                         desc_status >>= 16;
1464
1465                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1466                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1467                         ndev->stats.rx_errors++;
1468                         if (desc_status & RD_RFS1)
1469                                 ndev->stats.rx_crc_errors++;
1470                         if (desc_status & RD_RFS2)
1471                                 ndev->stats.rx_frame_errors++;
1472                         if (desc_status & RD_RFS3)
1473                                 ndev->stats.rx_length_errors++;
1474                         if (desc_status & RD_RFS4)
1475                                 ndev->stats.rx_length_errors++;
1476                         if (desc_status & RD_RFS6)
1477                                 ndev->stats.rx_missed_errors++;
1478                         if (desc_status & RD_RFS10)
1479                                 ndev->stats.rx_over_errors++;
1480                 } else {
1481                         if (!mdp->cd->hw_swap)
1482                                 sh_eth_soft_swap(
1483                                         phys_to_virt(ALIGN(rxdesc->addr, 4)),
1484                                         pkt_len + 2);
1485                         skb = mdp->rx_skbuff[entry];
1486                         mdp->rx_skbuff[entry] = NULL;
1487                         if (mdp->cd->rpadir)
1488                                 skb_reserve(skb, NET_IP_ALIGN);
1489                         dma_unmap_single(&ndev->dev, rxdesc->addr,
1490                                          ALIGN(mdp->rx_buf_sz, 16),
1491                                          DMA_FROM_DEVICE);
1492                         skb_put(skb, pkt_len);
1493                         skb->protocol = eth_type_trans(skb, ndev);
1494                         netif_receive_skb(skb);
1495                         ndev->stats.rx_packets++;
1496                         ndev->stats.rx_bytes += pkt_len;
1497                 }
1498                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1499                 rxdesc = &mdp->rx_ring[entry];
1500         }
1501
1502         /* Refill the Rx ring buffers. */
1503         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1504                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1505                 rxdesc = &mdp->rx_ring[entry];
1506                 /* The size of the buffer is 16 byte boundary. */
1507                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1508
1509                 if (mdp->rx_skbuff[entry] == NULL) {
1510                         skb = netdev_alloc_skb(ndev, skbuff_size);
1511                         if (skb == NULL)
1512                                 break;  /* Better luck next round. */
1513                         sh_eth_set_receive_align(skb);
1514                         dma_addr = dma_map_single(&ndev->dev, skb->data,
1515                                                   rxdesc->buffer_length,
1516                                                   DMA_FROM_DEVICE);
1517                         if (dma_mapping_error(&ndev->dev, dma_addr)) {
1518                                 kfree_skb(skb);
1519                                 break;
1520                         }
1521                         mdp->rx_skbuff[entry] = skb;
1522
1523                         skb_checksum_none_assert(skb);
1524                         rxdesc->addr = dma_addr;
1525                 }
1526                 if (entry >= mdp->num_rx_ring - 1)
1527                         rxdesc->status |=
1528                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1529                 else
1530                         rxdesc->status |=
1531                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1532         }
1533
1534         /* Restart Rx engine if stopped. */
1535         /* If we don't need to check status, don't. -KDU */
1536         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537                 /* fix the values for the next receive if RDE is set */
1538                 if (intr_status & EESR_RDE) {
1539                         u32 count = (sh_eth_read(ndev, RDFAR) -
1540                                      sh_eth_read(ndev, RDLAR)) >> 4;
1541
1542                         mdp->cur_rx = count;
1543                         mdp->dirty_rx = count;
1544                 }
1545                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1546         }
1547
1548         *quota -= limit - boguscnt - 1;
1549
1550         return *quota <= 0;
1551 }
1552
1553 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1554 {
1555         /* disable tx and rx */
1556         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1557                 ~(ECMR_RE | ECMR_TE), ECMR);
1558 }
1559
1560 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1561 {
1562         /* enable tx and rx */
1563         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1564                 (ECMR_RE | ECMR_TE), ECMR);
1565 }
1566
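/* Editorial sketch: the two helpers above are plain read-modify-write
 * updates of ECMR.  A generic helper of this shape (the name and signature
 * are hypothetical, not part of this driver) would factor out the pattern:
 */
static inline void sh_eth_modify_sketch(struct net_device *ndev, int reg,
                                        u32 clear, u32 set)
{
        /* read the current value, drop the bits in @clear, then add @set */
        sh_eth_write(ndev, (sh_eth_read(ndev, reg) & ~clear) | set, reg);
}
/* e.g. sh_eth_rcv_snd_disable() would then be
 * sh_eth_modify_sketch(ndev, ECMR, ECMR_RE | ECMR_TE, 0).
 */
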
1567 /* error control function */
1568 static void sh_eth_error(struct net_device *ndev, int intr_status)
1569 {
1570         struct sh_eth_private *mdp = netdev_priv(ndev);
1571         u32 felic_stat;
1572         u32 link_stat;
1573         u32 mask;
1574
1575         if (intr_status & EESR_ECI) {
1576                 felic_stat = sh_eth_read(ndev, ECSR);
1577                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1578                 if (felic_stat & ECSR_ICD)
1579                         ndev->stats.tx_carrier_errors++;
1580                 if (felic_stat & ECSR_LCHNG) {
1581                         /* Link Changed */
1582                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1583                                 goto ignore_link;
1584                         } else {
1585                                 link_stat = sh_eth_read(ndev, PSR);
1586                                 if (mdp->ether_link_active_low)
1587                                         link_stat = ~link_stat;
1588                         }
1589                         if (!(link_stat & PHY_ST_LINK)) {
1590                                 sh_eth_rcv_snd_disable(ndev);
1591                         } else {
1592                                 /* Link Up */
1593                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1594                                                    ~DMAC_M_ECI, EESIPR);
1595                                 /* clear int */
1596                                 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1597                                              ECSR);
1598                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1599                                                    DMAC_M_ECI, EESIPR);
1600                                 /* enable tx and rx */
1601                                 sh_eth_rcv_snd_enable(ndev);
1602                         }
1603                 }
1604         }
1605
1606 ignore_link:
1607         if (intr_status & EESR_TWB) {
1608                 /* Unused write back interrupt */
1609                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1610                         ndev->stats.tx_aborted_errors++;
1611                         netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1612                 }
1613         }
1614
1615         if (intr_status & EESR_RABT) {
1616                 /* Receive Abort int */
1617                 if (intr_status & EESR_RFRMER) {
1618                         /* Receive Frame Overflow int */
1619                         ndev->stats.rx_frame_errors++;
1620                 }
1621         }
1622
1623         if (intr_status & EESR_TDE) {
1624                 /* Transmit Descriptor Empty int */
1625                 ndev->stats.tx_fifo_errors++;
1626                 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1627         }
1628
1629         if (intr_status & EESR_TFE) {
1630                 /* FIFO under flow */
1631                 ndev->stats.tx_fifo_errors++;
1632                 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1633         }
1634
1635         if (intr_status & EESR_RDE) {
1636                 /* Receive Descriptor Empty int */
1637                 ndev->stats.rx_over_errors++;
1638         }
1639
1640         if (intr_status & EESR_RFE) {
1641                 /* Receive FIFO Overflow int */
1642                 ndev->stats.rx_fifo_errors++;
1643         }
1644
1645         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1646                 /* Address Error */
1647                 ndev->stats.tx_fifo_errors++;
1648                 netif_err(mdp, tx_err, ndev, "Address Error\n");
1649         }
1650
1651         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1652         if (mdp->cd->no_ade)
1653                 mask &= ~EESR_ADE;
1654         if (intr_status & mask) {
1655                 /* Tx error */
1656                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1657
1658                 /* dmesg */
1659                 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1660                            intr_status, mdp->cur_tx, mdp->dirty_tx,
1661                            (u32)ndev->state, edtrr);
1662                 /* dirty buffer free */
1663                 sh_eth_txfree(ndev);
1664
1665                 /* SH7712 BUG */
1666                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1667                         /* tx dma start */
1668                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1669                 }
1670                 /* wakeup */
1671                 netif_wake_queue(ndev);
1672         }
1673 }
1674
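/* Editorial note, a hedged reading of the "SH7712 BUG" workaround above:
 * sh_eth_get_edtrr_trns() returns the controller's "start transmission"
 * value for EDTRR, so the XOR test fires when error handling left EDTRR
 * without that value, and the write re-arms Tx DMA before the queue is
 * woken.
 */
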
1675 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1676 {
1677         struct net_device *ndev = netdev;
1678         struct sh_eth_private *mdp = netdev_priv(ndev);
1679         struct sh_eth_cpu_data *cd = mdp->cd;
1680         irqreturn_t ret = IRQ_NONE;
1681         unsigned long intr_status, intr_enable;
1682
1683         spin_lock(&mdp->lock);
1684
1685         /* Get interrupt status */
1686         intr_status = sh_eth_read(ndev, EESR);
1687         /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1688          * enabled since it's the one that comes through regardless of the mask,
1689          * and we need to fully handle it in sh_eth_error() in order to quench
1690          * it as it doesn't get cleared by just writing 1 to the ECI bit...
1691          */
1692         intr_enable = sh_eth_read(ndev, EESIPR);
1693         intr_status &= intr_enable | DMAC_M_ECI;
1694         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1695                 ret = IRQ_HANDLED;
1696         else
1697                 goto out;
1698
1699         if (unlikely(!mdp->irq_enabled)) {
1700                 sh_eth_write(ndev, 0, EESIPR);
1701                 goto out;
1702         }
1703
1704         if (intr_status & EESR_RX_CHECK) {
1705                 if (napi_schedule_prep(&mdp->napi)) {
1706                         /* Mask Rx interrupts */
1707                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1708                                      EESIPR);
1709                         __napi_schedule(&mdp->napi);
1710                 } else {
1711                         netdev_warn(ndev,
1712                                     "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1713                                     intr_status, intr_enable);
1714                 }
1715         }
1716
1717         /* Tx Check */
1718         if (intr_status & cd->tx_check) {
1719                 /* Clear Tx interrupts */
1720                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1721
1722                 sh_eth_txfree(ndev);
1723                 netif_wake_queue(ndev);
1724         }
1725
1726         if (intr_status & cd->eesr_err_check) {
1727                 /* Clear error interrupts */
1728                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1729
1730                 sh_eth_error(ndev, intr_status);
1731         }
1732
1733 out:
1734         spin_unlock(&mdp->lock);
1735
1736         return ret;
1737 }
1738
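/* Editorial note: EESR is a write-one-to-clear status register, which is
 * why the handler above acknowledges events by writing the handled bits
 * straight back (0 bits are left untouched).  A minimal sketch of that
 * acknowledge step in isolation (the helper name is hypothetical):
 */
static inline void sh_eth_ack_events_sketch(struct net_device *ndev,
                                            u32 events)
{
        /* every 1 bit in @events clears the matching EESR status bit */
        sh_eth_write(ndev, events, EESR);
}
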
1739 static int sh_eth_poll(struct napi_struct *napi, int budget)
1740 {
1741         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1742                                                   napi);
1743         struct net_device *ndev = napi->dev;
1744         int quota = budget;
1745         unsigned long intr_status;
1746
1747         for (;;) {
1748                 intr_status = sh_eth_read(ndev, EESR);
1749                 if (!(intr_status & EESR_RX_CHECK))
1750                         break;
1751                 /* Clear Rx interrupts */
1752                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1753
1754                 if (sh_eth_rx(ndev, intr_status, &quota))
1755                         goto out;
1756         }
1757
1758         napi_complete(napi);
1759
1760         /* Reenable Rx interrupts */
1761         if (mdp->irq_enabled)
1762                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1763 out:
1764         return budget - quota;
1765 }
1766
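/* Editorial sketch: sh_eth_poll() follows the usual NAPI contract: return
 * how many packets were consumed, and complete NAPI plus re-enable Rx
 * interrupts only once the ring drained within budget.  The skeleton below
 * is illustrative only; process_rx_ring() and enable_rx_irq() are
 * hypothetical stand-ins, hence the #if 0.
 */
#if 0
static int napi_poll_skeleton(struct napi_struct *napi, int budget)
{
        int done = process_rx_ring(napi->dev, budget);

        if (done < budget) {
                napi_complete(napi);
                enable_rx_irq(napi->dev);
        }
        return done;
}
#endif
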
1767 /* PHY state control function */
1768 static void sh_eth_adjust_link(struct net_device *ndev)
1769 {
1770         struct sh_eth_private *mdp = netdev_priv(ndev);
1771         struct phy_device *phydev = mdp->phydev;
1772         int new_state = 0;
1773
1774         if (phydev->link) {
1775                 if (phydev->duplex != mdp->duplex) {
1776                         new_state = 1;
1777                         mdp->duplex = phydev->duplex;
1778                         if (mdp->cd->set_duplex)
1779                                 mdp->cd->set_duplex(ndev);
1780                 }
1781
1782                 if (phydev->speed != mdp->speed) {
1783                         new_state = 1;
1784                         mdp->speed = phydev->speed;
1785                         if (mdp->cd->set_rate)
1786                                 mdp->cd->set_rate(ndev);
1787                 }
1788                 if (!mdp->link) {
1789                         sh_eth_write(ndev,
1790                                      sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1791                                      ECMR);
1792                         new_state = 1;
1793                         mdp->link = phydev->link;
1794                         if (mdp->cd->no_psr || mdp->no_ether_link)
1795                                 sh_eth_rcv_snd_enable(ndev);
1796                 }
1797         } else if (mdp->link) {
1798                 new_state = 1;
1799                 mdp->link = 0;
1800                 mdp->speed = 0;
1801                 mdp->duplex = -1;
1802                 if (mdp->cd->no_psr || mdp->no_ether_link)
1803                         sh_eth_rcv_snd_disable(ndev);
1804         }
1805
1806         if (new_state && netif_msg_link(mdp))
1807                 phy_print_status(phydev);
1808 }
1809
1810 /* PHY init function */
1811 static int sh_eth_phy_init(struct net_device *ndev)
1812 {
1813         struct device_node *np = ndev->dev.parent->of_node;
1814         struct sh_eth_private *mdp = netdev_priv(ndev);
1815         struct phy_device *phydev = NULL;
1816
1817         mdp->link = 0;
1818         mdp->speed = 0;
1819         mdp->duplex = -1;
1820
1821         /* Try connect to PHY */
1822         if (np) {
1823                 struct device_node *pn;
1824
1825                 pn = of_parse_phandle(np, "phy-handle", 0);
1826                 phydev = of_phy_connect(ndev, pn,
1827                                         sh_eth_adjust_link, 0,
1828                                         mdp->phy_interface);
1829
1830                 if (!phydev)
1831                         phydev = ERR_PTR(-ENOENT);
1832         } else {
1833                 char phy_id[MII_BUS_ID_SIZE + 3];
1834
1835                 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1836                          mdp->mii_bus->id, mdp->phy_id);
1837
1838                 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1839                                      mdp->phy_interface);
1840         }
1841
1842         if (IS_ERR(phydev)) {
1843                 netdev_err(ndev, "failed to connect PHY\n");
1844                 return PTR_ERR(phydev);
1845         }
1846
1847         netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1848                     phydev->addr, phydev->irq, phydev->drv->name);
1849
1850         mdp->phydev = phydev;
1851
1852         return 0;
1853 }
1854
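/* Editorial worked example: PHY_ID_FMT is "%s:%02x" (MII bus id, then PHY
 * address), so a bus id of "sh_mii-0" with phy_id 1 yields the string
 * "sh_mii-0:01" in the non-DT branch above.  The bus id value here is
 * illustrative; the real one is built in sh_mdio_init() below.
 */
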
1855 /* PHY control start function */
1856 static int sh_eth_phy_start(struct net_device *ndev)
1857 {
1858         struct sh_eth_private *mdp = netdev_priv(ndev);
1859         int ret;
1860
1861         ret = sh_eth_phy_init(ndev);
1862         if (ret)
1863                 return ret;
1864
1865         phy_start(mdp->phydev);
1866
1867         return 0;
1868 }
1869
1870 static int sh_eth_get_settings(struct net_device *ndev,
1871                                struct ethtool_cmd *ecmd)
1872 {
1873         struct sh_eth_private *mdp = netdev_priv(ndev);
1874         unsigned long flags;
1875         int ret;
1876
1877         if (!mdp->phydev)
1878                 return -ENODEV;
1879
1880         spin_lock_irqsave(&mdp->lock, flags);
1881         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1882         spin_unlock_irqrestore(&mdp->lock, flags);
1883
1884         return ret;
1885 }
1886
1887 static int sh_eth_set_settings(struct net_device *ndev,
1888                                struct ethtool_cmd *ecmd)
1889 {
1890         struct sh_eth_private *mdp = netdev_priv(ndev);
1891         unsigned long flags;
1892         int ret;
1893
1894         if (!mdp->phydev)
1895                 return -ENODEV;
1896
1897         spin_lock_irqsave(&mdp->lock, flags);
1898
1899         /* disable tx and rx */
1900         sh_eth_rcv_snd_disable(ndev);
1901
1902         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1903         if (ret)
1904                 goto error_exit;
1905
1906         if (ecmd->duplex == DUPLEX_FULL)
1907                 mdp->duplex = 1;
1908         else
1909                 mdp->duplex = 0;
1910
1911         if (mdp->cd->set_duplex)
1912                 mdp->cd->set_duplex(ndev);
1913
1914 error_exit:
1915         mdelay(1);
1916
1917         /* enable tx and rx */
1918         sh_eth_rcv_snd_enable(ndev);
1919
1920         spin_unlock_irqrestore(&mdp->lock, flags);
1921
1922         return ret;
1923 }
1924
1925 static int sh_eth_nway_reset(struct net_device *ndev)
1926 {
1927         struct sh_eth_private *mdp = netdev_priv(ndev);
1928         unsigned long flags;
1929         int ret;
1930
1931         if (!mdp->phydev)
1932                 return -ENODEV;
1933
1934         spin_lock_irqsave(&mdp->lock, flags);
1935         ret = phy_start_aneg(mdp->phydev);
1936         spin_unlock_irqrestore(&mdp->lock, flags);
1937
1938         return ret;
1939 }
1940
1941 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1942 {
1943         struct sh_eth_private *mdp = netdev_priv(ndev);
1944         return mdp->msg_enable;
1945 }
1946
1947 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1948 {
1949         struct sh_eth_private *mdp = netdev_priv(ndev);
1950         mdp->msg_enable = value;
1951 }
1952
1953 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1954         "rx_current", "tx_current",
1955         "rx_dirty", "tx_dirty",
1956 };
1957 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1958
1959 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1960 {
1961         switch (sset) {
1962         case ETH_SS_STATS:
1963                 return SH_ETH_STATS_LEN;
1964         default:
1965                 return -EOPNOTSUPP;
1966         }
1967 }
1968
1969 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1970                                      struct ethtool_stats *stats, u64 *data)
1971 {
1972         struct sh_eth_private *mdp = netdev_priv(ndev);
1973         int i = 0;
1974
1975         /* device-specific stats */
1976         data[i++] = mdp->cur_rx;
1977         data[i++] = mdp->cur_tx;
1978         data[i++] = mdp->dirty_rx;
1979         data[i++] = mdp->dirty_tx;
1980 }
1981
1982 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1983 {
1984         switch (stringset) {
1985         case ETH_SS_STATS:
1986                 memcpy(data, *sh_eth_gstrings_stats,
1987                        sizeof(sh_eth_gstrings_stats));
1988                 break;
1989         }
1990 }
1991
1992 static void sh_eth_get_ringparam(struct net_device *ndev,
1993                                  struct ethtool_ringparam *ring)
1994 {
1995         struct sh_eth_private *mdp = netdev_priv(ndev);
1996
1997         ring->rx_max_pending = RX_RING_MAX;
1998         ring->tx_max_pending = TX_RING_MAX;
1999         ring->rx_pending = mdp->num_rx_ring;
2000         ring->tx_pending = mdp->num_tx_ring;
2001 }
2002
2003 static int sh_eth_set_ringparam(struct net_device *ndev,
2004                                 struct ethtool_ringparam *ring)
2005 {
2006         struct sh_eth_private *mdp = netdev_priv(ndev);
2007         int ret;
2008
2009         if (ring->tx_pending > TX_RING_MAX ||
2010             ring->rx_pending > RX_RING_MAX ||
2011             ring->tx_pending < TX_RING_MIN ||
2012             ring->rx_pending < RX_RING_MIN)
2013                 return -EINVAL;
2014         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2015                 return -EINVAL;
2016
2017         if (netif_running(ndev)) {
2018                 netif_device_detach(ndev);
2019                 netif_tx_disable(ndev);
2020
2021                 /* Serialise with the interrupt handler and NAPI, then
2022                  * disable interrupts.  We have to clear the
2023                  * irq_enabled flag first to ensure that interrupts
2024                  * won't be re-enabled.
2025                  */
2026                 mdp->irq_enabled = false;
2027                 synchronize_irq(ndev->irq);
2028                 napi_synchronize(&mdp->napi);
2029                 sh_eth_write(ndev, 0x0000, EESIPR);
2030
2031                 sh_eth_dev_exit(ndev);
2032
2033                 /* Free all the skbuffs in the Rx and Tx queues. */
2034                 sh_eth_ring_free(ndev);
2035                 /* Free DMA buffer */
2036                 sh_eth_free_dma_buffer(mdp);
2037         }
2038
2039         /* Set new parameters */
2040         mdp->num_rx_ring = ring->rx_pending;
2041         mdp->num_tx_ring = ring->tx_pending;
2042
2043         if (netif_running(ndev)) {
2044                 ret = sh_eth_ring_init(ndev);
2045                 if (ret < 0) {
2046                         netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2047                                    __func__);
2048                         return ret;
2049                 }
2050                 ret = sh_eth_dev_init(ndev, false);
2051                 if (ret < 0) {
2052                         netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2053                                    __func__);
2054                         return ret;
2055                 }
2056
2057                 mdp->irq_enabled = true;
2058                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2059                 /* Setting the Rx mode will start the Rx process. */
2060                 sh_eth_write(ndev, EDRRR_R, EDRRR);
2061                 netif_device_attach(ndev);
2062         }
2063
2064         return 0;
2065 }
2066
2067 static const struct ethtool_ops sh_eth_ethtool_ops = {
2068         .get_settings   = sh_eth_get_settings,
2069         .set_settings   = sh_eth_set_settings,
2070         .nway_reset     = sh_eth_nway_reset,
2071         .get_msglevel   = sh_eth_get_msglevel,
2072         .set_msglevel   = sh_eth_set_msglevel,
2073         .get_link       = ethtool_op_get_link,
2074         .get_strings    = sh_eth_get_strings,
2075         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
2076         .get_sset_count     = sh_eth_get_sset_count,
2077         .get_ringparam  = sh_eth_get_ringparam,
2078         .set_ringparam  = sh_eth_set_ringparam,
2079 };
2080
2081 /* network device open function */
2082 static int sh_eth_open(struct net_device *ndev)
2083 {
2084         int ret = 0;
2085         struct sh_eth_private *mdp = netdev_priv(ndev);
2086
2087         pm_runtime_get_sync(&mdp->pdev->dev);
2088
2089         napi_enable(&mdp->napi);
2090
2091         ret = request_irq(ndev->irq, sh_eth_interrupt,
2092                           mdp->cd->irq_flags, ndev->name, ndev);
2093         if (ret) {
2094                 netdev_err(ndev, "Cannot assign IRQ number\n");
2095                 goto out_napi_off;
2096         }
2097
2098         /* Descriptor set */
2099         ret = sh_eth_ring_init(ndev);
2100         if (ret)
2101                 goto out_free_irq;
2102
2103         /* device init */
2104         ret = sh_eth_dev_init(ndev, true);
2105         if (ret)
2106                 goto out_free_irq;
2107
2108         /* PHY control start */
2109         ret = sh_eth_phy_start(ndev);
2110         if (ret)
2111                 goto out_free_irq;
2112
2113         mdp->is_opened = 1;
2114
2115         return ret;
2116
2117 out_free_irq:
2118         free_irq(ndev->irq, ndev);
2119 out_napi_off:
2120         napi_disable(&mdp->napi);
2121         pm_runtime_put_sync(&mdp->pdev->dev);
2122         return ret;
2123 }
2124
2125 /* Timeout function */
2126 static void sh_eth_tx_timeout(struct net_device *ndev)
2127 {
2128         struct sh_eth_private *mdp = netdev_priv(ndev);
2129         struct sh_eth_rxdesc *rxdesc;
2130         int i;
2131
2132         netif_stop_queue(ndev);
2133
2134         netif_err(mdp, timer, ndev,
2135                   "transmit timed out, status %8.8x, resetting...\n",
2136                   (int)sh_eth_read(ndev, EESR));
2137
2138         /* bump the tx_errors counter */
2139         ndev->stats.tx_errors++;
2140
2141         /* Free all the skbuffs in the Rx queue. */
2142         for (i = 0; i < mdp->num_rx_ring; i++) {
2143                 rxdesc = &mdp->rx_ring[i];
2144                 rxdesc->status = 0;
2145                 rxdesc->addr = 0xBADF00D0;
2146                 dev_kfree_skb(mdp->rx_skbuff[i]);
2147                 mdp->rx_skbuff[i] = NULL;
2148         }
2149         for (i = 0; i < mdp->num_tx_ring; i++) {
2150                 dev_kfree_skb(mdp->tx_skbuff[i]);
2151                 mdp->tx_skbuff[i] = NULL;
2152         }
2153
2154         /* device init */
2155         sh_eth_dev_init(ndev, true);
2156 }
2157
2158 /* Packet transmit function */
2159 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2160 {
2161         struct sh_eth_private *mdp = netdev_priv(ndev);
2162         struct sh_eth_txdesc *txdesc;
2163         u32 entry;
2164         unsigned long flags;
2165
2166         spin_lock_irqsave(&mdp->lock, flags);
2167         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2168                 if (!sh_eth_txfree(ndev)) {
2169                         netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2170                         netif_stop_queue(ndev);
2171                         spin_unlock_irqrestore(&mdp->lock, flags);
2172                         return NETDEV_TX_BUSY;
2173                 }
2174         }
2175         spin_unlock_irqrestore(&mdp->lock, flags);
2176
2177         if (skb_padto(skb, ETH_ZLEN))
2178                 return NETDEV_TX_OK;
2179
2180         entry = mdp->cur_tx % mdp->num_tx_ring;
2181         mdp->tx_skbuff[entry] = skb;
2182         txdesc = &mdp->tx_ring[entry];
2183         /* soft swap. */
2184         if (!mdp->cd->hw_swap)
2185                 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2186                                  skb->len + 2);
2187         txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2188                                       DMA_TO_DEVICE);
2189         if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2190                 kfree_skb(skb);
2191                 return NETDEV_TX_OK;
2192         }
2193         txdesc->buffer_length = skb->len;
2194
2195         if (entry >= mdp->num_tx_ring - 1)
2196                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197         else
2198                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2199
2200         mdp->cur_tx++;
2201
2202         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2203                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2204
2205         return NETDEV_TX_OK;
2206 }
2207
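/* Editorial note on the queue-full test above: it keeps a slack of four
 * descriptors, so with, say, a 64-entry Tx ring the queue stalls once
 * cur_tx - dirty_tx reaches 60, leaving headroom for in-flight
 * completions before sh_eth_txfree() reclaims descriptors.
 */
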
2208 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2209 {
2210         struct sh_eth_private *mdp = netdev_priv(ndev);
2211
2212         if (sh_eth_is_rz_fast_ether(mdp))
2213                 return &ndev->stats;
2214
2215         if (!mdp->is_opened)
2216                 return &ndev->stats;
2217
2218         ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2219         sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
2220         ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2221         sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
2222         ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2223         sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
2224
2225         if (sh_eth_is_gether(mdp)) {
2226                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2227                 sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
2228                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2229                 sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
2230         } else {
2231                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2232                 sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
2233         }
2234
2235         return &ndev->stats;
2236 }
2237
2238 /* device close function */
2239 static int sh_eth_close(struct net_device *ndev)
2240 {
2241         struct sh_eth_private *mdp = netdev_priv(ndev);
2242
2243         netif_stop_queue(ndev);
2244
2245         /* Serialise with the interrupt handler and NAPI, then disable
2246          * interrupts.  We have to clear the irq_enabled flag first to
2247          * ensure that interrupts won't be re-enabled.
2248          */
2249         mdp->irq_enabled = false;
2250         synchronize_irq(ndev->irq);
2251         napi_disable(&mdp->napi);
2252         sh_eth_write(ndev, 0x0000, EESIPR);
2253
2254         sh_eth_dev_exit(ndev);
2255
2256         /* PHY Disconnect */
2257         if (mdp->phydev) {
2258                 phy_stop(mdp->phydev);
2259                 phy_disconnect(mdp->phydev);
2260                 mdp->phydev = NULL;
2261         }
2262
2263         free_irq(ndev->irq, ndev);
2264
2265         /* Free all the skbuffs in the Rx and Tx queues. */
2266         sh_eth_ring_free(ndev);
2267
2268         /* free DMA buffer */
2269         sh_eth_free_dma_buffer(mdp);
2270
2271         pm_runtime_put_sync(&mdp->pdev->dev);
2272
2273         mdp->is_opened = 0;
2274
2275         return 0;
2276 }
2277
2278 /* ioctl to device function */
2279 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2280 {
2281         struct sh_eth_private *mdp = netdev_priv(ndev);
2282         struct phy_device *phydev = mdp->phydev;
2283
2284         if (!netif_running(ndev))
2285                 return -EINVAL;
2286
2287         if (!phydev)
2288                 return -ENODEV;
2289
2290         return phy_mii_ioctl(phydev, rq, cmd);
2291 }
2292
2293 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2294 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2295                                             int entry)
2296 {
2297         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2298 }
2299
2300 static u32 sh_eth_tsu_get_post_mask(int entry)
2301 {
2302         return 0x0f << (28 - ((entry % 8) * 4));
2303 }
2304
2305 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2306 {
2307         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2308 }
2309
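/* Editorial worked example for the three POST helpers above, taking port 0
 * and CAM entry 10:
 *   reg offset:  TSU_POST1 + (10 / 8) * 4      ->  TSU_POST2
 *   post mask:   0x0f << (28 - (10 % 8) * 4)   ->  0x0f << 20
 *   post bit:    (0x08 >> (0 << 1)) << 20      ->  0x00800000
 * i.e. each POST register packs eight 4-bit fields, one nibble per CAM
 * entry, and the port number selects the bits within that nibble.
 */
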
2310 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2311                                              int entry)
2312 {
2313         struct sh_eth_private *mdp = netdev_priv(ndev);
2314         u32 tmp;
2315         void *reg_offset;
2316
2317         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2318         tmp = ioread32(reg_offset);
2319         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2320 }
2321
2322 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2323                                               int entry)
2324 {
2325         struct sh_eth_private *mdp = netdev_priv(ndev);
2326         u32 post_mask, ref_mask, tmp;
2327         void *reg_offset;
2328
2329         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2330         post_mask = sh_eth_tsu_get_post_mask(entry);
2331         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2332
2333         tmp = ioread32(reg_offset);
2334         iowrite32(tmp & ~post_mask, reg_offset);
2335
2336         /* If the other port still enables this entry, the function returns "true" */
2337         return tmp & ref_mask;
2338 }
2339
2340 static int sh_eth_tsu_busy(struct net_device *ndev)
2341 {
2342         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2343         struct sh_eth_private *mdp = netdev_priv(ndev);
2344
2345         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2346                 udelay(10);
2347                 timeout--;
2348                 if (timeout <= 0) {
2349                         netdev_err(ndev, "%s: timeout\n", __func__);
2350                         return -ETIMEDOUT;
2351                 }
2352         }
2353
2354         return 0;
2355 }
2356
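/* Editorial note: the busy-wait above polls every 10 us starting from
 * SH_ETH_TSU_TIMEOUT_MS * 100 iterations, i.e. 100 polls per millisecond,
 * so the total wait is SH_ETH_TSU_TIMEOUT_MS milliseconds.
 */
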
2357 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2358                                   const u8 *addr)
2359 {
2360         u32 val;
2361
2362         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2363         iowrite32(val, reg);
2364         if (sh_eth_tsu_busy(ndev) < 0)
2365                 return -EBUSY;
2366
2367         val = addr[4] << 8 | addr[5];
2368         iowrite32(val, reg + 4);
2369         if (sh_eth_tsu_busy(ndev) < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
2374
2375 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2376 {
2377         u32 val;
2378
2379         val = ioread32(reg);
2380         addr[0] = (val >> 24) & 0xff;
2381         addr[1] = (val >> 16) & 0xff;
2382         addr[2] = (val >> 8) & 0xff;
2383         addr[3] = val & 0xff;
2384         val = ioread32(reg + 4);
2385         addr[4] = (val >> 8) & 0xff;
2386         addr[5] = val & 0xff;
2387 }
2388
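/* Editorial worked example for the two entry accessors above: the MAC
 * address 00:11:22:33:44:55 is stored big-endian across two registers as
 *   reg     = 0x00112233   (addr[0]..addr[3])
 *   reg + 4 = 0x00004455   (addr[4] and addr[5] in the low 16 bits)
 */
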
2389
2390 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2391 {
2392         struct sh_eth_private *mdp = netdev_priv(ndev);
2393         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2394         int i;
2395         u8 c_addr[ETH_ALEN];
2396
2397         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2398                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2399                 if (ether_addr_equal(addr, c_addr))
2400                         return i;
2401         }
2402
2403         return -ENOENT;
2404 }
2405
2406 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2407 {
2408         u8 blank[ETH_ALEN];
2409         int entry;
2410
2411         memset(blank, 0, sizeof(blank));
2412         entry = sh_eth_tsu_find_entry(ndev, blank);
2413         return (entry < 0) ? -ENOMEM : entry;
2414 }
2415
2416 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2417                                               int entry)
2418 {
2419         struct sh_eth_private *mdp = netdev_priv(ndev);
2420         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2421         int ret;
2422         u8 blank[ETH_ALEN];
2423
2424         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2425                          ~(1 << (31 - entry)), TSU_TEN);
2426
2427         memset(blank, 0, sizeof(blank));
2428         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2429         if (ret < 0)
2430                 return ret;
2431         return 0;
2432 }
2433
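/* Editorial note: TSU_TEN maps CAM entry i to bit (31 - i), so entry 0 is
 * the most significant bit (0x80000000) and entry 31 the least; the mask
 * ~(1 << (31 - entry)) above clears exactly that entry's enable bit.
 */
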
2434 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2435 {
2436         struct sh_eth_private *mdp = netdev_priv(ndev);
2437         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2438         int i, ret;
2439
2440         if (!mdp->cd->tsu)
2441                 return 0;
2442
2443         i = sh_eth_tsu_find_entry(ndev, addr);
2444         if (i < 0) {
2445                 /* No entry found, create one */
2446                 i = sh_eth_tsu_find_empty(ndev);
2447                 if (i < 0)
2448                         return -ENOMEM;
2449                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2450                 if (ret < 0)
2451                         return ret;
2452
2453                 /* Enable the entry */
2454                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2455                                  (1 << (31 - i)), TSU_TEN);
2456         }
2457
2458         /* Entry found or created, enable POST */
2459         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2460
2461         return 0;
2462 }
2463
2464 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2465 {
2466         struct sh_eth_private *mdp = netdev_priv(ndev);
2467         int i, ret;
2468
2469         if (!mdp->cd->tsu)
2470                 return 0;
2471
2472         i = sh_eth_tsu_find_entry(ndev, addr);
2473         if (i >= 0) {
2474                 /* Entry found */
2475                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2476                         goto done;
2477
2478                 /* Disable the entry if both ports were disabled */
2479                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2480                 if (ret < 0)
2481                         return ret;
2482         }
2483 done:
2484         return 0;
2485 }
2486
2487 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2488 {
2489         struct sh_eth_private *mdp = netdev_priv(ndev);
2490         int i, ret;
2491
2492         if (!mdp->cd->tsu)
2493                 return 0;
2494
2495         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2496                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2497                         continue;
2498
2499                 /* Disable the entry if both ports were disabled */
2500                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2501                 if (ret < 0)
2502                         return ret;
2503         }
2504
2505         return 0;
2506 }
2507
2508 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2509 {
2510         struct sh_eth_private *mdp = netdev_priv(ndev);
2511         u8 addr[ETH_ALEN];
2512         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2513         int i;
2514
2515         if (!mdp->cd->tsu)
2516                 return;
2517
2518         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2519                 sh_eth_tsu_read_entry(reg_offset, addr);
2520                 if (is_multicast_ether_addr(addr))
2521                         sh_eth_tsu_del_entry(ndev, addr);
2522         }
2523 }
2524
2525 /* Update promiscuous flag and multicast filter */
2526 static void sh_eth_set_rx_mode(struct net_device *ndev)
2527 {
2528         struct sh_eth_private *mdp = netdev_priv(ndev);
2529         u32 ecmr_bits;
2530         int mcast_all = 0;
2531         unsigned long flags;
2532
2533         spin_lock_irqsave(&mdp->lock, flags);
2534         /* Initial condition is MCT = 1, PRM = 0.
2535          * Depending on ndev->flags, set PRM or clear MCT
2536          */
2537         ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2538         if (mdp->cd->tsu)
2539                 ecmr_bits |= ECMR_MCT;
2540
2541         if (!(ndev->flags & IFF_MULTICAST)) {
2542                 sh_eth_tsu_purge_mcast(ndev);
2543                 mcast_all = 1;
2544         }
2545         if (ndev->flags & IFF_ALLMULTI) {
2546                 sh_eth_tsu_purge_mcast(ndev);
2547                 ecmr_bits &= ~ECMR_MCT;
2548                 mcast_all = 1;
2549         }
2550
2551         if (ndev->flags & IFF_PROMISC) {
2552                 sh_eth_tsu_purge_all(ndev);
2553                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2554         } else if (mdp->cd->tsu) {
2555                 struct netdev_hw_addr *ha;
2556                 netdev_for_each_mc_addr(ha, ndev) {
2557                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2558                                 continue;
2559
2560                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2561                                 if (!mcast_all) {
2562                                         sh_eth_tsu_purge_mcast(ndev);
2563                                         ecmr_bits &= ~ECMR_MCT;
2564                                         mcast_all = 1;
2565                                 }
2566                         }
2567                 }
2568         }
2569
2570         /* update the ethernet mode */
2571         sh_eth_write(ndev, ecmr_bits, ECMR);
2572
2573         spin_unlock_irqrestore(&mdp->lock, flags);
2574 }
2575
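/* Editorial summary of the ECMR settings chosen above, assuming (per the
 * code, not the datasheet) that ECMR_PRM is the promiscuous bit and
 * ECMR_MCT enables multicast filtering through the TSU CAM:
 *   MCT=1 PRM=0  filter multicast via the CAM (default when a TSU exists)
 *   MCT=0 PRM=0  accept all multicast (IFF_ALLMULTI or CAM overflow)
 *   MCT=0 PRM=1  promiscuous (IFF_PROMISC)
 */
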
2576 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2577 {
2578         if (!mdp->port)
2579                 return TSU_VTAG0;
2580         else
2581                 return TSU_VTAG1;
2582 }
2583
2584 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2585                                   __be16 proto, u16 vid)
2586 {
2587         struct sh_eth_private *mdp = netdev_priv(ndev);
2588         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2589
2590         if (unlikely(!mdp->cd->tsu))
2591                 return -EPERM;
2592
2593         /* No filtering if vid = 0 */
2594         if (!vid)
2595                 return 0;
2596
2597         mdp->vlan_num_ids++;
2598
2599         /* The controller has one VLAN tag HW filter. So, if the filter is
2600          * already enabled, the driver disables it so that all VLAN tags pass.
2601          */
2602         if (mdp->vlan_num_ids > 1) {
2603                 /* disable VLAN filter */
2604                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2605                 return 0;
2606         }
2607
2608         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2609                          vtag_reg_index);
2610
2611         return 0;
2612 }
2613
2614 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2615                                    __be16 proto, u16 vid)
2616 {
2617         struct sh_eth_private *mdp = netdev_priv(ndev);
2618         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2619
2620         if (unlikely(!mdp->cd->tsu))
2621                 return -EPERM;
2622
2623         /* No filtering if vid = 0 */
2624         if (!vid)
2625                 return 0;
2626
2627         mdp->vlan_num_ids--;
2628         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2629
2630         return 0;
2631 }
2632
2633 /* SuperH's TSU register init function */
2634 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2635 {
2636         if (sh_eth_is_rz_fast_ether(mdp)) {
2637                 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
2638                 return;
2639         }
2640
2641         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2642         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2643         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2644         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2645         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2646         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2647         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2648         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2649         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2650         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2651         if (sh_eth_is_gether(mdp)) {
2652                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2653                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2654         } else {
2655                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2656                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2657         }
2658         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2659         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2660         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2661         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2662         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2663         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2664         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2665 }
2666
2667 /* MDIO bus release function */
2668 static int sh_mdio_release(struct sh_eth_private *mdp)
2669 {
2670         /* unregister mdio bus */
2671         mdiobus_unregister(mdp->mii_bus);
2672
2673         /* free bitbang info */
2674         free_mdio_bitbang(mdp->mii_bus);
2675
2676         return 0;
2677 }
2678
2679 /* MDIO bus init function */
2680 static int sh_mdio_init(struct sh_eth_private *mdp,
2681                         struct sh_eth_plat_data *pd)
2682 {
2683         int ret, i;
2684         struct bb_info *bitbang;
2685         struct platform_device *pdev = mdp->pdev;
2686         struct device *dev = &mdp->pdev->dev;
2687
2688         /* create bit control struct for PHY */
2689         bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2690         if (!bitbang)
2691                 return -ENOMEM;
2692
2693         /* bitbang init */
2694         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2695         bitbang->set_gate = pd->set_mdio_gate;
2696         bitbang->mdi_msk = PIR_MDI;
2697         bitbang->mdo_msk = PIR_MDO;
2698         bitbang->mmd_msk = PIR_MMD;
2699         bitbang->mdc_msk = PIR_MDC;
2700         bitbang->ctrl.ops = &bb_ops;
2701
2702         /* MII controller setting */
2703         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2704         if (!mdp->mii_bus)
2705                 return -ENOMEM;
2706
2707         /* Hook up MII support for ethtool */
2708         mdp->mii_bus->name = "sh_mii";
2709         mdp->mii_bus->parent = dev;
2710         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2711                  pdev->name, pdev->id);
2712
2713         /* PHY IRQ */
2714         mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2715                                                GFP_KERNEL);
2716         if (!mdp->mii_bus->irq) {
2717                 ret = -ENOMEM;
2718                 goto out_free_bus;
2719         }
2720
2721         /* register MDIO bus */
2722         if (dev->of_node) {
2723                 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2724         } else {
2725                 for (i = 0; i < PHY_MAX_ADDR; i++)
2726                         mdp->mii_bus->irq[i] = PHY_POLL;
2727                 if (pd->phy_irq > 0)
2728                         mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2729
2730                 ret = mdiobus_register(mdp->mii_bus);
2731         }
2732
2733         if (ret)
2734                 goto out_free_bus;
2735
2736         return 0;
2737
2738 out_free_bus:
2739         free_mdio_bitbang(mdp->mii_bus);
2740         return ret;
2741 }
2742
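/* Editorial worked example: the bus id built above is
 * "<pdev name>-<pdev id in hex>", so a hypothetical "sh7724-ether" device
 * with id 0 yields "sh7724-ether-0"; sh_eth_phy_init() combines that same
 * string with the PHY address via PHY_ID_FMT in the non-DT case.
 */
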
2743 static const u16 *sh_eth_get_register_offset(int register_type)
2744 {
2745         const u16 *reg_offset = NULL;
2746
2747         switch (register_type) {
2748         case SH_ETH_REG_GIGABIT:
2749                 reg_offset = sh_eth_offset_gigabit;
2750                 break;
2751         case SH_ETH_REG_FAST_RZ:
2752                 reg_offset = sh_eth_offset_fast_rz;
2753                 break;
2754         case SH_ETH_REG_FAST_RCAR:
2755                 reg_offset = sh_eth_offset_fast_rcar;
2756                 break;
2757         case SH_ETH_REG_FAST_SH4:
2758                 reg_offset = sh_eth_offset_fast_sh4;
2759                 break;
2760         case SH_ETH_REG_FAST_SH3_SH2:
2761                 reg_offset = sh_eth_offset_fast_sh3_sh2;
2762                 break;
2763         default:
2764                 break;
2765         }
2766
2767         return reg_offset;
2768 }
2769
2770 static const struct net_device_ops sh_eth_netdev_ops = {
2771         .ndo_open               = sh_eth_open,
2772         .ndo_stop               = sh_eth_close,
2773         .ndo_start_xmit         = sh_eth_start_xmit,
2774         .ndo_get_stats          = sh_eth_get_stats,
2775         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2776         .ndo_tx_timeout         = sh_eth_tx_timeout,
2777         .ndo_do_ioctl           = sh_eth_do_ioctl,
2778         .ndo_validate_addr      = eth_validate_addr,
2779         .ndo_set_mac_address    = eth_mac_addr,
2780         .ndo_change_mtu         = eth_change_mtu,
2781 };
2782
2783 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2784         .ndo_open               = sh_eth_open,
2785         .ndo_stop               = sh_eth_close,
2786         .ndo_start_xmit         = sh_eth_start_xmit,
2787         .ndo_get_stats          = sh_eth_get_stats,
2788         .ndo_set_rx_mode        = sh_eth_set_rx_mode,
2789         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
2790         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
2791         .ndo_tx_timeout         = sh_eth_tx_timeout,
2792         .ndo_do_ioctl           = sh_eth_do_ioctl,
2793         .ndo_validate_addr      = eth_validate_addr,
2794         .ndo_set_mac_address    = eth_mac_addr,
2795         .ndo_change_mtu         = eth_change_mtu,
2796 };
2797
2798 #ifdef CONFIG_OF
2799 static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2800 {
2801         struct device_node *np = dev->of_node;
2802         struct sh_eth_plat_data *pdata;
2803         const char *mac_addr;
2804
2805         pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2806         if (!pdata)
2807                 return NULL;
2808
2809         pdata->phy_interface = of_get_phy_mode(np);
2810
2811         mac_addr = of_get_mac_address(np);
2812         if (mac_addr)
2813                 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2814
2815         pdata->no_ether_link =
2816                 of_property_read_bool(np, "renesas,no-ether-link");
2817         pdata->ether_link_active_low =
2818                 of_property_read_bool(np, "renesas,ether-link-active-low");
2819
2820         return pdata;
2821 }
2822
2823 static const struct of_device_id sh_eth_match_table[] = {
2824         { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2825         { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2826         { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2827         { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2828         { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2829         { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data },
2830         { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
2831         { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2832         { }
2833 };
2834 MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2835 #else
2836 static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2837 {
2838         return NULL;
2839 }
2840 #endif
2841
2842 static int sh_eth_drv_probe(struct platform_device *pdev)
2843 {
2844         int ret, devno = 0;
2845         struct resource *res;
2846         struct net_device *ndev = NULL;
2847         struct sh_eth_private *mdp = NULL;
2848         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2849         const struct platform_device_id *id = platform_get_device_id(pdev);
2850
2851         /* get base addr */
2852         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2853
2854         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2855         if (!ndev)
2856                 return -ENOMEM;
2857
2858         pm_runtime_enable(&pdev->dev);
2859         pm_runtime_get_sync(&pdev->dev);
2860
2861         devno = pdev->id;
2862         if (devno < 0)
2863                 devno = 0;
2864
2865         ndev->dma = -1;
2866         ret = platform_get_irq(pdev, 0);
2867         if (ret < 0) {
2868                 ret = -ENODEV;
2869                 goto out_release;
2870         }
2871         ndev->irq = ret;
2872
2873         SET_NETDEV_DEV(ndev, &pdev->dev);
2874
2875         mdp = netdev_priv(ndev);
2876         mdp->num_tx_ring = TX_RING_SIZE;
2877         mdp->num_rx_ring = RX_RING_SIZE;
2878         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2879         if (IS_ERR(mdp->addr)) {
2880                 ret = PTR_ERR(mdp->addr);
2881                 goto out_release;
2882         }
2883
2884         ndev->base_addr = res->start;
2885
2886         spin_lock_init(&mdp->lock);
2887         mdp->pdev = pdev;
2888
2889         if (pdev->dev.of_node)
2890                 pd = sh_eth_parse_dt(&pdev->dev);
2891         if (!pd) {
2892                 dev_err(&pdev->dev, "no platform data\n");
2893                 ret = -EINVAL;
2894                 goto out_release;
2895         }
2896
2897         /* get PHY ID */
2898         mdp->phy_id = pd->phy;
2899         mdp->phy_interface = pd->phy_interface;
2900         /* EDMAC endian */
2901         mdp->edmac_endian = pd->edmac_endian;
2902         mdp->no_ether_link = pd->no_ether_link;
2903         mdp->ether_link_active_low = pd->ether_link_active_low;
2904
2905         /* set cpu data */
2906         if (id) {
2907                 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2908         } else  {
2909                 const struct of_device_id *match;
2910
2911                 match = of_match_device(of_match_ptr(sh_eth_match_table),
2912                                         &pdev->dev);
2913                 mdp->cd = (struct sh_eth_cpu_data *)match->data;
2914         }
2915         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2916         if (!mdp->reg_offset) {
2917                 dev_err(&pdev->dev, "Unknown register type (%d)\n",
2918                         mdp->cd->register_type);
2919                 ret = -EINVAL;
2920                 goto out_release;
2921         }
2922         sh_eth_set_default_cpu_data(mdp->cd);
2923
2924         /* set function */
2925         if (mdp->cd->tsu)
2926                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2927         else
2928                 ndev->netdev_ops = &sh_eth_netdev_ops;
2929         ndev->ethtool_ops = &sh_eth_ethtool_ops;
2930         ndev->watchdog_timeo = TX_TIMEOUT;
2931
2932         /* debug message level */
2933         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2934
2935         /* read and set MAC address */
2936         read_mac_address(ndev, pd->mac_addr);
2937         if (!is_valid_ether_addr(ndev->dev_addr)) {
2938                 dev_warn(&pdev->dev,
2939                          "no valid MAC address supplied, using a random one.\n");
2940                 eth_hw_addr_random(ndev);
2941         }
2942
2943         /* ioremap the TSU registers */
2944         if (mdp->cd->tsu) {
2945                 struct resource *rtsu;
2946                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2947                 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2948                 if (IS_ERR(mdp->tsu_addr)) {
2949                         ret = PTR_ERR(mdp->tsu_addr);
2950                         goto out_release;
2951                 }
2952                 mdp->port = devno % 2;
2953                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2954         }
2955
2956         /* initialize the first device, or any device that requires init */
2957         if (!devno || pd->needs_init) {
2958                 if (mdp->cd->chip_reset)
2959                         mdp->cd->chip_reset(ndev);
2960
2961                 if (mdp->cd->tsu) {
2962                         /* TSU init (Init only) */
2963                         sh_eth_tsu_init(mdp);
2964                 }
2965         }
2966
2967         if (mdp->cd->rmiimode)
2968                 sh_eth_write(ndev, 0x1, RMIIMODE);
2969
2970         /* MDIO bus init */
2971         ret = sh_mdio_init(mdp, pd);
2972         if (ret) {
2973                 dev_err(&ndev->dev, "failed to initialise MDIO\n");
2974                 goto out_release;
2975         }
2976
2977         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2978
2979         /* network device register */
2980         ret = register_netdev(ndev);
2981         if (ret)
2982                 goto out_napi_del;
2983
2984         /* print device information */
2985         netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2986                     (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2987
2988         pm_runtime_put(&pdev->dev);
2989         platform_set_drvdata(pdev, ndev);
2990
2991         return ret;
2992
2993 out_napi_del:
2994         netif_napi_del(&mdp->napi);
2995         sh_mdio_release(mdp);
2996
2997 out_release:
2998         /* net_dev free */
2999         if (ndev)
3000                 free_netdev(ndev);
3001
3002         pm_runtime_put(&pdev->dev);
3003         pm_runtime_disable(&pdev->dev);
3004         return ret;
3005 }
3006
3007 static int sh_eth_drv_remove(struct platform_device *pdev)
3008 {
3009         struct net_device *ndev = platform_get_drvdata(pdev);
3010         struct sh_eth_private *mdp = netdev_priv(ndev);
3011
3012         unregister_netdev(ndev);
3013         netif_napi_del(&mdp->napi);
3014         sh_mdio_release(mdp);
3015         pm_runtime_disable(&pdev->dev);
3016         free_netdev(ndev);
3017
3018         return 0;
3019 }
3020
3021 #ifdef CONFIG_PM
3022 static int sh_eth_runtime_nop(struct device *dev)
3023 {
3024         /* Runtime PM callback shared between ->runtime_suspend()
3025          * and ->runtime_resume(). Simply returns success.
3026          *
3027          * This driver re-initializes all registers after
3028          * pm_runtime_get_sync() anyway so there is no need
3029          * to save and restore registers here.
3030          */
3031         return 0;
3032 }
3033
3034 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3035         .runtime_suspend = sh_eth_runtime_nop,
3036         .runtime_resume = sh_eth_runtime_nop,
3037 };
3038 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3039 #else
3040 #define SH_ETH_PM_OPS NULL
3041 #endif
3042
3043 static struct platform_device_id sh_eth_id_table[] = {
3044         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3045         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3046         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3047         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3048         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3049         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3050         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3051         { "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
3052         { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
3053         { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
3054         { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
3055         { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
3056         { "r8a7793-ether", (kernel_ulong_t)&r8a779x_data },
3057         { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
3058         { }
3059 };
3060 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3061
3062 static struct platform_driver sh_eth_driver = {
3063         .probe = sh_eth_drv_probe,
3064         .remove = sh_eth_drv_remove,
3065         .id_table = sh_eth_id_table,
3066         .driver = {
3067                    .name = CARDNAME,
3068                    .pm = SH_ETH_PM_OPS,
3069                    .of_match_table = of_match_ptr(sh_eth_match_table),
3070         },
3071 };
3072
3073 module_platform_driver(sh_eth_driver);
3074
3075 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3076 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3077 MODULE_LICENSE("GPL v2");