Merge git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next
[deliverable/linux.git] / drivers / net / ethernet / renesas / sh_eth.c
CommitLineData
86a74ff2
NI
1/*
2 * SuperH Ethernet device driver
3 *
b0ca2a21 4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
380af9e3 5 * Copyright (C) 2008-2009 Renesas Solutions Corp.
86a74ff2
NI
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 */
22
86a74ff2 23#include <linux/init.h>
0654011d
YS
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/spinlock.h>
6a27cded 27#include <linux/interrupt.h>
86a74ff2
NI
28#include <linux/dma-mapping.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/mdio-bitbang.h>
33#include <linux/netdevice.h>
34#include <linux/phy.h>
35#include <linux/cache.h>
36#include <linux/io.h>
bcd5149d 37#include <linux/pm_runtime.h>
5a0e3ad6 38#include <linux/slab.h>
dc19e4e5 39#include <linux/ethtool.h>
fdb37a7f 40#include <linux/if_vlan.h>
d4fa0e35 41#include <linux/sh_eth.h>
86a74ff2
NI
42
43#include "sh_eth.h"
44
dc19e4e5
NI
45#define SH_ETH_DEF_MSG_ENABLE \
46 (NETIF_MSG_LINK | \
47 NETIF_MSG_TIMER | \
48 NETIF_MSG_RX_ERR| \
49 NETIF_MSG_TX_ERR)
50
380af9e3 51/* There is CPU dependent code */
65ac8851
YS
52#if defined(CONFIG_CPU_SUBTYPE_SH7724)
53#define SH_ETH_RESET_DEFAULT 1
54static void sh_eth_set_duplex(struct net_device *ndev)
55{
56 struct sh_eth_private *mdp = netdev_priv(ndev);
65ac8851
YS
57
58 if (mdp->duplex) /* Full */
4a55530f 59 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
65ac8851 60 else /* Half */
4a55530f 61 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
65ac8851
YS
62}
63
64static void sh_eth_set_rate(struct net_device *ndev)
65{
66 struct sh_eth_private *mdp = netdev_priv(ndev);
65ac8851
YS
67
68 switch (mdp->speed) {
69 case 10: /* 10BASE */
4a55530f 70 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
65ac8851
YS
71 break;
72 case 100:/* 100BASE */
4a55530f 73 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
65ac8851
YS
74 break;
75 default:
76 break;
77 }
78}
79
/* SH7724 */
/* Per-SoC operations and register defaults for the SH7724 E-MAC.
 * Fields left zero here are filled in by sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	/* E-MAC status/interrupt and E-DMAC interrupt enable defaults */
	.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	/* EESR bits treated as Tx completion vs. error conditions */
	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	/* hardware capability flags */
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
f29a3d04 101#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
8fcd4961
YS
102#define SH_ETH_HAS_BOTH_MODULES 1
103#define SH_ETH_HAS_TSU 1
f29a3d04
YS
104static void sh_eth_set_duplex(struct net_device *ndev)
105{
106 struct sh_eth_private *mdp = netdev_priv(ndev);
f29a3d04
YS
107
108 if (mdp->duplex) /* Full */
4a55530f 109 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
f29a3d04 110 else /* Half */
4a55530f 111 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
f29a3d04
YS
112}
113
114static void sh_eth_set_rate(struct net_device *ndev)
115{
116 struct sh_eth_private *mdp = netdev_priv(ndev);
f29a3d04
YS
117
118 switch (mdp->speed) {
119 case 10: /* 10BASE */
4a55530f 120 sh_eth_write(ndev, 0, RTRATE);
f29a3d04
YS
121 break;
122 case 100:/* 100BASE */
4a55530f 123 sh_eth_write(ndev, 1, RTRATE);
f29a3d04
YS
124 break;
125 default:
126 break;
127 }
128}
129
/* SH7757 */
/* Per-SoC operations and register defaults for the SH7757 (fast-Ethernet
 * block).  Zero fields get defaults from sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value = 0x00000001,

	/* EESR bits treated as Tx completion vs. error conditions */
	.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	/* hardware capability flags */
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
	.no_ade = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
};
65ac8851 151
8fcd4961
YS
152#define SH_GIGA_ETH_BASE 0xfee00000
153#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
154#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
155static void sh_eth_chip_reset_giga(struct net_device *ndev)
156{
157 int i;
158 unsigned long mahr[2], malr[2];
159
160 /* save MAHR and MALR */
161 for (i = 0; i < 2; i++) {
ae70644d
YS
162 malr[i] = ioread32((void *)GIGA_MALR(i));
163 mahr[i] = ioread32((void *)GIGA_MAHR(i));
8fcd4961
YS
164 }
165
166 /* reset device */
ae70644d 167 iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
8fcd4961
YS
168 mdelay(1);
169
170 /* restore MAHR and MALR */
171 for (i = 0; i < 2; i++) {
ae70644d
YS
172 iowrite32(malr[i], (void *)GIGA_MALR(i));
173 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
8fcd4961
YS
174 }
175}
176
177static int sh_eth_is_gether(struct sh_eth_private *mdp);
178static void sh_eth_reset(struct net_device *ndev)
179{
180 struct sh_eth_private *mdp = netdev_priv(ndev);
181 int cnt = 100;
182
183 if (sh_eth_is_gether(mdp)) {
184 sh_eth_write(ndev, 0x03, EDSR);
185 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
186 EDMR);
187 while (cnt > 0) {
188 if (!(sh_eth_read(ndev, EDMR) & 0x3))
189 break;
190 mdelay(1);
191 cnt--;
192 }
193 if (cnt < 0)
194 printk(KERN_ERR "Device reset fail\n");
195
196 /* Table Init */
197 sh_eth_write(ndev, 0x0, TDLAR);
198 sh_eth_write(ndev, 0x0, TDFAR);
199 sh_eth_write(ndev, 0x0, TDFXR);
200 sh_eth_write(ndev, 0x0, TDFFR);
201 sh_eth_write(ndev, 0x0, RDLAR);
202 sh_eth_write(ndev, 0x0, RDFAR);
203 sh_eth_write(ndev, 0x0, RDFXR);
204 sh_eth_write(ndev, 0x0, RDFFR);
205 } else {
206 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
207 EDMR);
208 mdelay(3);
209 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
210 EDMR);
211 }
212}
213
214static void sh_eth_set_duplex_giga(struct net_device *ndev)
215{
216 struct sh_eth_private *mdp = netdev_priv(ndev);
217
218 if (mdp->duplex) /* Full */
219 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
220 else /* Half */
221 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
222}
223
224static void sh_eth_set_rate_giga(struct net_device *ndev)
225{
226 struct sh_eth_private *mdp = netdev_priv(ndev);
227
228 switch (mdp->speed) {
229 case 10: /* 10BASE */
230 sh_eth_write(ndev, 0x00000000, GECMR);
231 break;
232 case 100:/* 100BASE */
233 sh_eth_write(ndev, 0x00000010, GECMR);
234 break;
235 case 1000: /* 1000BASE */
236 sh_eth_write(ndev, 0x00000020, GECMR);
237 break;
238 default:
239 break;
240 }
241}
242
/* SH7757(GETHERC) */
/* Per-SoC operations and register defaults for the SH7757 gigabit
 * (GETHERC) block; selected at runtime by sh_eth_get_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
	.chip_reset = sh_eth_chip_reset_giga,
	.set_duplex = sh_eth_set_duplex_giga,
	.set_rate = sh_eth_set_rate_giga,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	/* EESR bits treated as Tx completion vs. error conditions;
	 * the GETHER block has the extra TWB1 write-back bit. */
	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,
	.fdr_value = 0x0000072f,
	.rmcr_value = 0x00000001,

	/* hardware capability flags */
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.rpadir = 1,
	.rpadir_value = 2 << 16,
	.no_trimd = 1,
	.no_ade = 1,
};
272
273static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
274{
275 if (sh_eth_is_gether(mdp))
276 return &sh_eth_my_cpu_data_giga;
277 else
278 return &sh_eth_my_cpu_data;
279}
280
65ac8851 281#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
380af9e3
YS
282#define SH_ETH_HAS_TSU 1
283static void sh_eth_chip_reset(struct net_device *ndev)
284{
4986b996
YS
285 struct sh_eth_private *mdp = netdev_priv(ndev);
286
380af9e3 287 /* reset device */
4986b996 288 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
380af9e3
YS
289 mdelay(1);
290}
291
292static void sh_eth_reset(struct net_device *ndev)
293{
380af9e3
YS
294 int cnt = 100;
295
4a55530f 296 sh_eth_write(ndev, EDSR_ENALL, EDSR);
c5ed5368 297 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
380af9e3 298 while (cnt > 0) {
4a55530f 299 if (!(sh_eth_read(ndev, EDMR) & 0x3))
380af9e3
YS
300 break;
301 mdelay(1);
302 cnt--;
303 }
890c8c18 304 if (cnt == 0)
380af9e3
YS
305 printk(KERN_ERR "Device reset fail\n");
306
307 /* Table Init */
4a55530f
YS
308 sh_eth_write(ndev, 0x0, TDLAR);
309 sh_eth_write(ndev, 0x0, TDFAR);
310 sh_eth_write(ndev, 0x0, TDFXR);
311 sh_eth_write(ndev, 0x0, TDFFR);
312 sh_eth_write(ndev, 0x0, RDLAR);
313 sh_eth_write(ndev, 0x0, RDFAR);
314 sh_eth_write(ndev, 0x0, RDFXR);
315 sh_eth_write(ndev, 0x0, RDFFR);
380af9e3
YS
316}
317
318static void sh_eth_set_duplex(struct net_device *ndev)
319{
320 struct sh_eth_private *mdp = netdev_priv(ndev);
380af9e3
YS
321
322 if (mdp->duplex) /* Full */
4a55530f 323 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
380af9e3 324 else /* Half */
4a55530f 325 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
380af9e3
YS
326}
327
328static void sh_eth_set_rate(struct net_device *ndev)
329{
330 struct sh_eth_private *mdp = netdev_priv(ndev);
380af9e3
YS
331
332 switch (mdp->speed) {
333 case 10: /* 10BASE */
4a55530f 334 sh_eth_write(ndev, GECMR_10, GECMR);
380af9e3
YS
335 break;
336 case 100:/* 100BASE */
4a55530f 337 sh_eth_write(ndev, GECMR_100, GECMR);
380af9e3
YS
338 break;
339 case 1000: /* 1000BASE */
4a55530f 340 sh_eth_write(ndev, GECMR_1000, GECMR);
380af9e3
YS
341 break;
342 default:
343 break;
344 }
345}
346
/* sh7763 */
/* Per-SoC operations and register defaults for the SH7763 GETHER. */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset = sh_eth_chip_reset,
	.set_duplex = sh_eth_set_duplex,
	.set_rate = sh_eth_set_rate,

	.ecsr_value = ECSR_ICD | ECSR_MPD,
	.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	/* EESR bits treated as Tx completion vs. error conditions */
	.tx_check = EESR_TC1 | EESR_FTC,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	/* hardware capability flags */
	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.bculr = 1,
	.hw_swap = 1,
	.no_trimd = 1,
	.no_ade = 1,
	.tsu = 1,
};
373
374#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
375#define SH_ETH_RESET_DEFAULT 1
/* SH7619: minimal cpu_data; most register values come from
 * sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr = 1,
	.mpr = 1,
	.tpauser = 1,
	.hw_swap = 1,
};
384#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
385#define SH_ETH_RESET_DEFAULT 1
386#define SH_ETH_HAS_TSU 1
/* SH7710/SH7712: minimal cpu_data with a TSU; most register values come
 * from sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu = 1,
};
391#endif
392
393static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
394{
395 if (!cd->ecsr_value)
396 cd->ecsr_value = DEFAULT_ECSR_INIT;
397
398 if (!cd->ecsipr_value)
399 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
400
401 if (!cd->fcftr_value)
402 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
403 DEFAULT_FIFO_F_D_RFD;
404
405 if (!cd->fdr_value)
406 cd->fdr_value = DEFAULT_FDR_INIT;
407
408 if (!cd->rmcr_value)
409 cd->rmcr_value = DEFAULT_RMCR_VALUE;
410
411 if (!cd->tx_check)
412 cd->tx_check = DEFAULT_TX_CHECK;
413
414 if (!cd->eesr_err_check)
415 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
416
417 if (!cd->tx_error_check)
418 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
419}
420
421#if defined(SH_ETH_RESET_DEFAULT)
422/* Chip Reset */
423static void sh_eth_reset(struct net_device *ndev)
424{
c5ed5368 425 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
380af9e3 426 mdelay(3);
c5ed5368 427 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
380af9e3
YS
428}
429#endif
430
431#if defined(CONFIG_CPU_SH4)
432static void sh_eth_set_receive_align(struct sk_buff *skb)
433{
434 int reserve;
435
436 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
437 if (reserve)
438 skb_reserve(skb, reserve);
439}
440#else
/* SH2/SH3 variant: fixed headroom reservation for Rx buffer alignment. */
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
445#endif
446
447
71557a37
YS
448/* CPU <-> EDMAC endian convert */
449static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
450{
451 switch (mdp->edmac_endian) {
452 case EDMAC_LITTLE_ENDIAN:
453 return cpu_to_le32(x);
454 case EDMAC_BIG_ENDIAN:
455 return cpu_to_be32(x);
456 }
457 return x;
458}
459
460static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
461{
462 switch (mdp->edmac_endian) {
463 case EDMAC_LITTLE_ENDIAN:
464 return le32_to_cpu(x);
465 case EDMAC_BIG_ENDIAN:
466 return be32_to_cpu(x);
467 }
468 return x;
469}
470
86a74ff2
NI
471/*
472 * Program the hardware MAC address from dev->dev_addr.
473 */
474static void update_mac_address(struct net_device *ndev)
475{
4a55530f
YS
476 sh_eth_write(ndev,
477 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
478 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
479 sh_eth_write(ndev,
480 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
86a74ff2
NI
481}
482
483/*
484 * Get MAC address from SuperH MAC address register
485 *
486 * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
487 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
488 * When you want use this device, you must set MAC address in bootloader.
489 *
490 */
748031f9 491static void read_mac_address(struct net_device *ndev, unsigned char *mac)
86a74ff2 492{
748031f9
MD
493 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
494 memcpy(ndev->dev_addr, mac, 6);
495 } else {
4a55530f
YS
496 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
497 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
498 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
499 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
500 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
501 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
748031f9 502 }
86a74ff2
NI
503}
504
c5ed5368
YS
505static int sh_eth_is_gether(struct sh_eth_private *mdp)
506{
507 if (mdp->reg_offset == sh_eth_offset_gigabit)
508 return 1;
509 else
510 return 0;
511}
512
513static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
514{
515 if (sh_eth_is_gether(mdp))
516 return EDTRR_TRNS_GETHER;
517 else
518 return EDTRR_TRNS_ETHER;
519}
520
/* State for one bit-banged MDIO bus.  addr is the mapped register the
 * pin bits live in (presumably the PIR register — verify against probe),
 * and the *_msk fields select individual pins within it. */
struct bb_info {
	void (*set_gate)(void *addr);	/* optional bus-gate hook; may be NULL */
	struct mdiobb_ctrl ctrl;	/* embedded mdio-bitbang control block */
	void *addr;			/* mapped pin-control register */
	u32 mmd_msk;/* MMD */		/* management-data direction bit */
	u32 mdo_msk;			/* data-out bit */
	u32 mdi_msk;			/* data-in bit */
	u32 mdc_msk;			/* clock bit */
};
530
531/* PHY bit set */
ae70644d 532static void bb_set(void *addr, u32 msk)
86a74ff2 533{
ae70644d 534 iowrite32(ioread32(addr) | msk, addr);
86a74ff2
NI
535}
536
537/* PHY bit clear */
ae70644d 538static void bb_clr(void *addr, u32 msk)
86a74ff2 539{
ae70644d 540 iowrite32((ioread32(addr) & ~msk), addr);
86a74ff2
NI
541}
542
543/* PHY bit read */
ae70644d 544static int bb_read(void *addr, u32 msk)
86a74ff2 545{
ae70644d 546 return (ioread32(addr) & msk) != 0;
86a74ff2
NI
547}
548
549/* Data I/O pin control */
550static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
551{
552 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
b3017e6a
YS
553
554 if (bitbang->set_gate)
555 bitbang->set_gate(bitbang->addr);
556
86a74ff2
NI
557 if (bit)
558 bb_set(bitbang->addr, bitbang->mmd_msk);
559 else
560 bb_clr(bitbang->addr, bitbang->mmd_msk);
561}
562
563/* Set bit data*/
564static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
565{
566 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
567
b3017e6a
YS
568 if (bitbang->set_gate)
569 bitbang->set_gate(bitbang->addr);
570
86a74ff2
NI
571 if (bit)
572 bb_set(bitbang->addr, bitbang->mdo_msk);
573 else
574 bb_clr(bitbang->addr, bitbang->mdo_msk);
575}
576
577/* Get bit data*/
578static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
579{
580 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
b3017e6a
YS
581
582 if (bitbang->set_gate)
583 bitbang->set_gate(bitbang->addr);
584
86a74ff2
NI
585 return bb_read(bitbang->addr, bitbang->mdi_msk);
586}
587
588/* MDC pin control */
589static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
590{
591 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
592
b3017e6a
YS
593 if (bitbang->set_gate)
594 bitbang->set_gate(bitbang->addr);
595
86a74ff2
NI
596 if (bit)
597 bb_set(bitbang->addr, bitbang->mdc_msk);
598 else
599 bb_clr(bitbang->addr, bitbang->mdc_msk);
600}
601
/* mdio bus control struct */
/* Callbacks handed to the generic mdio-bitbang framework. */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,		/* drive MDC clock line */
	.set_mdio_dir = sh_mmd_ctrl,	/* set MDIO pin direction */
	.set_mdio_data = sh_set_mdio,	/* drive MDIO data out */
	.get_mdio_data = sh_get_mdio,	/* sample MDIO data in */
};
610
86a74ff2
NI
611/* free skb and descriptor buffer */
612static void sh_eth_ring_free(struct net_device *ndev)
613{
614 struct sh_eth_private *mdp = netdev_priv(ndev);
615 int i;
616
617 /* Free Rx skb ringbuffer */
618 if (mdp->rx_skbuff) {
619 for (i = 0; i < RX_RING_SIZE; i++) {
620 if (mdp->rx_skbuff[i])
621 dev_kfree_skb(mdp->rx_skbuff[i]);
622 }
623 }
624 kfree(mdp->rx_skbuff);
625
626 /* Free Tx skb ringbuffer */
627 if (mdp->tx_skbuff) {
628 for (i = 0; i < TX_RING_SIZE; i++) {
629 if (mdp->tx_skbuff[i])
630 dev_kfree_skb(mdp->tx_skbuff[i]);
631 }
632 }
633 kfree(mdp->tx_skbuff);
634}
635
/* format skb and descriptor buffer */
/*
 * Initialise the already-allocated Rx/Tx descriptor rings: allocate and
 * DMA-map one skb per Rx descriptor, mark all descriptors with their
 * initial ownership bits, and program the ring base registers.
 */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;	/* out of memory: format a shorter ring */
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor: hand the buffer to the EDMAC (RACT set) */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	/* negative offset so refill starts from the first missing buffer */
	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer (all descriptors owned by the CPU) */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	/* last Tx descriptor wraps the ring */
	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
702
703/* Get skb and descriptor buffer */
704static int sh_eth_ring_init(struct net_device *ndev)
705{
706 struct sh_eth_private *mdp = netdev_priv(ndev);
707 int rx_ringsize, tx_ringsize, ret = 0;
708
709 /*
710 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
711 * card needs room to do 8 byte alignment, +2 so we can reserve
712 * the first 2 bytes, and +16 gets room for the status word from the
713 * card.
714 */
715 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
716 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
503914cf
MD
717 if (mdp->cd->rpadir)
718 mdp->rx_buf_sz += NET_IP_ALIGN;
86a74ff2
NI
719
720 /* Allocate RX and TX skb rings */
721 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
722 GFP_KERNEL);
723 if (!mdp->rx_skbuff) {
380af9e3 724 dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
86a74ff2
NI
725 ret = -ENOMEM;
726 return ret;
727 }
728
729 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
730 GFP_KERNEL);
731 if (!mdp->tx_skbuff) {
380af9e3 732 dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
86a74ff2
NI
733 ret = -ENOMEM;
734 goto skb_ring_free;
735 }
736
737 /* Allocate all Rx descriptors. */
738 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
739 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
740 GFP_KERNEL);
741
742 if (!mdp->rx_ring) {
380af9e3
YS
743 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
744 rx_ringsize);
86a74ff2
NI
745 ret = -ENOMEM;
746 goto desc_ring_free;
747 }
748
749 mdp->dirty_rx = 0;
750
751 /* Allocate all Tx descriptors. */
752 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
753 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
754 GFP_KERNEL);
755 if (!mdp->tx_ring) {
380af9e3
YS
756 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
757 tx_ringsize);
86a74ff2
NI
758 ret = -ENOMEM;
759 goto desc_ring_free;
760 }
761 return ret;
762
763desc_ring_free:
764 /* free DMA buffer */
765 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
766
767skb_ring_free:
768 /* Free Rx and Tx skb ring buffer */
769 sh_eth_ring_free(ndev);
770
771 return ret;
772}
773
/*
 * Reset the controller and program every E-MAC/E-DMAC register needed
 * for operation, then start the Rx engine and the transmit queue.
 * The register write order follows the hardware bring-up sequence; do
 * not reorder.  Always returns 0.
 */
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	/* descriptor interrupt coalescing thresholds */
	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst sycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	/* clear pending E-DMAC status, then unmask the chip's interrupts */
	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);

	return ret;
}
858
859/* free Tx skb function */
860static int sh_eth_txfree(struct net_device *ndev)
861{
862 struct sh_eth_private *mdp = netdev_priv(ndev);
863 struct sh_eth_txdesc *txdesc;
864 int freeNum = 0;
865 int entry = 0;
866
867 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
868 entry = mdp->dirty_tx % TX_RING_SIZE;
869 txdesc = &mdp->tx_ring[entry];
71557a37 870 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
86a74ff2
NI
871 break;
872 /* Free the original skb. */
873 if (mdp->tx_skbuff[entry]) {
31fcb99d
YS
874 dma_unmap_single(&ndev->dev, txdesc->addr,
875 txdesc->buffer_length, DMA_TO_DEVICE);
86a74ff2
NI
876 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
877 mdp->tx_skbuff[entry] = NULL;
878 freeNum++;
879 }
71557a37 880 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
86a74ff2 881 if (entry >= TX_RING_SIZE - 1)
71557a37 882 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
86a74ff2 883
bb7d92e3
ED
884 ndev->stats.tx_packets++;
885 ndev->stats.tx_bytes += txdesc->buffer_length;
86a74ff2
NI
886 }
887 return freeNum;
888}
889
/* Packet receive function */
/*
 * Drain completed Rx descriptors: account errors, pass good frames to the
 * stack via netif_rx(), then refill the ring with fresh skbs and restart
 * the Rx engine if it stopped.  Always returns 0.
 */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	/* at most one full ring's worth of work per call */
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	/* process until we hit a descriptor still owned by the EDMAC */
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* any RFS error bit set: count it and drop the frame */
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else {
			/* byte-swap in software when the DMAC can't */
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}
		/* hand the descriptor back to the EDMAC */
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}
982
4a55530f 983static void sh_eth_rcv_snd_disable(struct net_device *ndev)
dc19e4e5
NI
984{
985 /* disable tx and rx */
4a55530f
YS
986 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
987 ~(ECMR_RE | ECMR_TE), ECMR);
dc19e4e5
NI
988}
989
4a55530f 990static void sh_eth_rcv_snd_enable(struct net_device *ndev)
dc19e4e5
NI
991{
992 /* enable tx and rx */
4a55530f
YS
993 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
994 (ECMR_RE | ECMR_TE), ECMR);
dc19e4e5
NI
995}
996
86a74ff2
NI
/* error control function */
/*
 * Handle the error-class causes latched in EESR: E-MAC status change
 * (link up/down via ECSR), TX/RX aborts, descriptor-empty and FIFO
 * over/underflow conditions.  Bumps the matching netdev statistics and,
 * for fatal TX errors, frees the dirty TX ring and restarts TX DMA.
 * Called from sh_eth_interrupt() with mdp->lock held.
 */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		felic_stat = sh_eth_read(ndev, ECSR);
		sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			ndev->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				/* No usable PSR register: fall back to the
				 * link state tracked by sh_eth_adjust_link(). */
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (sh_eth_read(ndev, PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK))
				sh_eth_rcv_snd_disable(ndev);
			else {
				/* Link Up */
				/* Mask ECI while clearing the latched E-MAC
				 * status, then unmask it again. */
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
					  ~DMAC_M_ECI, EESIPR);
				/*clear int */
				sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
					  ECSR);
				sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
					  DMAC_M_ECI, EESIPR);
				/* enable tx and rx */
				sh_eth_rcv_snd_enable(ndev);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write buck end. unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Abort\n");
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
			if (netif_msg_rx_err(mdp))
				dev_err(&ndev->dev, "Receive Abort\n");
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;

		/* Restart the RX DMA engine if it has stopped. */
		if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
			sh_eth_write(ndev, EDRRR_R, EDRRR);
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(mdp))
			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		if (netif_msg_tx_err(mdp))
			dev_err(&ndev->dev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
			/* tx dma start */
			sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
1118
/*
 * Top-level interrupt handler.
 *
 * Reads EESR, writes the handled bits back to acknowledge them, then
 * dispatches: RX-class causes to sh_eth_rx(), TX-completion causes to
 * sh_eth_txfree(), and error causes to sh_eth_error().  Returns
 * IRQ_NONE when none of our causes are pending (the line may be shared
 * on some SoCs).
 */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status = 0;

	spin_lock(&mdp->lock);

	/* Get interrpt stat */
	intr_status = sh_eth_read(ndev, EESR);
	/* Clear interrupt */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		sh_eth_write(ndev, intr_status, EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv*/
			EESR_RMAF | /* Multi cast address recv*/
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv*/
			EESR_RTSF | /* short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)){ /* recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
1164
1165static void sh_eth_timer(unsigned long data)
1166{
1167 struct net_device *ndev = (struct net_device *)data;
1168 struct sh_eth_private *mdp = netdev_priv(ndev);
1169
1170 mod_timer(&mdp->timer, jiffies + (10 * HZ));
1171}
1172
/* PHY state control function */
/*
 * phylib adjust_link callback: mirror the PHY's duplex/speed/link state
 * into mdp and reprogram the MAC through the per-SoC set_duplex/set_rate
 * hooks when something changed.  Logs via phy_print_status() when the
 * LINK message level is enabled.
 */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	int new_state = 0;	/* set when any cached PHY parameter changed */

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			/* Link came up: clear the TXF flag in ECMR. */
			sh_eth_write(ndev,
				(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		/* Link went down: invalidate the cached speed/duplex. */
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state && netif_msg_link(mdp))
		phy_print_status(phydev);
}
1210
/* PHY init function */
/*
 * Build the "bus_id:phy_addr" identifier from the platform data, reset
 * the cached link state, and connect to the PHY with
 * sh_eth_adjust_link() as the state-change callback.
 *
 * Returns 0 on success or the PTR_ERR() of a failed phy_connect().
 */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		mdp->mii_bus->id , mdp->phy_id);

	/* Start from "no link"; sh_eth_adjust_link() updates these. */
	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try connect to PHY */
	phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
				0, mdp->phy_interface);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}

	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}
1240
1241/* PHY control start function */
1242static int sh_eth_phy_start(struct net_device *ndev)
1243{
1244 struct sh_eth_private *mdp = netdev_priv(ndev);
1245 int ret;
1246
1247 ret = sh_eth_phy_init(ndev);
1248 if (ret)
1249 return ret;
1250
1251 /* reset phy - this also wakes it from PDOWN */
1252 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1253 phy_start(mdp->phydev);
1254
1255 return 0;
1256}
1257
dc19e4e5
NI
1258static int sh_eth_get_settings(struct net_device *ndev,
1259 struct ethtool_cmd *ecmd)
1260{
1261 struct sh_eth_private *mdp = netdev_priv(ndev);
1262 unsigned long flags;
1263 int ret;
1264
1265 spin_lock_irqsave(&mdp->lock, flags);
1266 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1267 spin_unlock_irqrestore(&mdp->lock, flags);
1268
1269 return ret;
1270}
1271
/*
 * ethtool set_settings: quiesce the MAC, push the new link parameters to
 * the PHY, update the cached duplex and reprogram the MAC, then re-enable
 * RX/TX.
 *
 * NOTE(review): the mdelay(1) and the phy_ethtool_sset() call both run
 * under spin_lock_irqsave(), i.e. with local interrupts disabled —
 * presumably acceptable on this platform, but worth confirming.
 */
static int sh_eth_set_settings(struct net_device *ndev,
		struct ethtool_cmd *ecmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdp->lock, flags);

	/* disable tx and rx */
	sh_eth_rcv_snd_disable(ndev);

	ret = phy_ethtool_sset(mdp->phydev, ecmd);
	if (ret)
		goto error_exit;

	if (ecmd->duplex == DUPLEX_FULL)
		mdp->duplex = 1;
	else
		mdp->duplex = 0;

	if (mdp->cd->set_duplex)
		mdp->cd->set_duplex(ndev);

error_exit:
	/* Let the hardware settle before re-enabling RX/TX. */
	mdelay(1);

	/* enable tx and rx */
	sh_eth_rcv_snd_enable(ndev);

	spin_unlock_irqrestore(&mdp->lock, flags);

	return ret;
}
1306
1307static int sh_eth_nway_reset(struct net_device *ndev)
1308{
1309 struct sh_eth_private *mdp = netdev_priv(ndev);
1310 unsigned long flags;
1311 int ret;
1312
1313 spin_lock_irqsave(&mdp->lock, flags);
1314 ret = phy_start_aneg(mdp->phydev);
1315 spin_unlock_irqrestore(&mdp->lock, flags);
1316
1317 return ret;
1318}
1319
1320static u32 sh_eth_get_msglevel(struct net_device *ndev)
1321{
1322 struct sh_eth_private *mdp = netdev_priv(ndev);
1323 return mdp->msg_enable;
1324}
1325
1326static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1327{
1328 struct sh_eth_private *mdp = netdev_priv(ndev);
1329 mdp->msg_enable = value;
1330}
1331
/* Names of the device-specific ethtool statistics, in exactly the order
 * sh_eth_get_ethtool_stats() fills the values array. */
static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_current", "tx_current",
	"rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1337
1338static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1339{
1340 switch (sset) {
1341 case ETH_SS_STATS:
1342 return SH_ETH_STATS_LEN;
1343 default:
1344 return -EOPNOTSUPP;
1345 }
1346}
1347
1348static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1349 struct ethtool_stats *stats, u64 *data)
1350{
1351 struct sh_eth_private *mdp = netdev_priv(ndev);
1352 int i = 0;
1353
1354 /* device-specific stats */
1355 data[i++] = mdp->cur_rx;
1356 data[i++] = mdp->cur_tx;
1357 data[i++] = mdp->dirty_rx;
1358 data[i++] = mdp->dirty_tx;
1359}
1360
1361static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1362{
1363 switch (stringset) {
1364 case ETH_SS_STATS:
1365 memcpy(data, *sh_eth_gstrings_stats,
1366 sizeof(sh_eth_gstrings_stats));
1367 break;
1368 }
1369}
1370
/* ethtool operations supported by this driver. */
static const struct ethtool_ops sh_eth_ethtool_ops = {
	.get_settings	= sh_eth_get_settings,
	.set_settings	= sh_eth_set_settings,
	.nway_reset	= sh_eth_nway_reset,
	.get_msglevel	= sh_eth_get_msglevel,
	.set_msglevel	= sh_eth_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_strings	= sh_eth_get_strings,
	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
	.get_sset_count     = sh_eth_get_sset_count,
};
1382
86a74ff2
NI
1383/* network device open function */
1384static int sh_eth_open(struct net_device *ndev)
1385{
1386 int ret = 0;
1387 struct sh_eth_private *mdp = netdev_priv(ndev);
1388
bcd5149d
MD
1389 pm_runtime_get_sync(&mdp->pdev->dev);
1390
a0607fd3 1391 ret = request_irq(ndev->irq, sh_eth_interrupt,
f29a3d04 1392#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
dc19e4e5
NI
1393 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1394 defined(CONFIG_CPU_SUBTYPE_SH7757)
0e0fde3c
NI
1395 IRQF_SHARED,
1396#else
1397 0,
1398#endif
1399 ndev->name, ndev);
86a74ff2 1400 if (ret) {
380af9e3 1401 dev_err(&ndev->dev, "Can not assign IRQ number\n");
86a74ff2
NI
1402 return ret;
1403 }
1404
1405 /* Descriptor set */
1406 ret = sh_eth_ring_init(ndev);
1407 if (ret)
1408 goto out_free_irq;
1409
1410 /* device init */
1411 ret = sh_eth_dev_init(ndev);
1412 if (ret)
1413 goto out_free_irq;
1414
1415 /* PHY control start*/
1416 ret = sh_eth_phy_start(ndev);
1417 if (ret)
1418 goto out_free_irq;
1419
1420 /* Set the timer to check for link beat. */
1421 init_timer(&mdp->timer);
1422 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
b0ca2a21 1423 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
86a74ff2
NI
1424
1425 return ret;
1426
1427out_free_irq:
1428 free_irq(ndev->irq, ndev);
bcd5149d 1429 pm_runtime_put_sync(&mdp->pdev->dev);
86a74ff2
NI
1430 return ret;
1431}
1432
1433/* Timeout function */
1434static void sh_eth_tx_timeout(struct net_device *ndev)
1435{
1436 struct sh_eth_private *mdp = netdev_priv(ndev);
86a74ff2
NI
1437 struct sh_eth_rxdesc *rxdesc;
1438 int i;
1439
1440 netif_stop_queue(ndev);
1441
dc19e4e5
NI
1442 if (netif_msg_timer(mdp))
1443 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
4a55530f 1444 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
86a74ff2
NI
1445
1446 /* tx_errors count up */
bb7d92e3 1447 ndev->stats.tx_errors++;
86a74ff2
NI
1448
1449 /* timer off */
1450 del_timer_sync(&mdp->timer);
1451
1452 /* Free all the skbuffs in the Rx queue. */
1453 for (i = 0; i < RX_RING_SIZE; i++) {
1454 rxdesc = &mdp->rx_ring[i];
1455 rxdesc->status = 0;
1456 rxdesc->addr = 0xBADF00D0;
1457 if (mdp->rx_skbuff[i])
1458 dev_kfree_skb(mdp->rx_skbuff[i]);
1459 mdp->rx_skbuff[i] = NULL;
1460 }
1461 for (i = 0; i < TX_RING_SIZE; i++) {
1462 if (mdp->tx_skbuff[i])
1463 dev_kfree_skb(mdp->tx_skbuff[i]);
1464 mdp->tx_skbuff[i] = NULL;
1465 }
1466
1467 /* device init */
1468 sh_eth_dev_init(ndev);
1469
1470 /* timer on */
1471 mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1472 add_timer(&mdp->timer);
1473}
1474
/* Packet transmit function */
/*
 * ndo_start_xmit: queue one skb on the TX descriptor ring and kick the
 * TX DMA engine if it is idle.  Returns NETDEV_TX_BUSY (and stops the
 * queue) when the ring is nearly full and no descriptors can be
 * reclaimed.
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() — worth confirming whether mapping can fail on
 * the supported platforms.
 */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Keep a margin of 4 descriptors; try to reclaim before giving up. */
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* soft swap. */
	/* Byte-swap the payload in software when the EDMAC can't. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
	/* Pad short frames up to the hardware minimum. */
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	/* Hand the descriptor to the EDMAC; mark the ring end on the last. */
	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	/* Restart TX DMA if the engine is not already running. */
	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
1521
/* device close function */
/*
 * ndo_stop: mask interrupts, halt TX/RX DMA, detach the PHY, release the
 * IRQ and the link timer, and free the rings and descriptor memory.
 * Drops the runtime-PM reference taken in sh_eth_open().
 *
 * NOTE(review): dma_free_coherent() is called with a NULL device
 * argument — presumably matching how the rings were allocated; confirm
 * against sh_eth_ring_init().
 */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	sh_eth_write(ndev, 0x0000, EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	sh_eth_write(ndev, 0, EDTRR);
	sh_eth_write(ndev, 0, EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
1562
/*
 * ndo_get_stats: fold the hardware's write-to-clear error counters
 * (TROCR/CDCR/LCCR and the GETHER- or FETHER-specific carrier counters)
 * into ndev->stats and reset them.  Takes a runtime-PM reference while
 * touching the registers.
 */
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	pm_runtime_get_sync(&mdp->pdev->dev);

	ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
	sh_eth_write(ndev, 0, TROCR);	/* (write clear) */
	ndev->stats.collisions += sh_eth_read(ndev, CDCR);
	sh_eth_write(ndev, 0, CDCR);	/* (write clear) */
	ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
	sh_eth_write(ndev, 0, LCCR);	/* (write clear) */
	if (sh_eth_is_gether(mdp)) {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
		sh_eth_write(ndev, 0, CERCR);	/* (write clear) */
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
		sh_eth_write(ndev, 0, CEECR);	/* (write clear) */
	} else {
		ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
		sh_eth_write(ndev, 0, CNDCR);	/* (write clear) */
	}
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &ndev->stats;
}
1588
bb7d92e3 1589/* ioctl to device function */
86a74ff2
NI
1590static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1591 int cmd)
1592{
1593 struct sh_eth_private *mdp = netdev_priv(ndev);
1594 struct phy_device *phydev = mdp->phydev;
1595
1596 if (!netif_running(ndev))
1597 return -EINVAL;
1598
1599 if (!phydev)
1600 return -ENODEV;
1601
28b04113 1602 return phy_mii_ioctl(phydev, rq, cmd);
86a74ff2
NI
1603}
1604
#if defined(SH_ETH_HAS_TSU)
/* Multicast reception directions set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ecmr = sh_eth_read(ndev, ECMR);

	if (ndev->flags & IFF_PROMISC) {
		/* Promiscuous: accept everything, drop the CAM filter bit. */
		ecmr = (ecmr & ~ECMR_MCT) | ECMR_PRM;
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ecmr = (ecmr & ~ECMR_PRM) | ECMR_MCT;
	}

	sh_eth_write(ndev, ecmr, ECMR);
}
#endif /* SH_ETH_HAS_TSU */
86a74ff2
NI
1620
/* SuperH's TSU register init function */
/*
 * Put the TSU (Table look-up & forwarding Unit) into a quiescent default
 * state: forwarding off, all interrupts masked and cleared, all CAM
 * entries disabled.  The QTAG registers live at different offsets on
 * GETHER vs. the other cores, hence the sh_eth_is_gether() split.
 */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
	if (sh_eth_is_gether(mdp)) {
		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
	} else {
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
	}
	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
}
1649
1650/* MDIO bus release function */
1651static int sh_mdio_release(struct net_device *ndev)
1652{
1653 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1654
1655 /* unregister mdio bus */
1656 mdiobus_unregister(bus);
1657
1658 /* remove mdio bus info from net_device */
1659 dev_set_drvdata(&ndev->dev, NULL);
1660
0f0b405c
DK
1661 /* free interrupts memory */
1662 kfree(bus->irq);
1663
86a74ff2
NI
1664 /* free bitbang info */
1665 free_mdio_bitbang(bus);
1666
1667 return 0;
1668}
1669
/* MDIO bus init function */
/*
 * Create a bit-banged MDIO bus on top of the PIR register, register it
 * with the MDIO core and remember it in the net_device's drvdata (used
 * by sh_mdio_release()).  Errors unwind through the goto ladder in
 * reverse allocation order.
 *
 * Returns 0 on success or a negative errno.
 */
static int sh_mdio_init(struct net_device *ndev, int id,
			struct sh_eth_plat_data *pd)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
	bitbang->set_gate = pd->set_mdio_gate;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		mdp->pdev->name, id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* No dedicated PHY interrupt line: poll every PHY address. */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* regist mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
1738
4a55530f
YS
1739static const u16 *sh_eth_get_register_offset(int register_type)
1740{
1741 const u16 *reg_offset = NULL;
1742
1743 switch (register_type) {
1744 case SH_ETH_REG_GIGABIT:
1745 reg_offset = sh_eth_offset_gigabit;
1746 break;
1747 case SH_ETH_REG_FAST_SH4:
1748 reg_offset = sh_eth_offset_fast_sh4;
1749 break;
1750 case SH_ETH_REG_FAST_SH3_SH2:
1751 reg_offset = sh_eth_offset_fast_sh3_sh2;
1752 break;
1753 default:
1754 printk(KERN_ERR "Unknown register type (%d)\n", register_type);
1755 break;
1756 }
1757
1758 return reg_offset;
1759}
1760
ebf84eaa
AB
/* net_device operations; multicast filtering only exists on parts with
 * a TSU block. */
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_rx_mode	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
1775
86a74ff2
NI
1776static int sh_eth_drv_probe(struct platform_device *pdev)
1777{
9c38657c 1778 int ret, devno = 0;
86a74ff2
NI
1779 struct resource *res;
1780 struct net_device *ndev = NULL;
ec0d7551 1781 struct sh_eth_private *mdp = NULL;
71557a37 1782 struct sh_eth_plat_data *pd;
86a74ff2
NI
1783
1784 /* get base addr */
1785 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1786 if (unlikely(res == NULL)) {
1787 dev_err(&pdev->dev, "invalid resource\n");
1788 ret = -EINVAL;
1789 goto out;
1790 }
1791
1792 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1793 if (!ndev) {
86a74ff2
NI
1794 ret = -ENOMEM;
1795 goto out;
1796 }
1797
1798 /* The sh Ether-specific entries in the device structure. */
1799 ndev->base_addr = res->start;
1800 devno = pdev->id;
1801 if (devno < 0)
1802 devno = 0;
1803
1804 ndev->dma = -1;
cc3c080d 1805 ret = platform_get_irq(pdev, 0);
1806 if (ret < 0) {
86a74ff2
NI
1807 ret = -ENODEV;
1808 goto out_release;
1809 }
cc3c080d 1810 ndev->irq = ret;
86a74ff2
NI
1811
1812 SET_NETDEV_DEV(ndev, &pdev->dev);
1813
1814 /* Fill in the fields of the device structure with ethernet values. */
1815 ether_setup(ndev);
1816
1817 mdp = netdev_priv(ndev);
ae70644d
YS
1818 mdp->addr = ioremap(res->start, resource_size(res));
1819 if (mdp->addr == NULL) {
1820 ret = -ENOMEM;
1821 dev_err(&pdev->dev, "ioremap failed.\n");
1822 goto out_release;
1823 }
1824
86a74ff2 1825 spin_lock_init(&mdp->lock);
bcd5149d
MD
1826 mdp->pdev = pdev;
1827 pm_runtime_enable(&pdev->dev);
1828 pm_runtime_resume(&pdev->dev);
86a74ff2 1829
71557a37 1830 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
86a74ff2 1831 /* get PHY ID */
71557a37 1832 mdp->phy_id = pd->phy;
e47c9052 1833 mdp->phy_interface = pd->phy_interface;
71557a37
YS
1834 /* EDMAC endian */
1835 mdp->edmac_endian = pd->edmac_endian;
4923576b
YS
1836 mdp->no_ether_link = pd->no_ether_link;
1837 mdp->ether_link_active_low = pd->ether_link_active_low;
4a55530f 1838 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
86a74ff2 1839
380af9e3 1840 /* set cpu data */
8fcd4961
YS
1841#if defined(SH_ETH_HAS_BOTH_MODULES)
1842 mdp->cd = sh_eth_get_cpu_data(mdp);
1843#else
380af9e3 1844 mdp->cd = &sh_eth_my_cpu_data;
8fcd4961 1845#endif
380af9e3
YS
1846 sh_eth_set_default_cpu_data(mdp->cd);
1847
86a74ff2 1848 /* set function */
ebf84eaa 1849 ndev->netdev_ops = &sh_eth_netdev_ops;
dc19e4e5 1850 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
86a74ff2
NI
1851 ndev->watchdog_timeo = TX_TIMEOUT;
1852
dc19e4e5
NI
1853 /* debug message level */
1854 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
86a74ff2
NI
1855 mdp->post_rx = POST_RX >> (devno << 1);
1856 mdp->post_fw = POST_FW >> (devno << 1);
1857
1858 /* read and set MAC address */
748031f9 1859 read_mac_address(ndev, pd->mac_addr);
86a74ff2
NI
1860
1861 /* First device only init */
1862 if (!devno) {
4986b996
YS
1863 if (mdp->cd->tsu) {
1864 struct resource *rtsu;
1865 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1866 if (!rtsu) {
1867 dev_err(&pdev->dev, "Not found TSU resource\n");
1868 goto out_release;
1869 }
1870 mdp->tsu_addr = ioremap(rtsu->start,
1871 resource_size(rtsu));
1872 }
380af9e3
YS
1873 if (mdp->cd->chip_reset)
1874 mdp->cd->chip_reset(ndev);
86a74ff2 1875
4986b996
YS
1876 if (mdp->cd->tsu) {
1877 /* TSU init (Init only)*/
1878 sh_eth_tsu_init(mdp);
1879 }
86a74ff2
NI
1880 }
1881
1882 /* network device register */
1883 ret = register_netdev(ndev);
1884 if (ret)
1885 goto out_release;
1886
1887 /* mdio bus init */
b3017e6a 1888 ret = sh_mdio_init(ndev, pdev->id, pd);
86a74ff2
NI
1889 if (ret)
1890 goto out_unregister;
1891
25985edc 1892 /* print device information */
6cd9b49d
HS
1893 pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1894 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
86a74ff2
NI
1895
1896 platform_set_drvdata(pdev, ndev);
1897
1898 return ret;
1899
1900out_unregister:
1901 unregister_netdev(ndev);
1902
1903out_release:
1904 /* net_dev free */
ae70644d
YS
1905 if (mdp && mdp->addr)
1906 iounmap(mdp->addr);
ec0d7551 1907 if (mdp && mdp->tsu_addr)
4986b996 1908 iounmap(mdp->tsu_addr);
86a74ff2
NI
1909 if (ndev)
1910 free_netdev(ndev);
1911
1912out:
1913 return ret;
1914}
1915
/*
 * Platform remove: undo sh_eth_drv_probe() — tear down the MDIO bus,
 * unregister the netdev, disable runtime PM and unmap the MMIO windows.
 *
 * NOTE(review): mdp->tsu_addr is unmapped unconditionally, but probe
 * only maps it for device 0 on TSU-equipped parts — presumably
 * iounmap(NULL) is harmless here; confirm for non-TSU configurations.
 */
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	iounmap(mdp->tsu_addr);
	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	iounmap(mdp->addr);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
1931
bcd5149d
MD
/*
 * Shared no-op runtime-PM callback for both ->runtime_suspend() and
 * ->runtime_resume().  Nothing to save or restore: the driver fully
 * re-initializes the hardware after pm_runtime_get_sync(), so simply
 * report success.
 */
static int sh_eth_runtime_nop(struct device *dev)
{
	return 0;
}
1944
1945static struct dev_pm_ops sh_eth_dev_pm_ops = {
1946 .runtime_suspend = sh_eth_runtime_nop,
1947 .runtime_resume = sh_eth_runtime_nop,
1948};
1949
86a74ff2
NI
/* Platform driver glue; bound by name (CARDNAME) to the board devices. */
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};
1958
/* Generates the module init/exit boilerplate for the platform driver. */
module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
This page took 0.597626 seconds and 5 git commands to generate.