2 * New driver for Marvell Yukon chipset and SysKonnect Gigabit
3 * Ethernet adapters. Based on earlier sk98lin, e100 and
4 * FreeBSD if_sk drivers.
6 * This driver intentionally does not support all the features
7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels.
10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/ethtool.h>
35 #include <linux/pci.h>
36 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/crc32.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/debugfs.h>
42 #include <linux/sched.h>
43 #include <linux/seq_file.h>
44 #include <linux/mii.h>
49 #define DRV_NAME "skge"
50 #define DRV_VERSION "1.13"
52 #define DEFAULT_TX_RING_SIZE 128
53 #define DEFAULT_RX_RING_SIZE 512
54 #define MAX_TX_RING_SIZE 1024
55 #define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
56 #define MAX_RX_RING_SIZE 4096
57 #define RX_COPY_THRESHOLD 128
58 #define RX_BUF_SIZE 1536
59 #define PHY_RETRIES 1000
60 #define ETH_JUMBO_MTU 9000
61 #define TX_WATCHDOG (5 * HZ)
62 #define NAPI_WEIGHT 64
66 #define SKGE_EEPROM_MAGIC 0x9933aabb
69 MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
70 MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
71 MODULE_LICENSE("GPL");
72 MODULE_VERSION(DRV_VERSION
);
74 static const u32 default_msg
= (NETIF_MSG_DRV
| NETIF_MSG_PROBE
|
75 NETIF_MSG_LINK
| NETIF_MSG_IFUP
|
78 static int debug
= -1; /* defaults above */
79 module_param(debug
, int, 0);
80 MODULE_PARM_DESC(debug
, "Debug level (0=none,...,16=all)");
82 static DEFINE_PCI_DEVICE_TABLE(skge_id_table
) = {
83 { PCI_DEVICE(PCI_VENDOR_ID_3COM
, PCI_DEVICE_ID_3COM_3C940
) },
84 { PCI_DEVICE(PCI_VENDOR_ID_3COM
, PCI_DEVICE_ID_3COM_3C940B
) },
85 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_GE
) },
86 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_YU
) },
87 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, PCI_DEVICE_ID_DLINK_DGE510T
) },
88 { PCI_DEVICE(PCI_VENDOR_ID_DLINK
, 0x4b01) }, /* DGE-530T */
89 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL
, 0x4320) },
90 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL
, 0x5005) }, /* Belkin */
91 { PCI_DEVICE(PCI_VENDOR_ID_CNET
, PCI_DEVICE_ID_CNET_GIGACARD
) },
92 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS
, PCI_DEVICE_ID_LINKSYS_EG1064
) },
93 { PCI_VENDOR_ID_LINKSYS
, 0x1032, PCI_ANY_ID
, 0x0015 },
96 MODULE_DEVICE_TABLE(pci
, skge_id_table
);
98 static int skge_up(struct net_device
*dev
);
99 static int skge_down(struct net_device
*dev
);
100 static void skge_phy_reset(struct skge_port
*skge
);
101 static void skge_tx_clean(struct net_device
*dev
);
102 static int xm_phy_write(struct skge_hw
*hw
, int port
, u16 reg
, u16 val
);
103 static int gm_phy_write(struct skge_hw
*hw
, int port
, u16 reg
, u16 val
);
104 static void genesis_get_stats(struct skge_port
*skge
, u64
*data
);
105 static void yukon_get_stats(struct skge_port
*skge
, u64
*data
);
106 static void yukon_init(struct skge_hw
*hw
, int port
);
107 static void genesis_mac_init(struct skge_hw
*hw
, int port
);
108 static void genesis_link_up(struct skge_port
*skge
);
109 static void skge_set_multicast(struct net_device
*dev
);
111 /* Avoid conditionals by using array */
112 static const int txqaddr
[] = { Q_XA1
, Q_XA2
};
113 static const int rxqaddr
[] = { Q_R1
, Q_R2
};
114 static const u32 rxirqmask
[] = { IS_R1_F
, IS_R2_F
};
115 static const u32 txirqmask
[] = { IS_XA1_F
, IS_XA2_F
};
116 static const u32 napimask
[] = { IS_R1_F
|IS_XA1_F
, IS_R2_F
|IS_XA2_F
};
117 static const u32 portmask
[] = { IS_PORT_1
, IS_PORT_2
};
119 static int skge_get_regs_len(struct net_device
*dev
)
125 * Returns copy of whole control register region
126 * Note: skip RAM address register because accessing it will
129 static void skge_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
132 const struct skge_port
*skge
= netdev_priv(dev
);
133 const void __iomem
*io
= skge
->hw
->regs
;
136 memset(p
, 0, regs
->len
);
137 memcpy_fromio(p
, io
, B3_RAM_ADDR
);
139 memcpy_fromio(p
+ B3_RI_WTO_R1
, io
+ B3_RI_WTO_R1
,
140 regs
->len
- B3_RI_WTO_R1
);
143 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
144 static u32
wol_supported(const struct skge_hw
*hw
)
146 if (hw
->chip_id
== CHIP_ID_GENESIS
)
149 if (hw
->chip_id
== CHIP_ID_YUKON
&& hw
->chip_rev
== 0)
152 return WAKE_MAGIC
| WAKE_PHY
;
155 static void skge_wol_init(struct skge_port
*skge
)
157 struct skge_hw
*hw
= skge
->hw
;
158 int port
= skge
->port
;
161 skge_write16(hw
, B0_CTST
, CS_RST_CLR
);
162 skge_write16(hw
, SK_REG(port
, GMAC_LINK_CTRL
), GMLC_RST_CLR
);
165 skge_write8(hw
, B0_POWER_CTRL
,
166 PC_VAUX_ENA
| PC_VCC_ENA
| PC_VAUX_ON
| PC_VCC_OFF
);
168 /* WA code for COMA mode -- clear PHY reset */
169 if (hw
->chip_id
== CHIP_ID_YUKON_LITE
&&
170 hw
->chip_rev
>= CHIP_REV_YU_LITE_A3
) {
171 u32 reg
= skge_read32(hw
, B2_GP_IO
);
174 skge_write32(hw
, B2_GP_IO
, reg
);
177 skge_write32(hw
, SK_REG(port
, GPHY_CTRL
),
179 GPC_HWCFG_M_3
| GPC_HWCFG_M_2
| GPC_HWCFG_M_1
| GPC_HWCFG_M_0
|
180 GPC_ANEG_1
| GPC_RST_SET
);
182 skge_write32(hw
, SK_REG(port
, GPHY_CTRL
),
184 GPC_HWCFG_M_3
| GPC_HWCFG_M_2
| GPC_HWCFG_M_1
| GPC_HWCFG_M_0
|
185 GPC_ANEG_1
| GPC_RST_CLR
);
187 skge_write32(hw
, SK_REG(port
, GMAC_CTRL
), GMC_RST_CLR
);
189 /* Force to 10/100 skge_reset will re-enable on resume */
190 gm_phy_write(hw
, port
, PHY_MARV_AUNE_ADV
,
191 (PHY_AN_100FULL
| PHY_AN_100HALF
|
192 PHY_AN_10FULL
| PHY_AN_10HALF
| PHY_AN_CSMA
));
194 gm_phy_write(hw
, port
, PHY_MARV_1000T_CTRL
, 0);
195 gm_phy_write(hw
, port
, PHY_MARV_CTRL
,
196 PHY_CT_RESET
| PHY_CT_SPS_LSB
| PHY_CT_ANE
|
197 PHY_CT_RE_CFG
| PHY_CT_DUP_MD
);
200 /* Set GMAC to no flow control and auto update for speed/duplex */
201 gma_write16(hw
, port
, GM_GP_CTRL
,
202 GM_GPCR_FC_TX_DIS
|GM_GPCR_TX_ENA
|GM_GPCR_RX_ENA
|
203 GM_GPCR_DUP_FULL
|GM_GPCR_FC_RX_DIS
|GM_GPCR_AU_FCT_DIS
);
205 /* Set WOL address */
206 memcpy_toio(hw
->regs
+ WOL_REGS(port
, WOL_MAC_ADDR
),
207 skge
->netdev
->dev_addr
, ETH_ALEN
);
209 /* Turn on appropriate WOL control bits */
210 skge_write16(hw
, WOL_REGS(port
, WOL_CTRL_STAT
), WOL_CTL_CLEAR_RESULT
);
212 if (skge
->wol
& WAKE_PHY
)
213 ctrl
|= WOL_CTL_ENA_PME_ON_LINK_CHG
|WOL_CTL_ENA_LINK_CHG_UNIT
;
215 ctrl
|= WOL_CTL_DIS_PME_ON_LINK_CHG
|WOL_CTL_DIS_LINK_CHG_UNIT
;
217 if (skge
->wol
& WAKE_MAGIC
)
218 ctrl
|= WOL_CTL_ENA_PME_ON_MAGIC_PKT
|WOL_CTL_ENA_MAGIC_PKT_UNIT
;
220 ctrl
|= WOL_CTL_DIS_PME_ON_MAGIC_PKT
|WOL_CTL_DIS_MAGIC_PKT_UNIT
;
222 ctrl
|= WOL_CTL_DIS_PME_ON_PATTERN
|WOL_CTL_DIS_PATTERN_UNIT
;
223 skge_write16(hw
, WOL_REGS(port
, WOL_CTRL_STAT
), ctrl
);
226 skge_write8(hw
, SK_REG(port
, RX_GMF_CTRL_T
), GMF_RST_SET
);
229 static void skge_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
231 struct skge_port
*skge
= netdev_priv(dev
);
233 wol
->supported
= wol_supported(skge
->hw
);
234 wol
->wolopts
= skge
->wol
;
237 static int skge_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
239 struct skge_port
*skge
= netdev_priv(dev
);
240 struct skge_hw
*hw
= skge
->hw
;
242 if ((wol
->wolopts
& ~wol_supported(hw
)) ||
243 !device_can_wakeup(&hw
->pdev
->dev
))
246 skge
->wol
= wol
->wolopts
;
248 device_set_wakeup_enable(&hw
->pdev
->dev
, skge
->wol
);
253 /* Determine supported/advertised modes based on hardware.
254 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
256 static u32
skge_supported_modes(const struct skge_hw
*hw
)
261 supported
= (SUPPORTED_10baseT_Half
|
262 SUPPORTED_10baseT_Full
|
263 SUPPORTED_100baseT_Half
|
264 SUPPORTED_100baseT_Full
|
265 SUPPORTED_1000baseT_Half
|
266 SUPPORTED_1000baseT_Full
|
270 if (hw
->chip_id
== CHIP_ID_GENESIS
)
271 supported
&= ~(SUPPORTED_10baseT_Half
|
272 SUPPORTED_10baseT_Full
|
273 SUPPORTED_100baseT_Half
|
274 SUPPORTED_100baseT_Full
);
276 else if (hw
->chip_id
== CHIP_ID_YUKON
)
277 supported
&= ~SUPPORTED_1000baseT_Half
;
279 supported
= (SUPPORTED_1000baseT_Full
|
280 SUPPORTED_1000baseT_Half
|
287 static int skge_get_settings(struct net_device
*dev
,
288 struct ethtool_cmd
*ecmd
)
290 struct skge_port
*skge
= netdev_priv(dev
);
291 struct skge_hw
*hw
= skge
->hw
;
293 ecmd
->transceiver
= XCVR_INTERNAL
;
294 ecmd
->supported
= skge_supported_modes(hw
);
297 ecmd
->port
= PORT_TP
;
298 ecmd
->phy_address
= hw
->phy_addr
;
300 ecmd
->port
= PORT_FIBRE
;
302 ecmd
->advertising
= skge
->advertising
;
303 ecmd
->autoneg
= skge
->autoneg
;
304 ecmd
->speed
= skge
->speed
;
305 ecmd
->duplex
= skge
->duplex
;
309 static int skge_set_settings(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
311 struct skge_port
*skge
= netdev_priv(dev
);
312 const struct skge_hw
*hw
= skge
->hw
;
313 u32 supported
= skge_supported_modes(hw
);
316 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
317 ecmd
->advertising
= supported
;
323 switch (ecmd
->speed
) {
325 if (ecmd
->duplex
== DUPLEX_FULL
)
326 setting
= SUPPORTED_1000baseT_Full
;
327 else if (ecmd
->duplex
== DUPLEX_HALF
)
328 setting
= SUPPORTED_1000baseT_Half
;
333 if (ecmd
->duplex
== DUPLEX_FULL
)
334 setting
= SUPPORTED_100baseT_Full
;
335 else if (ecmd
->duplex
== DUPLEX_HALF
)
336 setting
= SUPPORTED_100baseT_Half
;
342 if (ecmd
->duplex
== DUPLEX_FULL
)
343 setting
= SUPPORTED_10baseT_Full
;
344 else if (ecmd
->duplex
== DUPLEX_HALF
)
345 setting
= SUPPORTED_10baseT_Half
;
353 if ((setting
& supported
) == 0)
356 skge
->speed
= ecmd
->speed
;
357 skge
->duplex
= ecmd
->duplex
;
360 skge
->autoneg
= ecmd
->autoneg
;
361 skge
->advertising
= ecmd
->advertising
;
363 if (netif_running(dev
)) {
375 static void skge_get_drvinfo(struct net_device
*dev
,
376 struct ethtool_drvinfo
*info
)
378 struct skge_port
*skge
= netdev_priv(dev
);
380 strcpy(info
->driver
, DRV_NAME
);
381 strcpy(info
->version
, DRV_VERSION
);
382 strcpy(info
->fw_version
, "N/A");
383 strcpy(info
->bus_info
, pci_name(skge
->hw
->pdev
));
386 static const struct skge_stat
{
387 char name
[ETH_GSTRING_LEN
];
391 { "tx_bytes", XM_TXO_OK_HI
, GM_TXO_OK_HI
},
392 { "rx_bytes", XM_RXO_OK_HI
, GM_RXO_OK_HI
},
394 { "tx_broadcast", XM_TXF_BC_OK
, GM_TXF_BC_OK
},
395 { "rx_broadcast", XM_RXF_BC_OK
, GM_RXF_BC_OK
},
396 { "tx_multicast", XM_TXF_MC_OK
, GM_TXF_MC_OK
},
397 { "rx_multicast", XM_RXF_MC_OK
, GM_RXF_MC_OK
},
398 { "tx_unicast", XM_TXF_UC_OK
, GM_TXF_UC_OK
},
399 { "rx_unicast", XM_RXF_UC_OK
, GM_RXF_UC_OK
},
400 { "tx_mac_pause", XM_TXF_MPAUSE
, GM_TXF_MPAUSE
},
401 { "rx_mac_pause", XM_RXF_MPAUSE
, GM_RXF_MPAUSE
},
403 { "collisions", XM_TXF_SNG_COL
, GM_TXF_SNG_COL
},
404 { "multi_collisions", XM_TXF_MUL_COL
, GM_TXF_MUL_COL
},
405 { "aborted", XM_TXF_ABO_COL
, GM_TXF_ABO_COL
},
406 { "late_collision", XM_TXF_LAT_COL
, GM_TXF_LAT_COL
},
407 { "fifo_underrun", XM_TXE_FIFO_UR
, GM_TXE_FIFO_UR
},
408 { "fifo_overflow", XM_RXE_FIFO_OV
, GM_RXE_FIFO_OV
},
410 { "rx_toolong", XM_RXF_LNG_ERR
, GM_RXF_LNG_ERR
},
411 { "rx_jabber", XM_RXF_JAB_PKT
, GM_RXF_JAB_PKT
},
412 { "rx_runt", XM_RXE_RUNT
, GM_RXE_FRAG
},
413 { "rx_too_long", XM_RXF_LNG_ERR
, GM_RXF_LNG_ERR
},
414 { "rx_fcs_error", XM_RXF_FCS_ERR
, GM_RXF_FCS_ERR
},
417 static int skge_get_sset_count(struct net_device
*dev
, int sset
)
421 return ARRAY_SIZE(skge_stats
);
427 static void skge_get_ethtool_stats(struct net_device
*dev
,
428 struct ethtool_stats
*stats
, u64
*data
)
430 struct skge_port
*skge
= netdev_priv(dev
);
432 if (skge
->hw
->chip_id
== CHIP_ID_GENESIS
)
433 genesis_get_stats(skge
, data
);
435 yukon_get_stats(skge
, data
);
438 /* Use hardware MIB variables for critical path statistics and
439 * transmit feedback not reported at interrupt.
440 * Other errors are accounted for in interrupt handler.
442 static struct net_device_stats
*skge_get_stats(struct net_device
*dev
)
444 struct skge_port
*skge
= netdev_priv(dev
);
445 u64 data
[ARRAY_SIZE(skge_stats
)];
447 if (skge
->hw
->chip_id
== CHIP_ID_GENESIS
)
448 genesis_get_stats(skge
, data
);
450 yukon_get_stats(skge
, data
);
452 dev
->stats
.tx_bytes
= data
[0];
453 dev
->stats
.rx_bytes
= data
[1];
454 dev
->stats
.tx_packets
= data
[2] + data
[4] + data
[6];
455 dev
->stats
.rx_packets
= data
[3] + data
[5] + data
[7];
456 dev
->stats
.multicast
= data
[3] + data
[5];
457 dev
->stats
.collisions
= data
[10];
458 dev
->stats
.tx_aborted_errors
= data
[12];
463 static void skge_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
469 for (i
= 0; i
< ARRAY_SIZE(skge_stats
); i
++)
470 memcpy(data
+ i
* ETH_GSTRING_LEN
,
471 skge_stats
[i
].name
, ETH_GSTRING_LEN
);
476 static void skge_get_ring_param(struct net_device
*dev
,
477 struct ethtool_ringparam
*p
)
479 struct skge_port
*skge
= netdev_priv(dev
);
481 p
->rx_max_pending
= MAX_RX_RING_SIZE
;
482 p
->tx_max_pending
= MAX_TX_RING_SIZE
;
483 p
->rx_mini_max_pending
= 0;
484 p
->rx_jumbo_max_pending
= 0;
486 p
->rx_pending
= skge
->rx_ring
.count
;
487 p
->tx_pending
= skge
->tx_ring
.count
;
488 p
->rx_mini_pending
= 0;
489 p
->rx_jumbo_pending
= 0;
492 static int skge_set_ring_param(struct net_device
*dev
,
493 struct ethtool_ringparam
*p
)
495 struct skge_port
*skge
= netdev_priv(dev
);
498 if (p
->rx_pending
== 0 || p
->rx_pending
> MAX_RX_RING_SIZE
||
499 p
->tx_pending
< TX_LOW_WATER
|| p
->tx_pending
> MAX_TX_RING_SIZE
)
502 skge
->rx_ring
.count
= p
->rx_pending
;
503 skge
->tx_ring
.count
= p
->tx_pending
;
505 if (netif_running(dev
)) {
515 static u32
skge_get_msglevel(struct net_device
*netdev
)
517 struct skge_port
*skge
= netdev_priv(netdev
);
518 return skge
->msg_enable
;
521 static void skge_set_msglevel(struct net_device
*netdev
, u32 value
)
523 struct skge_port
*skge
= netdev_priv(netdev
);
524 skge
->msg_enable
= value
;
527 static int skge_nway_reset(struct net_device
*dev
)
529 struct skge_port
*skge
= netdev_priv(dev
);
531 if (skge
->autoneg
!= AUTONEG_ENABLE
|| !netif_running(dev
))
534 skge_phy_reset(skge
);
538 static int skge_set_sg(struct net_device
*dev
, u32 data
)
540 struct skge_port
*skge
= netdev_priv(dev
);
541 struct skge_hw
*hw
= skge
->hw
;
543 if (hw
->chip_id
== CHIP_ID_GENESIS
&& data
)
545 return ethtool_op_set_sg(dev
, data
);
548 static int skge_set_tx_csum(struct net_device
*dev
, u32 data
)
550 struct skge_port
*skge
= netdev_priv(dev
);
551 struct skge_hw
*hw
= skge
->hw
;
553 if (hw
->chip_id
== CHIP_ID_GENESIS
&& data
)
556 return ethtool_op_set_tx_csum(dev
, data
);
559 static u32
skge_get_rx_csum(struct net_device
*dev
)
561 struct skge_port
*skge
= netdev_priv(dev
);
563 return skge
->rx_csum
;
566 /* Only Yukon supports checksum offload. */
567 static int skge_set_rx_csum(struct net_device
*dev
, u32 data
)
569 struct skge_port
*skge
= netdev_priv(dev
);
571 if (skge
->hw
->chip_id
== CHIP_ID_GENESIS
&& data
)
574 skge
->rx_csum
= data
;
578 static void skge_get_pauseparam(struct net_device
*dev
,
579 struct ethtool_pauseparam
*ecmd
)
581 struct skge_port
*skge
= netdev_priv(dev
);
583 ecmd
->rx_pause
= ((skge
->flow_control
== FLOW_MODE_SYMMETRIC
) ||
584 (skge
->flow_control
== FLOW_MODE_SYM_OR_REM
));
585 ecmd
->tx_pause
= (ecmd
->rx_pause
||
586 (skge
->flow_control
== FLOW_MODE_LOC_SEND
));
588 ecmd
->autoneg
= ecmd
->rx_pause
|| ecmd
->tx_pause
;
591 static int skge_set_pauseparam(struct net_device
*dev
,
592 struct ethtool_pauseparam
*ecmd
)
594 struct skge_port
*skge
= netdev_priv(dev
);
595 struct ethtool_pauseparam old
;
598 skge_get_pauseparam(dev
, &old
);
600 if (ecmd
->autoneg
!= old
.autoneg
)
601 skge
->flow_control
= ecmd
->autoneg
? FLOW_MODE_NONE
: FLOW_MODE_SYMMETRIC
;
603 if (ecmd
->rx_pause
&& ecmd
->tx_pause
)
604 skge
->flow_control
= FLOW_MODE_SYMMETRIC
;
605 else if (ecmd
->rx_pause
&& !ecmd
->tx_pause
)
606 skge
->flow_control
= FLOW_MODE_SYM_OR_REM
;
607 else if (!ecmd
->rx_pause
&& ecmd
->tx_pause
)
608 skge
->flow_control
= FLOW_MODE_LOC_SEND
;
610 skge
->flow_control
= FLOW_MODE_NONE
;
613 if (netif_running(dev
)) {
625 /* Chip internal frequency for clock calculations */
626 static inline u32
hwkhz(const struct skge_hw
*hw
)
628 return (hw
->chip_id
== CHIP_ID_GENESIS
) ? 53125 : 78125;
631 /* Chip HZ to microseconds */
632 static inline u32
skge_clk2usec(const struct skge_hw
*hw
, u32 ticks
)
634 return (ticks
* 1000) / hwkhz(hw
);
637 /* Microseconds to chip HZ */
638 static inline u32
skge_usecs2clk(const struct skge_hw
*hw
, u32 usec
)
640 return hwkhz(hw
) * usec
/ 1000;
643 static int skge_get_coalesce(struct net_device
*dev
,
644 struct ethtool_coalesce
*ecmd
)
646 struct skge_port
*skge
= netdev_priv(dev
);
647 struct skge_hw
*hw
= skge
->hw
;
648 int port
= skge
->port
;
650 ecmd
->rx_coalesce_usecs
= 0;
651 ecmd
->tx_coalesce_usecs
= 0;
653 if (skge_read32(hw
, B2_IRQM_CTRL
) & TIM_START
) {
654 u32 delay
= skge_clk2usec(hw
, skge_read32(hw
, B2_IRQM_INI
));
655 u32 msk
= skge_read32(hw
, B2_IRQM_MSK
);
657 if (msk
& rxirqmask
[port
])
658 ecmd
->rx_coalesce_usecs
= delay
;
659 if (msk
& txirqmask
[port
])
660 ecmd
->tx_coalesce_usecs
= delay
;
666 /* Note: interrupt timer is per board, but can turn on/off per port */
667 static int skge_set_coalesce(struct net_device
*dev
,
668 struct ethtool_coalesce
*ecmd
)
670 struct skge_port
*skge
= netdev_priv(dev
);
671 struct skge_hw
*hw
= skge
->hw
;
672 int port
= skge
->port
;
673 u32 msk
= skge_read32(hw
, B2_IRQM_MSK
);
676 if (ecmd
->rx_coalesce_usecs
== 0)
677 msk
&= ~rxirqmask
[port
];
678 else if (ecmd
->rx_coalesce_usecs
< 25 ||
679 ecmd
->rx_coalesce_usecs
> 33333)
682 msk
|= rxirqmask
[port
];
683 delay
= ecmd
->rx_coalesce_usecs
;
686 if (ecmd
->tx_coalesce_usecs
== 0)
687 msk
&= ~txirqmask
[port
];
688 else if (ecmd
->tx_coalesce_usecs
< 25 ||
689 ecmd
->tx_coalesce_usecs
> 33333)
692 msk
|= txirqmask
[port
];
693 delay
= min(delay
, ecmd
->rx_coalesce_usecs
);
696 skge_write32(hw
, B2_IRQM_MSK
, msk
);
698 skge_write32(hw
, B2_IRQM_CTRL
, TIM_STOP
);
700 skge_write32(hw
, B2_IRQM_INI
, skge_usecs2clk(hw
, delay
));
701 skge_write32(hw
, B2_IRQM_CTRL
, TIM_START
);
706 enum led_mode
{ LED_MODE_OFF
, LED_MODE_ON
, LED_MODE_TST
};
707 static void skge_led(struct skge_port
*skge
, enum led_mode mode
)
709 struct skge_hw
*hw
= skge
->hw
;
710 int port
= skge
->port
;
712 spin_lock_bh(&hw
->phy_lock
);
713 if (hw
->chip_id
== CHIP_ID_GENESIS
) {
716 if (hw
->phy_type
== SK_PHY_BCOM
)
717 xm_phy_write(hw
, port
, PHY_BCOM_P_EXT_CTRL
, PHY_B_PEC_LED_OFF
);
719 skge_write32(hw
, SK_REG(port
, TX_LED_VAL
), 0);
720 skge_write8(hw
, SK_REG(port
, TX_LED_CTRL
), LED_T_OFF
);
722 skge_write8(hw
, SK_REG(port
, LNK_LED_REG
), LINKLED_OFF
);
723 skge_write32(hw
, SK_REG(port
, RX_LED_VAL
), 0);
724 skge_write8(hw
, SK_REG(port
, RX_LED_CTRL
), LED_T_OFF
);
728 skge_write8(hw
, SK_REG(port
, LNK_LED_REG
), LINKLED_ON
);
729 skge_write8(hw
, SK_REG(port
, LNK_LED_REG
), LINKLED_LINKSYNC_ON
);
731 skge_write8(hw
, SK_REG(port
, RX_LED_CTRL
), LED_START
);
732 skge_write8(hw
, SK_REG(port
, TX_LED_CTRL
), LED_START
);
737 skge_write8(hw
, SK_REG(port
, RX_LED_TST
), LED_T_ON
);
738 skge_write32(hw
, SK_REG(port
, RX_LED_VAL
), 100);
739 skge_write8(hw
, SK_REG(port
, RX_LED_CTRL
), LED_START
);
741 if (hw
->phy_type
== SK_PHY_BCOM
)
742 xm_phy_write(hw
, port
, PHY_BCOM_P_EXT_CTRL
, PHY_B_PEC_LED_ON
);
744 skge_write8(hw
, SK_REG(port
, TX_LED_TST
), LED_T_ON
);
745 skge_write32(hw
, SK_REG(port
, TX_LED_VAL
), 100);
746 skge_write8(hw
, SK_REG(port
, TX_LED_CTRL
), LED_START
);
753 gm_phy_write(hw
, port
, PHY_MARV_LED_CTRL
, 0);
754 gm_phy_write(hw
, port
, PHY_MARV_LED_OVER
,
755 PHY_M_LED_MO_DUP(MO_LED_OFF
) |
756 PHY_M_LED_MO_10(MO_LED_OFF
) |
757 PHY_M_LED_MO_100(MO_LED_OFF
) |
758 PHY_M_LED_MO_1000(MO_LED_OFF
) |
759 PHY_M_LED_MO_RX(MO_LED_OFF
));
762 gm_phy_write(hw
, port
, PHY_MARV_LED_CTRL
,
763 PHY_M_LED_PULS_DUR(PULS_170MS
) |
764 PHY_M_LED_BLINK_RT(BLINK_84MS
) |
768 gm_phy_write(hw
, port
, PHY_MARV_LED_OVER
,
769 PHY_M_LED_MO_RX(MO_LED_OFF
) |
770 (skge
->speed
== SPEED_100
?
771 PHY_M_LED_MO_100(MO_LED_ON
) : 0));
774 gm_phy_write(hw
, port
, PHY_MARV_LED_CTRL
, 0);
775 gm_phy_write(hw
, port
, PHY_MARV_LED_OVER
,
776 PHY_M_LED_MO_DUP(MO_LED_ON
) |
777 PHY_M_LED_MO_10(MO_LED_ON
) |
778 PHY_M_LED_MO_100(MO_LED_ON
) |
779 PHY_M_LED_MO_1000(MO_LED_ON
) |
780 PHY_M_LED_MO_RX(MO_LED_ON
));
783 spin_unlock_bh(&hw
->phy_lock
);
786 /* blink LED's for finding board */
787 static int skge_phys_id(struct net_device
*dev
, u32 data
)
789 struct skge_port
*skge
= netdev_priv(dev
);
791 enum led_mode mode
= LED_MODE_TST
;
793 if (!data
|| data
> (u32
)(MAX_SCHEDULE_TIMEOUT
/ HZ
))
794 ms
= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT
/ HZ
) * 1000;
799 skge_led(skge
, mode
);
800 mode
^= LED_MODE_TST
;
802 if (msleep_interruptible(BLINK_MS
))
807 /* back to regular LED state */
808 skge_led(skge
, netif_running(dev
) ? LED_MODE_ON
: LED_MODE_OFF
);
813 static int skge_get_eeprom_len(struct net_device
*dev
)
815 struct skge_port
*skge
= netdev_priv(dev
);
818 pci_read_config_dword(skge
->hw
->pdev
, PCI_DEV_REG2
, ®2
);
819 return 1 << (((reg2
& PCI_VPD_ROM_SZ
) >> 14) + 8);
822 static u32
skge_vpd_read(struct pci_dev
*pdev
, int cap
, u16 offset
)
826 pci_write_config_word(pdev
, cap
+ PCI_VPD_ADDR
, offset
);
829 pci_read_config_word(pdev
, cap
+ PCI_VPD_ADDR
, &offset
);
830 } while (!(offset
& PCI_VPD_ADDR_F
));
832 pci_read_config_dword(pdev
, cap
+ PCI_VPD_DATA
, &val
);
836 static void skge_vpd_write(struct pci_dev
*pdev
, int cap
, u16 offset
, u32 val
)
838 pci_write_config_dword(pdev
, cap
+ PCI_VPD_DATA
, val
);
839 pci_write_config_word(pdev
, cap
+ PCI_VPD_ADDR
,
840 offset
| PCI_VPD_ADDR_F
);
843 pci_read_config_word(pdev
, cap
+ PCI_VPD_ADDR
, &offset
);
844 } while (offset
& PCI_VPD_ADDR_F
);
847 static int skge_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
850 struct skge_port
*skge
= netdev_priv(dev
);
851 struct pci_dev
*pdev
= skge
->hw
->pdev
;
852 int cap
= pci_find_capability(pdev
, PCI_CAP_ID_VPD
);
853 int length
= eeprom
->len
;
854 u16 offset
= eeprom
->offset
;
859 eeprom
->magic
= SKGE_EEPROM_MAGIC
;
862 u32 val
= skge_vpd_read(pdev
, cap
, offset
);
863 int n
= min_t(int, length
, sizeof(val
));
865 memcpy(data
, &val
, n
);
873 static int skge_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
876 struct skge_port
*skge
= netdev_priv(dev
);
877 struct pci_dev
*pdev
= skge
->hw
->pdev
;
878 int cap
= pci_find_capability(pdev
, PCI_CAP_ID_VPD
);
879 int length
= eeprom
->len
;
880 u16 offset
= eeprom
->offset
;
885 if (eeprom
->magic
!= SKGE_EEPROM_MAGIC
)
890 int n
= min_t(int, length
, sizeof(val
));
893 val
= skge_vpd_read(pdev
, cap
, offset
);
894 memcpy(&val
, data
, n
);
896 skge_vpd_write(pdev
, cap
, offset
, val
);
905 static const struct ethtool_ops skge_ethtool_ops
= {
906 .get_settings
= skge_get_settings
,
907 .set_settings
= skge_set_settings
,
908 .get_drvinfo
= skge_get_drvinfo
,
909 .get_regs_len
= skge_get_regs_len
,
910 .get_regs
= skge_get_regs
,
911 .get_wol
= skge_get_wol
,
912 .set_wol
= skge_set_wol
,
913 .get_msglevel
= skge_get_msglevel
,
914 .set_msglevel
= skge_set_msglevel
,
915 .nway_reset
= skge_nway_reset
,
916 .get_link
= ethtool_op_get_link
,
917 .get_eeprom_len
= skge_get_eeprom_len
,
918 .get_eeprom
= skge_get_eeprom
,
919 .set_eeprom
= skge_set_eeprom
,
920 .get_ringparam
= skge_get_ring_param
,
921 .set_ringparam
= skge_set_ring_param
,
922 .get_pauseparam
= skge_get_pauseparam
,
923 .set_pauseparam
= skge_set_pauseparam
,
924 .get_coalesce
= skge_get_coalesce
,
925 .set_coalesce
= skge_set_coalesce
,
926 .set_sg
= skge_set_sg
,
927 .set_tx_csum
= skge_set_tx_csum
,
928 .get_rx_csum
= skge_get_rx_csum
,
929 .set_rx_csum
= skge_set_rx_csum
,
930 .get_strings
= skge_get_strings
,
931 .phys_id
= skge_phys_id
,
932 .get_sset_count
= skge_get_sset_count
,
933 .get_ethtool_stats
= skge_get_ethtool_stats
,
937 * Allocate ring elements and chain them together
938 * One-to-one association of board descriptors with ring elements
940 static int skge_ring_alloc(struct skge_ring
*ring
, void *vaddr
, u32 base
)
942 struct skge_tx_desc
*d
;
943 struct skge_element
*e
;
946 ring
->start
= kcalloc(ring
->count
, sizeof(*e
), GFP_KERNEL
);
950 for (i
= 0, e
= ring
->start
, d
= vaddr
; i
< ring
->count
; i
++, e
++, d
++) {
952 if (i
== ring
->count
- 1) {
953 e
->next
= ring
->start
;
954 d
->next_offset
= base
;
957 d
->next_offset
= base
+ (i
+1) * sizeof(*d
);
960 ring
->to_use
= ring
->to_clean
= ring
->start
;
965 /* Allocate and setup a new buffer for receiving */
966 static void skge_rx_setup(struct skge_port
*skge
, struct skge_element
*e
,
967 struct sk_buff
*skb
, unsigned int bufsize
)
969 struct skge_rx_desc
*rd
= e
->desc
;
972 map
= pci_map_single(skge
->hw
->pdev
, skb
->data
, bufsize
,
976 rd
->dma_hi
= map
>> 32;
978 rd
->csum1_start
= ETH_HLEN
;
979 rd
->csum2_start
= ETH_HLEN
;
985 rd
->control
= BMU_OWN
| BMU_STF
| BMU_IRQ_EOF
| BMU_TCP_CHECK
| bufsize
;
986 pci_unmap_addr_set(e
, mapaddr
, map
);
987 pci_unmap_len_set(e
, maplen
, bufsize
);
990 /* Resume receiving using existing skb,
991 * Note: DMA address is not changed by chip.
992 * MTU not changed while receiver active.
994 static inline void skge_rx_reuse(struct skge_element
*e
, unsigned int size
)
996 struct skge_rx_desc
*rd
= e
->desc
;
999 rd
->csum2_start
= ETH_HLEN
;
1003 rd
->control
= BMU_OWN
| BMU_STF
| BMU_IRQ_EOF
| BMU_TCP_CHECK
| size
;
1007 /* Free all buffers in receive ring, assumes receiver stopped */
1008 static void skge_rx_clean(struct skge_port
*skge
)
1010 struct skge_hw
*hw
= skge
->hw
;
1011 struct skge_ring
*ring
= &skge
->rx_ring
;
1012 struct skge_element
*e
;
1016 struct skge_rx_desc
*rd
= e
->desc
;
1019 pci_unmap_single(hw
->pdev
,
1020 pci_unmap_addr(e
, mapaddr
),
1021 pci_unmap_len(e
, maplen
),
1022 PCI_DMA_FROMDEVICE
);
1023 dev_kfree_skb(e
->skb
);
1026 } while ((e
= e
->next
) != ring
->start
);
1030 /* Allocate buffers for receive ring
1031 * For receive: to_clean is next received frame.
1033 static int skge_rx_fill(struct net_device
*dev
)
1035 struct skge_port
*skge
= netdev_priv(dev
);
1036 struct skge_ring
*ring
= &skge
->rx_ring
;
1037 struct skge_element
*e
;
1041 struct sk_buff
*skb
;
1043 skb
= __netdev_alloc_skb(dev
, skge
->rx_buf_size
+ NET_IP_ALIGN
,
1048 skb_reserve(skb
, NET_IP_ALIGN
);
1049 skge_rx_setup(skge
, e
, skb
, skge
->rx_buf_size
);
1050 } while ((e
= e
->next
) != ring
->start
);
1052 ring
->to_clean
= ring
->start
;
1056 static const char *skge_pause(enum pause_status status
)
1059 case FLOW_STAT_NONE
:
1061 case FLOW_STAT_REM_SEND
:
1063 case FLOW_STAT_LOC_SEND
:
1065 case FLOW_STAT_SYMMETRIC
: /* Both station may send PAUSE */
1068 return "indeterminated";
1073 static void skge_link_up(struct skge_port
*skge
)
1075 skge_write8(skge
->hw
, SK_REG(skge
->port
, LNK_LED_REG
),
1076 LED_BLK_OFF
|LED_SYNC_OFF
|LED_ON
);
1078 netif_carrier_on(skge
->netdev
);
1079 netif_wake_queue(skge
->netdev
);
1081 netif_info(skge
, link
, skge
->netdev
,
1082 "Link is up at %d Mbps, %s duplex, flow control %s\n",
1084 skge
->duplex
== DUPLEX_FULL
? "full" : "half",
1085 skge_pause(skge
->flow_status
));
1088 static void skge_link_down(struct skge_port
*skge
)
1090 skge_write8(skge
->hw
, SK_REG(skge
->port
, LNK_LED_REG
), LED_OFF
);
1091 netif_carrier_off(skge
->netdev
);
1092 netif_stop_queue(skge
->netdev
);
1094 netif_info(skge
, link
, skge
->netdev
, "Link is down\n");
1098 static void xm_link_down(struct skge_hw
*hw
, int port
)
1100 struct net_device
*dev
= hw
->dev
[port
];
1101 struct skge_port
*skge
= netdev_priv(dev
);
1103 xm_write16(hw
, port
, XM_IMSK
, XM_IMSK_DISABLE
);
1105 if (netif_carrier_ok(dev
))
1106 skge_link_down(skge
);
1109 static int __xm_phy_read(struct skge_hw
*hw
, int port
, u16 reg
, u16
*val
)
1113 xm_write16(hw
, port
, XM_PHY_ADDR
, reg
| hw
->phy_addr
);
1114 *val
= xm_read16(hw
, port
, XM_PHY_DATA
);
1116 if (hw
->phy_type
== SK_PHY_XMAC
)
1119 for (i
= 0; i
< PHY_RETRIES
; i
++) {
1120 if (xm_read16(hw
, port
, XM_MMU_CMD
) & XM_MMU_PHY_RDY
)
1127 *val
= xm_read16(hw
, port
, XM_PHY_DATA
);
1132 static u16
xm_phy_read(struct skge_hw
*hw
, int port
, u16 reg
)
1135 if (__xm_phy_read(hw
, port
, reg
, &v
))
1136 pr_warning("%s: phy read timed out\n", hw
->dev
[port
]->name
);
1140 static int xm_phy_write(struct skge_hw
*hw
, int port
, u16 reg
, u16 val
)
1144 xm_write16(hw
, port
, XM_PHY_ADDR
, reg
| hw
->phy_addr
);
1145 for (i
= 0; i
< PHY_RETRIES
; i
++) {
1146 if (!(xm_read16(hw
, port
, XM_MMU_CMD
) & XM_MMU_PHY_BUSY
))
1153 xm_write16(hw
, port
, XM_PHY_DATA
, val
);
1154 for (i
= 0; i
< PHY_RETRIES
; i
++) {
1155 if (!(xm_read16(hw
, port
, XM_MMU_CMD
) & XM_MMU_PHY_BUSY
))
1162 static void genesis_init(struct skge_hw
*hw
)
1164 /* set blink source counter */
1165 skge_write32(hw
, B2_BSC_INI
, (SK_BLK_DUR
* SK_FACT_53
) / 100);
1166 skge_write8(hw
, B2_BSC_CTRL
, BSC_START
);
1168 /* configure mac arbiter */
1169 skge_write16(hw
, B3_MA_TO_CTRL
, MA_RST_CLR
);
1171 /* configure mac arbiter timeout values */
1172 skge_write8(hw
, B3_MA_TOINI_RX1
, SK_MAC_TO_53
);
1173 skge_write8(hw
, B3_MA_TOINI_RX2
, SK_MAC_TO_53
);
1174 skge_write8(hw
, B3_MA_TOINI_TX1
, SK_MAC_TO_53
);
1175 skge_write8(hw
, B3_MA_TOINI_TX2
, SK_MAC_TO_53
);
1177 skge_write8(hw
, B3_MA_RCINI_RX1
, 0);
1178 skge_write8(hw
, B3_MA_RCINI_RX2
, 0);
1179 skge_write8(hw
, B3_MA_RCINI_TX1
, 0);
1180 skge_write8(hw
, B3_MA_RCINI_TX2
, 0);
1182 /* configure packet arbiter timeout */
1183 skge_write16(hw
, B3_PA_CTRL
, PA_RST_CLR
);
1184 skge_write16(hw
, B3_PA_TOINI_RX1
, SK_PKT_TO_MAX
);
1185 skge_write16(hw
, B3_PA_TOINI_TX1
, SK_PKT_TO_MAX
);
1186 skge_write16(hw
, B3_PA_TOINI_RX2
, SK_PKT_TO_MAX
);
1187 skge_write16(hw
, B3_PA_TOINI_TX2
, SK_PKT_TO_MAX
);
1190 static void genesis_reset(struct skge_hw
*hw
, int port
)
1192 const u8 zero
[8] = { 0 };
1195 skge_write8(hw
, SK_REG(port
, GMAC_IRQ_MSK
), 0);
1197 /* reset the statistics module */
1198 xm_write32(hw
, port
, XM_GP_PORT
, XM_GP_RES_STAT
);
1199 xm_write16(hw
, port
, XM_IMSK
, XM_IMSK_DISABLE
);
1200 xm_write32(hw
, port
, XM_MODE
, 0); /* clear Mode Reg */
1201 xm_write16(hw
, port
, XM_TX_CMD
, 0); /* reset TX CMD Reg */
1202 xm_write16(hw
, port
, XM_RX_CMD
, 0); /* reset RX CMD Reg */
1204 /* disable Broadcom PHY IRQ */
1205 if (hw
->phy_type
== SK_PHY_BCOM
)
1206 xm_write16(hw
, port
, PHY_BCOM_INT_MASK
, 0xffff);
1208 xm_outhash(hw
, port
, XM_HSM
, zero
);
1210 /* Flush TX and RX fifo */
1211 reg
= xm_read32(hw
, port
, XM_MODE
);
1212 xm_write32(hw
, port
, XM_MODE
, reg
| XM_MD_FTF
);
1213 xm_write32(hw
, port
, XM_MODE
, reg
| XM_MD_FRF
);
1217 /* Convert mode to MII values */
1218 static const u16 phy_pause_map
[] = {
1219 [FLOW_MODE_NONE
] = 0,
1220 [FLOW_MODE_LOC_SEND
] = PHY_AN_PAUSE_ASYM
,
1221 [FLOW_MODE_SYMMETRIC
] = PHY_AN_PAUSE_CAP
,
1222 [FLOW_MODE_SYM_OR_REM
] = PHY_AN_PAUSE_CAP
| PHY_AN_PAUSE_ASYM
,
1225 /* special defines for FIBER (88E1011S only) */
1226 static const u16 fiber_pause_map
[] = {
1227 [FLOW_MODE_NONE
] = PHY_X_P_NO_PAUSE
,
1228 [FLOW_MODE_LOC_SEND
] = PHY_X_P_ASYM_MD
,
1229 [FLOW_MODE_SYMMETRIC
] = PHY_X_P_SYM_MD
,
1230 [FLOW_MODE_SYM_OR_REM
] = PHY_X_P_BOTH_MD
,
1234 /* Check status of Broadcom phy link */
1235 static void bcom_check_link(struct skge_hw
*hw
, int port
)
1237 struct net_device
*dev
= hw
->dev
[port
];
1238 struct skge_port
*skge
= netdev_priv(dev
);
1241 /* read twice because of latch */
1242 xm_phy_read(hw
, port
, PHY_BCOM_STAT
);
1243 status
= xm_phy_read(hw
, port
, PHY_BCOM_STAT
);
1245 if ((status
& PHY_ST_LSYNC
) == 0) {
1246 xm_link_down(hw
, port
);
1250 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1253 if (!(status
& PHY_ST_AN_OVER
))
1256 lpa
= xm_phy_read(hw
, port
, PHY_XMAC_AUNE_LP
);
1257 if (lpa
& PHY_B_AN_RF
) {
1258 netdev_notice(dev
, "remote fault\n");
1262 aux
= xm_phy_read(hw
, port
, PHY_BCOM_AUX_STAT
);
1264 /* Check Duplex mismatch */
1265 switch (aux
& PHY_B_AS_AN_RES_MSK
) {
1266 case PHY_B_RES_1000FD
:
1267 skge
->duplex
= DUPLEX_FULL
;
1269 case PHY_B_RES_1000HD
:
1270 skge
->duplex
= DUPLEX_HALF
;
1273 netdev_notice(dev
, "duplex mismatch\n");
1277 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1278 switch (aux
& PHY_B_AS_PAUSE_MSK
) {
1279 case PHY_B_AS_PAUSE_MSK
:
1280 skge
->flow_status
= FLOW_STAT_SYMMETRIC
;
1283 skge
->flow_status
= FLOW_STAT_REM_SEND
;
1286 skge
->flow_status
= FLOW_STAT_LOC_SEND
;
1289 skge
->flow_status
= FLOW_STAT_NONE
;
1291 skge
->speed
= SPEED_1000
;
1294 if (!netif_carrier_ok(dev
))
1295 genesis_link_up(skge
);
1298 /* Broadcom 5400 only supports giagabit! SysKonnect did not put an additional
1299 * Phy on for 100 or 10Mbit operation
1301 static void bcom_phy_init(struct skge_port
*skge
)
1303 struct skge_hw
*hw
= skge
->hw
;
1304 int port
= skge
->port
;
1306 u16 id1
, r
, ext
, ctl
;
1308 /* magic workaround patterns for Broadcom */
1309 static const struct {
1313 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 },
1314 { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 },
1315 { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 },
1316 { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
1318 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 },
1319 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1322 /* read Id from external PHY (all have the same address) */
1323 id1
= xm_phy_read(hw
, port
, PHY_XMAC_ID1
);
1325 /* Optimize MDIO transfer by suppressing preamble. */
1326 r
= xm_read16(hw
, port
, XM_MMU_CMD
);
1328 xm_write16(hw
, port
, XM_MMU_CMD
, r
);
1331 case PHY_BCOM_ID1_C0
:
1333 * Workaround BCOM Errata for the C0 type.
1334 * Write magic patterns to reserved registers.
1336 for (i
= 0; i
< ARRAY_SIZE(C0hack
); i
++)
1337 xm_phy_write(hw
, port
,
1338 C0hack
[i
].reg
, C0hack
[i
].val
);
1341 case PHY_BCOM_ID1_A1
:
1343 * Workaround BCOM Errata for the A1 type.
1344 * Write magic patterns to reserved registers.
1346 for (i
= 0; i
< ARRAY_SIZE(A1hack
); i
++)
1347 xm_phy_write(hw
, port
,
1348 A1hack
[i
].reg
, A1hack
[i
].val
);
1353 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1354 * Disable Power Management after reset.
1356 r
= xm_phy_read(hw
, port
, PHY_BCOM_AUX_CTRL
);
1357 r
|= PHY_B_AC_DIS_PM
;
1358 xm_phy_write(hw
, port
, PHY_BCOM_AUX_CTRL
, r
);
1361 xm_read16(hw
, port
, XM_ISRC
);
1363 ext
= PHY_B_PEC_EN_LTR
; /* enable tx led */
1364 ctl
= PHY_CT_SP1000
; /* always 1000mbit */
1366 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1368 * Workaround BCOM Errata #1 for the C5 type.
1369 * 1000Base-T Link Acquisition Failure in Slave Mode
1370 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1372 u16 adv
= PHY_B_1000C_RD
;
1373 if (skge
->advertising
& ADVERTISED_1000baseT_Half
)
1374 adv
|= PHY_B_1000C_AHD
;
1375 if (skge
->advertising
& ADVERTISED_1000baseT_Full
)
1376 adv
|= PHY_B_1000C_AFD
;
1377 xm_phy_write(hw
, port
, PHY_BCOM_1000T_CTRL
, adv
);
1379 ctl
|= PHY_CT_ANE
| PHY_CT_RE_CFG
;
1381 if (skge
->duplex
== DUPLEX_FULL
)
1382 ctl
|= PHY_CT_DUP_MD
;
1383 /* Force to slave */
1384 xm_phy_write(hw
, port
, PHY_BCOM_1000T_CTRL
, PHY_B_1000C_MSE
);
1387 /* Set autonegotiation pause parameters */
1388 xm_phy_write(hw
, port
, PHY_BCOM_AUNE_ADV
,
1389 phy_pause_map
[skge
->flow_control
] | PHY_AN_CSMA
);
1391 /* Handle Jumbo frames */
1392 if (hw
->dev
[port
]->mtu
> ETH_DATA_LEN
) {
1393 xm_phy_write(hw
, port
, PHY_BCOM_AUX_CTRL
,
1394 PHY_B_AC_TX_TST
| PHY_B_AC_LONG_PACK
);
1396 ext
|= PHY_B_PEC_HIGH_LA
;
1400 xm_phy_write(hw
, port
, PHY_BCOM_P_EXT_CTRL
, ext
);
1401 xm_phy_write(hw
, port
, PHY_BCOM_CTRL
, ctl
);
1403 /* Use link status change interrupt */
1404 xm_phy_write(hw
, port
, PHY_BCOM_INT_MASK
, PHY_B_DEF_MSK
);
1407 static void xm_phy_init(struct skge_port
*skge
)
1409 struct skge_hw
*hw
= skge
->hw
;
1410 int port
= skge
->port
;
1413 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1414 if (skge
->advertising
& ADVERTISED_1000baseT_Half
)
1415 ctrl
|= PHY_X_AN_HD
;
1416 if (skge
->advertising
& ADVERTISED_1000baseT_Full
)
1417 ctrl
|= PHY_X_AN_FD
;
1419 ctrl
|= fiber_pause_map
[skge
->flow_control
];
1421 xm_phy_write(hw
, port
, PHY_XMAC_AUNE_ADV
, ctrl
);
1423 /* Restart Auto-negotiation */
1424 ctrl
= PHY_CT_ANE
| PHY_CT_RE_CFG
;
1426 /* Set DuplexMode in Config register */
1427 if (skge
->duplex
== DUPLEX_FULL
)
1428 ctrl
|= PHY_CT_DUP_MD
;
1430 * Do NOT enable Auto-negotiation here. This would hold
1431 * the link down because no IDLEs are transmitted
1435 xm_phy_write(hw
, port
, PHY_XMAC_CTRL
, ctrl
);
1437 /* Poll PHY for status changes */
1438 mod_timer(&skge
->link_timer
, jiffies
+ LINK_HZ
);
1441 static int xm_check_link(struct net_device
*dev
)
1443 struct skge_port
*skge
= netdev_priv(dev
);
1444 struct skge_hw
*hw
= skge
->hw
;
1445 int port
= skge
->port
;
1448 /* read twice because of latch */
1449 xm_phy_read(hw
, port
, PHY_XMAC_STAT
);
1450 status
= xm_phy_read(hw
, port
, PHY_XMAC_STAT
);
1452 if ((status
& PHY_ST_LSYNC
) == 0) {
1453 xm_link_down(hw
, port
);
1457 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1460 if (!(status
& PHY_ST_AN_OVER
))
1463 lpa
= xm_phy_read(hw
, port
, PHY_XMAC_AUNE_LP
);
1464 if (lpa
& PHY_B_AN_RF
) {
1465 netdev_notice(dev
, "remote fault\n");
1469 res
= xm_phy_read(hw
, port
, PHY_XMAC_RES_ABI
);
1471 /* Check Duplex mismatch */
1472 switch (res
& (PHY_X_RS_HD
| PHY_X_RS_FD
)) {
1474 skge
->duplex
= DUPLEX_FULL
;
1477 skge
->duplex
= DUPLEX_HALF
;
1480 netdev_notice(dev
, "duplex mismatch\n");
1484 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1485 if ((skge
->flow_control
== FLOW_MODE_SYMMETRIC
||
1486 skge
->flow_control
== FLOW_MODE_SYM_OR_REM
) &&
1487 (lpa
& PHY_X_P_SYM_MD
))
1488 skge
->flow_status
= FLOW_STAT_SYMMETRIC
;
1489 else if (skge
->flow_control
== FLOW_MODE_SYM_OR_REM
&&
1490 (lpa
& PHY_X_RS_PAUSE
) == PHY_X_P_ASYM_MD
)
1491 /* Enable PAUSE receive, disable PAUSE transmit */
1492 skge
->flow_status
= FLOW_STAT_REM_SEND
;
1493 else if (skge
->flow_control
== FLOW_MODE_LOC_SEND
&&
1494 (lpa
& PHY_X_RS_PAUSE
) == PHY_X_P_BOTH_MD
)
1495 /* Disable PAUSE receive, enable PAUSE transmit */
1496 skge
->flow_status
= FLOW_STAT_LOC_SEND
;
1498 skge
->flow_status
= FLOW_STAT_NONE
;
1500 skge
->speed
= SPEED_1000
;
1503 if (!netif_carrier_ok(dev
))
1504 genesis_link_up(skge
);
1508 /* Poll to check for link coming up.
1510 * Since internal PHY is wired to a level triggered pin, can't
1511 * get an interrupt when carrier is detected, need to poll for
1514 static void xm_link_timer(unsigned long arg
)
1516 struct skge_port
*skge
= (struct skge_port
*) arg
;
1517 struct net_device
*dev
= skge
->netdev
;
1518 struct skge_hw
*hw
= skge
->hw
;
1519 int port
= skge
->port
;
1521 unsigned long flags
;
1523 if (!netif_running(dev
))
1526 spin_lock_irqsave(&hw
->phy_lock
, flags
);
1529 * Verify that the link by checking GPIO register three times.
1530 * This pin has the signal from the link_sync pin connected to it.
1532 for (i
= 0; i
< 3; i
++) {
1533 if (xm_read16(hw
, port
, XM_GP_PORT
) & XM_GP_INP_ASS
)
1537 /* Re-enable interrupt to detect link down */
1538 if (xm_check_link(dev
)) {
1539 u16 msk
= xm_read16(hw
, port
, XM_IMSK
);
1540 msk
&= ~XM_IS_INP_ASS
;
1541 xm_write16(hw
, port
, XM_IMSK
, msk
);
1542 xm_read16(hw
, port
, XM_ISRC
);
1545 mod_timer(&skge
->link_timer
,
1546 round_jiffies(jiffies
+ LINK_HZ
));
1548 spin_unlock_irqrestore(&hw
->phy_lock
, flags
);
1551 static void genesis_mac_init(struct skge_hw
*hw
, int port
)
1553 struct net_device
*dev
= hw
->dev
[port
];
1554 struct skge_port
*skge
= netdev_priv(dev
);
1555 int jumbo
= hw
->dev
[port
]->mtu
> ETH_DATA_LEN
;
1558 const u8 zero
[6] = { 0 };
1560 for (i
= 0; i
< 10; i
++) {
1561 skge_write16(hw
, SK_REG(port
, TX_MFF_CTRL1
),
1563 if (skge_read16(hw
, SK_REG(port
, TX_MFF_CTRL1
)) & MFF_SET_MAC_RST
)
1568 netdev_warn(dev
, "genesis reset failed\n");
1571 /* Unreset the XMAC. */
1572 skge_write16(hw
, SK_REG(port
, TX_MFF_CTRL1
), MFF_CLR_MAC_RST
);
1575 * Perform additional initialization for external PHYs,
1576 * namely for the 1000baseTX cards that use the XMAC's
1579 if (hw
->phy_type
!= SK_PHY_XMAC
) {
1580 /* Take external Phy out of reset */
1581 r
= skge_read32(hw
, B2_GP_IO
);
1583 r
|= GP_DIR_0
|GP_IO_0
;
1585 r
|= GP_DIR_2
|GP_IO_2
;
1587 skge_write32(hw
, B2_GP_IO
, r
);
1589 /* Enable GMII interface */
1590 xm_write16(hw
, port
, XM_HW_CFG
, XM_HW_GMII_MD
);
1594 switch (hw
->phy_type
) {
1599 bcom_phy_init(skge
);
1600 bcom_check_link(hw
, port
);
1603 /* Set Station Address */
1604 xm_outaddr(hw
, port
, XM_SA
, dev
->dev_addr
);
1606 /* We don't use match addresses so clear */
1607 for (i
= 1; i
< 16; i
++)
1608 xm_outaddr(hw
, port
, XM_EXM(i
), zero
);
1610 /* Clear MIB counters */
1611 xm_write16(hw
, port
, XM_STAT_CMD
,
1612 XM_SC_CLR_RXC
| XM_SC_CLR_TXC
);
1613 /* Clear two times according to Errata #3 */
1614 xm_write16(hw
, port
, XM_STAT_CMD
,
1615 XM_SC_CLR_RXC
| XM_SC_CLR_TXC
);
1617 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1618 xm_write16(hw
, port
, XM_RX_HI_WM
, 1450);
1620 /* We don't need the FCS appended to the packet. */
1621 r
= XM_RX_LENERR_OK
| XM_RX_STRIP_FCS
;
1623 r
|= XM_RX_BIG_PK_OK
;
1625 if (skge
->duplex
== DUPLEX_HALF
) {
1627 * If in manual half duplex mode the other side might be in
1628 * full duplex mode, so ignore if a carrier extension is not seen
1629 * on frames received
1631 r
|= XM_RX_DIS_CEXT
;
1633 xm_write16(hw
, port
, XM_RX_CMD
, r
);
1635 /* We want short frames padded to 60 bytes. */
1636 xm_write16(hw
, port
, XM_TX_CMD
, XM_TX_AUTO_PAD
);
1638 /* Increase threshold for jumbo frames on dual port */
1639 if (hw
->ports
> 1 && jumbo
)
1640 xm_write16(hw
, port
, XM_TX_THR
, 1020);
1642 xm_write16(hw
, port
, XM_TX_THR
, 512);
1645 * Enable the reception of all error frames. This is is
1646 * a necessary evil due to the design of the XMAC. The
1647 * XMAC's receive FIFO is only 8K in size, however jumbo
1648 * frames can be up to 9000 bytes in length. When bad
1649 * frame filtering is enabled, the XMAC's RX FIFO operates
1650 * in 'store and forward' mode. For this to work, the
1651 * entire frame has to fit into the FIFO, but that means
1652 * that jumbo frames larger than 8192 bytes will be
1653 * truncated. Disabling all bad frame filtering causes
1654 * the RX FIFO to operate in streaming mode, in which
1655 * case the XMAC will start transferring frames out of the
1656 * RX FIFO as soon as the FIFO threshold is reached.
1658 xm_write32(hw
, port
, XM_MODE
, XM_DEF_MODE
);
1662 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1663 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1664 * and 'Octets Rx OK Hi Cnt Ov'.
1666 xm_write32(hw
, port
, XM_RX_EV_MSK
, XMR_DEF_MSK
);
1669 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1670 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1671 * and 'Octets Tx OK Hi Cnt Ov'.
1673 xm_write32(hw
, port
, XM_TX_EV_MSK
, XMT_DEF_MSK
);
1675 /* Configure MAC arbiter */
1676 skge_write16(hw
, B3_MA_TO_CTRL
, MA_RST_CLR
);
1678 /* configure timeout values */
1679 skge_write8(hw
, B3_MA_TOINI_RX1
, 72);
1680 skge_write8(hw
, B3_MA_TOINI_RX2
, 72);
1681 skge_write8(hw
, B3_MA_TOINI_TX1
, 72);
1682 skge_write8(hw
, B3_MA_TOINI_TX2
, 72);
1684 skge_write8(hw
, B3_MA_RCINI_RX1
, 0);
1685 skge_write8(hw
, B3_MA_RCINI_RX2
, 0);
1686 skge_write8(hw
, B3_MA_RCINI_TX1
, 0);
1687 skge_write8(hw
, B3_MA_RCINI_TX2
, 0);
1689 /* Configure Rx MAC FIFO */
1690 skge_write8(hw
, SK_REG(port
, RX_MFF_CTRL2
), MFF_RST_CLR
);
1691 skge_write16(hw
, SK_REG(port
, RX_MFF_CTRL1
), MFF_ENA_TIM_PAT
);
1692 skge_write8(hw
, SK_REG(port
, RX_MFF_CTRL2
), MFF_ENA_OP_MD
);
1694 /* Configure Tx MAC FIFO */
1695 skge_write8(hw
, SK_REG(port
, TX_MFF_CTRL2
), MFF_RST_CLR
);
1696 skge_write16(hw
, SK_REG(port
, TX_MFF_CTRL1
), MFF_TX_CTRL_DEF
);
1697 skge_write8(hw
, SK_REG(port
, TX_MFF_CTRL2
), MFF_ENA_OP_MD
);
1700 /* Enable frame flushing if jumbo frames used */
1701 skge_write16(hw
, SK_REG(port
, RX_MFF_CTRL1
), MFF_ENA_FLUSH
);
1703 /* enable timeout timers if normal frames */
1704 skge_write16(hw
, B3_PA_CTRL
,
1705 (port
== 0) ? PA_ENA_TO_TX1
: PA_ENA_TO_TX2
);
1709 static void genesis_stop(struct skge_port
*skge
)
1711 struct skge_hw
*hw
= skge
->hw
;
1712 int port
= skge
->port
;
1713 unsigned retries
= 1000;
1716 /* Disable Tx and Rx */
1717 cmd
= xm_read16(hw
, port
, XM_MMU_CMD
);
1718 cmd
&= ~(XM_MMU_ENA_RX
| XM_MMU_ENA_TX
);
1719 xm_write16(hw
, port
, XM_MMU_CMD
, cmd
);
1721 genesis_reset(hw
, port
);
1723 /* Clear Tx packet arbiter timeout IRQ */
1724 skge_write16(hw
, B3_PA_CTRL
,
1725 port
== 0 ? PA_CLR_TO_TX1
: PA_CLR_TO_TX2
);
1728 skge_write16(hw
, SK_REG(port
, TX_MFF_CTRL1
), MFF_CLR_MAC_RST
);
1730 skge_write16(hw
, SK_REG(port
, TX_MFF_CTRL1
), MFF_SET_MAC_RST
);
1731 if (!(skge_read16(hw
, SK_REG(port
, TX_MFF_CTRL1
)) & MFF_SET_MAC_RST
))
1733 } while (--retries
> 0);
1735 /* For external PHYs there must be special handling */
1736 if (hw
->phy_type
!= SK_PHY_XMAC
) {
1737 u32 reg
= skge_read32(hw
, B2_GP_IO
);
1745 skge_write32(hw
, B2_GP_IO
, reg
);
1746 skge_read32(hw
, B2_GP_IO
);
1749 xm_write16(hw
, port
, XM_MMU_CMD
,
1750 xm_read16(hw
, port
, XM_MMU_CMD
)
1751 & ~(XM_MMU_ENA_RX
| XM_MMU_ENA_TX
));
1753 xm_read16(hw
, port
, XM_MMU_CMD
);
1757 static void genesis_get_stats(struct skge_port
*skge
, u64
*data
)
1759 struct skge_hw
*hw
= skge
->hw
;
1760 int port
= skge
->port
;
1762 unsigned long timeout
= jiffies
+ HZ
;
1764 xm_write16(hw
, port
,
1765 XM_STAT_CMD
, XM_SC_SNP_TXC
| XM_SC_SNP_RXC
);
1767 /* wait for update to complete */
1768 while (xm_read16(hw
, port
, XM_STAT_CMD
)
1769 & (XM_SC_SNP_TXC
| XM_SC_SNP_RXC
)) {
1770 if (time_after(jiffies
, timeout
))
1775 /* special case for 64 bit octet counter */
1776 data
[0] = (u64
) xm_read32(hw
, port
, XM_TXO_OK_HI
) << 32
1777 | xm_read32(hw
, port
, XM_TXO_OK_LO
);
1778 data
[1] = (u64
) xm_read32(hw
, port
, XM_RXO_OK_HI
) << 32
1779 | xm_read32(hw
, port
, XM_RXO_OK_LO
);
1781 for (i
= 2; i
< ARRAY_SIZE(skge_stats
); i
++)
1782 data
[i
] = xm_read32(hw
, port
, skge_stats
[i
].xmac_offset
);
1785 static void genesis_mac_intr(struct skge_hw
*hw
, int port
)
1787 struct net_device
*dev
= hw
->dev
[port
];
1788 struct skge_port
*skge
= netdev_priv(dev
);
1789 u16 status
= xm_read16(hw
, port
, XM_ISRC
);
1791 netif_printk(skge
, intr
, KERN_DEBUG
, skge
->netdev
,
1792 "mac interrupt status 0x%x\n", status
);
1794 if (hw
->phy_type
== SK_PHY_XMAC
&& (status
& XM_IS_INP_ASS
)) {
1795 xm_link_down(hw
, port
);
1796 mod_timer(&skge
->link_timer
, jiffies
+ 1);
1799 if (status
& XM_IS_TXF_UR
) {
1800 xm_write32(hw
, port
, XM_MODE
, XM_MD_FTF
);
1801 ++dev
->stats
.tx_fifo_errors
;
1805 static void genesis_link_up(struct skge_port
*skge
)
1807 struct skge_hw
*hw
= skge
->hw
;
1808 int port
= skge
->port
;
1812 cmd
= xm_read16(hw
, port
, XM_MMU_CMD
);
1815 * enabling pause frame reception is required for 1000BT
1816 * because the XMAC is not reset if the link is going down
1818 if (skge
->flow_status
== FLOW_STAT_NONE
||
1819 skge
->flow_status
== FLOW_STAT_LOC_SEND
)
1820 /* Disable Pause Frame Reception */
1821 cmd
|= XM_MMU_IGN_PF
;
1823 /* Enable Pause Frame Reception */
1824 cmd
&= ~XM_MMU_IGN_PF
;
1826 xm_write16(hw
, port
, XM_MMU_CMD
, cmd
);
1828 mode
= xm_read32(hw
, port
, XM_MODE
);
1829 if (skge
->flow_status
== FLOW_STAT_SYMMETRIC
||
1830 skge
->flow_status
== FLOW_STAT_LOC_SEND
) {
1832 * Configure Pause Frame Generation
1833 * Use internal and external Pause Frame Generation.
1834 * Sending pause frames is edge triggered.
1835 * Send a Pause frame with the maximum pause time if
1836 * internal oder external FIFO full condition occurs.
1837 * Send a zero pause time frame to re-start transmission.
1839 /* XM_PAUSE_DA = '010000C28001' (default) */
1840 /* XM_MAC_PTIME = 0xffff (maximum) */
1841 /* remember this value is defined in big endian (!) */
1842 xm_write16(hw
, port
, XM_MAC_PTIME
, 0xffff);
1844 mode
|= XM_PAUSE_MODE
;
1845 skge_write16(hw
, SK_REG(port
, RX_MFF_CTRL1
), MFF_ENA_PAUSE
);
1848 * disable pause frame generation is required for 1000BT
1849 * because the XMAC is not reset if the link is going down
1851 /* Disable Pause Mode in Mode Register */
1852 mode
&= ~XM_PAUSE_MODE
;
1854 skge_write16(hw
, SK_REG(port
, RX_MFF_CTRL1
), MFF_DIS_PAUSE
);
1857 xm_write32(hw
, port
, XM_MODE
, mode
);
1859 /* Turn on detection of Tx underrun */
1860 msk
= xm_read16(hw
, port
, XM_IMSK
);
1861 msk
&= ~XM_IS_TXF_UR
;
1862 xm_write16(hw
, port
, XM_IMSK
, msk
);
1864 xm_read16(hw
, port
, XM_ISRC
);
1866 /* get MMU Command Reg. */
1867 cmd
= xm_read16(hw
, port
, XM_MMU_CMD
);
1868 if (hw
->phy_type
!= SK_PHY_XMAC
&& skge
->duplex
== DUPLEX_FULL
)
1869 cmd
|= XM_MMU_GMII_FD
;
1872 * Workaround BCOM Errata (#10523) for all BCom Phys
1873 * Enable Power Management after link up
1875 if (hw
->phy_type
== SK_PHY_BCOM
) {
1876 xm_phy_write(hw
, port
, PHY_BCOM_AUX_CTRL
,
1877 xm_phy_read(hw
, port
, PHY_BCOM_AUX_CTRL
)
1878 & ~PHY_B_AC_DIS_PM
);
1879 xm_phy_write(hw
, port
, PHY_BCOM_INT_MASK
, PHY_B_DEF_MSK
);
1883 xm_write16(hw
, port
, XM_MMU_CMD
,
1884 cmd
| XM_MMU_ENA_RX
| XM_MMU_ENA_TX
);
1889 static inline void bcom_phy_intr(struct skge_port
*skge
)
1891 struct skge_hw
*hw
= skge
->hw
;
1892 int port
= skge
->port
;
1895 isrc
= xm_phy_read(hw
, port
, PHY_BCOM_INT_STAT
);
1896 netif_printk(skge
, intr
, KERN_DEBUG
, skge
->netdev
,
1897 "phy interrupt status 0x%x\n", isrc
);
1899 if (isrc
& PHY_B_IS_PSE
)
1900 pr_err("%s: uncorrectable pair swap error\n",
1901 hw
->dev
[port
]->name
);
1903 /* Workaround BCom Errata:
1904 * enable and disable loopback mode if "NO HCD" occurs.
1906 if (isrc
& PHY_B_IS_NO_HDCL
) {
1907 u16 ctrl
= xm_phy_read(hw
, port
, PHY_BCOM_CTRL
);
1908 xm_phy_write(hw
, port
, PHY_BCOM_CTRL
,
1909 ctrl
| PHY_CT_LOOP
);
1910 xm_phy_write(hw
, port
, PHY_BCOM_CTRL
,
1911 ctrl
& ~PHY_CT_LOOP
);
1914 if (isrc
& (PHY_B_IS_AN_PR
| PHY_B_IS_LST_CHANGE
))
1915 bcom_check_link(hw
, port
);
1919 static int gm_phy_write(struct skge_hw
*hw
, int port
, u16 reg
, u16 val
)
1923 gma_write16(hw
, port
, GM_SMI_DATA
, val
);
1924 gma_write16(hw
, port
, GM_SMI_CTRL
,
1925 GM_SMI_CT_PHY_AD(hw
->phy_addr
) | GM_SMI_CT_REG_AD(reg
));
1926 for (i
= 0; i
< PHY_RETRIES
; i
++) {
1929 if (!(gma_read16(hw
, port
, GM_SMI_CTRL
) & GM_SMI_CT_BUSY
))
1933 pr_warning("%s: phy write timeout\n", hw
->dev
[port
]->name
);
1937 static int __gm_phy_read(struct skge_hw
*hw
, int port
, u16 reg
, u16
*val
)
1941 gma_write16(hw
, port
, GM_SMI_CTRL
,
1942 GM_SMI_CT_PHY_AD(hw
->phy_addr
)
1943 | GM_SMI_CT_REG_AD(reg
) | GM_SMI_CT_OP_RD
);
1945 for (i
= 0; i
< PHY_RETRIES
; i
++) {
1947 if (gma_read16(hw
, port
, GM_SMI_CTRL
) & GM_SMI_CT_RD_VAL
)
1953 *val
= gma_read16(hw
, port
, GM_SMI_DATA
);
1957 static u16
gm_phy_read(struct skge_hw
*hw
, int port
, u16 reg
)
1960 if (__gm_phy_read(hw
, port
, reg
, &v
))
1961 pr_warning("%s: phy read timeout\n", hw
->dev
[port
]->name
);
1965 /* Marvell Phy Initialization */
1966 static void yukon_init(struct skge_hw
*hw
, int port
)
1968 struct skge_port
*skge
= netdev_priv(hw
->dev
[port
]);
1969 u16 ctrl
, ct1000
, adv
;
1971 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1972 u16 ectrl
= gm_phy_read(hw
, port
, PHY_MARV_EXT_CTRL
);
1974 ectrl
&= ~(PHY_M_EC_M_DSC_MSK
| PHY_M_EC_S_DSC_MSK
|
1975 PHY_M_EC_MAC_S_MSK
);
1976 ectrl
|= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ
);
1978 ectrl
|= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1980 gm_phy_write(hw
, port
, PHY_MARV_EXT_CTRL
, ectrl
);
1983 ctrl
= gm_phy_read(hw
, port
, PHY_MARV_CTRL
);
1984 if (skge
->autoneg
== AUTONEG_DISABLE
)
1985 ctrl
&= ~PHY_CT_ANE
;
1987 ctrl
|= PHY_CT_RESET
;
1988 gm_phy_write(hw
, port
, PHY_MARV_CTRL
, ctrl
);
1994 if (skge
->autoneg
== AUTONEG_ENABLE
) {
1996 if (skge
->advertising
& ADVERTISED_1000baseT_Full
)
1997 ct1000
|= PHY_M_1000C_AFD
;
1998 if (skge
->advertising
& ADVERTISED_1000baseT_Half
)
1999 ct1000
|= PHY_M_1000C_AHD
;
2000 if (skge
->advertising
& ADVERTISED_100baseT_Full
)
2001 adv
|= PHY_M_AN_100_FD
;
2002 if (skge
->advertising
& ADVERTISED_100baseT_Half
)
2003 adv
|= PHY_M_AN_100_HD
;
2004 if (skge
->advertising
& ADVERTISED_10baseT_Full
)
2005 adv
|= PHY_M_AN_10_FD
;
2006 if (skge
->advertising
& ADVERTISED_10baseT_Half
)
2007 adv
|= PHY_M_AN_10_HD
;
2009 /* Set Flow-control capabilities */
2010 adv
|= phy_pause_map
[skge
->flow_control
];
2012 if (skge
->advertising
& ADVERTISED_1000baseT_Full
)
2013 adv
|= PHY_M_AN_1000X_AFD
;
2014 if (skge
->advertising
& ADVERTISED_1000baseT_Half
)
2015 adv
|= PHY_M_AN_1000X_AHD
;
2017 adv
|= fiber_pause_map
[skge
->flow_control
];
2020 /* Restart Auto-negotiation */
2021 ctrl
|= PHY_CT_ANE
| PHY_CT_RE_CFG
;
2023 /* forced speed/duplex settings */
2024 ct1000
= PHY_M_1000C_MSE
;
2026 if (skge
->duplex
== DUPLEX_FULL
)
2027 ctrl
|= PHY_CT_DUP_MD
;
2029 switch (skge
->speed
) {
2031 ctrl
|= PHY_CT_SP1000
;
2034 ctrl
|= PHY_CT_SP100
;
2038 ctrl
|= PHY_CT_RESET
;
2041 gm_phy_write(hw
, port
, PHY_MARV_1000T_CTRL
, ct1000
);
2043 gm_phy_write(hw
, port
, PHY_MARV_AUNE_ADV
, adv
);
2044 gm_phy_write(hw
, port
, PHY_MARV_CTRL
, ctrl
);
2046 /* Enable phy interrupt on autonegotiation complete (or link up) */
2047 if (skge
->autoneg
== AUTONEG_ENABLE
)
2048 gm_phy_write(hw
, port
, PHY_MARV_INT_MASK
, PHY_M_IS_AN_MSK
);
2050 gm_phy_write(hw
, port
, PHY_MARV_INT_MASK
, PHY_M_IS_DEF_MSK
);
2053 static void yukon_reset(struct skge_hw
*hw
, int port
)
2055 gm_phy_write(hw
, port
, PHY_MARV_INT_MASK
, 0);/* disable PHY IRQs */
2056 gma_write16(hw
, port
, GM_MC_ADDR_H1
, 0); /* clear MC hash */
2057 gma_write16(hw
, port
, GM_MC_ADDR_H2
, 0);
2058 gma_write16(hw
, port
, GM_MC_ADDR_H3
, 0);
2059 gma_write16(hw
, port
, GM_MC_ADDR_H4
, 0);
2061 gma_write16(hw
, port
, GM_RX_CTRL
,
2062 gma_read16(hw
, port
, GM_RX_CTRL
)
2063 | GM_RXCR_UCF_ENA
| GM_RXCR_MCF_ENA
);
2066 /* Apparently, early versions of Yukon-Lite had wrong chip_id? */
2067 static int is_yukon_lite_a0(struct skge_hw
*hw
)
2072 if (hw
->chip_id
!= CHIP_ID_YUKON
)
2075 reg
= skge_read32(hw
, B2_FAR
);
2076 skge_write8(hw
, B2_FAR
+ 3, 0xff);
2077 ret
= (skge_read8(hw
, B2_FAR
+ 3) != 0);
2078 skge_write32(hw
, B2_FAR
, reg
);
static void yukon_mac_init(struct skge_hw *hw, int port)
{
	struct skge_port *skge = netdev_priv(hw->dev[port]);
	int i;
	u32 reg;
	const u8 *addr = hw->dev[port]->dev_addr;

	/* WA code for COMA mode -- set PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9 | GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* hard reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);

	/* WA code for COMA mode -- clear PHY reset */
	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
		reg = skge_read32(hw, B2_GP_IO);
		reg |= GP_DIR_9;
		reg &= ~GP_IO_9;
		skge_write32(hw, B2_GP_IO, reg);
	}

	/* Set hardware config mode */
	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;

	/* Clear GMC reset */
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);

	if (skge->autoneg == AUTONEG_DISABLE) {
		reg = GM_GPCR_AU_ALL_DIS;
		gma_write16(hw, port, GM_GP_CTRL,
				 gma_read16(hw, port, GM_GP_CTRL) | reg);

		switch (skge->speed) {
		case SPEED_1000:
			reg &= ~GM_GPCR_SPEED_100;
			reg |= GM_GPCR_SPEED_1000;
			break;
		case SPEED_100:
			reg &= ~GM_GPCR_SPEED_1000;
			reg |= GM_GPCR_SPEED_100;
			break;
		case SPEED_10:
			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
			break;
		}

		if (skge->duplex == DUPLEX_FULL)
			reg |= GM_GPCR_DUP_FULL;
	} else
		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;

	switch (skge->flow_control) {
	case FLOW_MODE_NONE:
		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_LOC_SEND:
		/* disable Rx flow-control */
		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
		break;
	case FLOW_MODE_SYMMETRIC:
	case FLOW_MODE_SYM_OR_REM:
		/* enable Tx & Rx flow-control */
		break;
	}

	gma_write16(hw, port, GM_GP_CTRL, reg);
	skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));

	yukon_init(hw, port);

	/* MIB clear */
	reg = gma_read16(hw, port, GM_PHY_ADDR);
	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);

	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
	gma_write16(hw, port, GM_PHY_ADDR, reg);

	/* transmit control */
	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* receive control reg: unicast + multicast + no FCS */
	gma_write16(hw, port, GM_RX_CTRL,
			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);

	/* transmit flow control */
	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);

	/* transmit parameter */
	gma_write16(hw, port, GM_TX_PARAM,
			 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
			 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
			 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));

	/* configure the Serial Mode Register */
	reg = DATA_BLIND_VAL(DATA_BLIND_DEF)
		| GM_SMOD_VLAN_ENA
		| IPG_DATA_VAL(IPG_DATA_DEF);

	if (hw->dev[port]->mtu > ETH_DATA_LEN)
		reg |= GM_SMOD_JUMBO_ENA;

	gma_write16(hw, port, GM_SERIAL_MODE, reg);

	/* physical address: used for pause frames */
	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
	/* virtual address for data */
	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);

	/* enable interrupt mask for counter overflows */
	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);

	/* Initialize Mac Fifo */

	/* Configure Rx MAC FIFO */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
	reg = GMF_OPER_ON | GMF_RX_F_FL_ON;

	/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (is_yukon_lite_a0(hw))
		reg &= ~GMF_RX_F_FL_ON;

	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
	/*
	 * because Pause Packet Truncation in GMAC is not working
	 * we have to increase the Flush Threshold to 64 bytes
	 * in order to flush pause packets in Rx FIFO on Yukon-1
	 */
	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);

	/* Configure Tx MAC FIFO */
	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
	skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
}
/* Go into power down mode */
static void yukon_suspend(struct skge_hw *hw, int port)
{
	u16 ctrl;

	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
	ctrl |= PHY_M_PC_POL_R_DIS;
	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);

	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_RESET;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);

	/* switch IEEE compatible power down mode on */
	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
	ctrl |= PHY_CT_PDOWN;
	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
}
static void yukon_stop(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
	yukon_reset(hw, port);

	gma_write16(hw, port, GM_GP_CTRL,
			 gma_read16(hw, port, GM_GP_CTRL)
			 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
	gma_read16(hw, port, GM_GP_CTRL);

	yukon_suspend(hw, port);

	/* set GPHY Control reset */
	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
}
static void yukon_get_stats(struct skge_port *skge, u64 *data)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	int i;

	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
		| gma_read32(hw, port, GM_TXO_OK_LO);
	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
		| gma_read32(hw, port, GM_RXO_OK_LO);

	for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
		data[i] = gma_read32(hw, port,
					  skge_stats[i].gma_offset);
}
static void yukon_mac_intr(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];
	struct skge_port *skge = netdev_priv(dev);
	u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));

	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "mac interrupt status 0x%x\n", status);

	if (status & GM_IS_RX_FF_OR) {
		++dev->stats.rx_fifo_errors;
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
	}

	if (status & GM_IS_TX_FF_UR) {
		++dev->stats.tx_fifo_errors;
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
	}
}
static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
{
	switch (aux & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return SPEED_1000;
	case PHY_M_PS_SPEED_100:
		return SPEED_100;
	default:
		return SPEED_10;
	}
}
static void yukon_link_up(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 reg;

	/* Enable Transmit FIFO Underrun */
	skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);

	reg = gma_read16(hw, port, GM_GP_CTRL);
	if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
		reg |= GM_GPCR_DUP_FULL;

	/* enable Rx/Tx */
	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
	gma_write16(hw, port, GM_GP_CTRL, reg);

	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK);
	skge_link_up(skge);
}
static void yukon_link_down(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u16 ctrl;

	ctrl = gma_read16(hw, port, GM_GP_CTRL);
	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	gma_write16(hw, port, GM_GP_CTRL, ctrl);

	if (skge->flow_status == FLOW_STAT_REM_SEND) {
		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
		ctrl |= PHY_M_AN_ASP;
		/* restore Asymmetric Pause bit */
		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
	}

	skge_link_down(skge);

	yukon_init(hw, port);
}
static void yukon_phy_intr(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	const char *reason = NULL;
	u16 istatus, phystat;

	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);

	netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
		     "phy interrupt status 0x%x 0x%x\n", istatus, phystat);

	if (istatus & PHY_M_IS_AN_COMPL) {
		if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
		    & PHY_M_AN_RF) {
			reason = "remote fault";
			goto failed;
		}

		if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
			reason = "master/slave fault";
			goto failed;
		}

		if (!(phystat & PHY_M_PS_SPDUP_RES)) {
			reason = "speed/duplex";
			goto failed;
		}

		skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
			? DUPLEX_FULL : DUPLEX_HALF;
		skge->speed = yukon_speed(hw, phystat);

		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
		switch (phystat & PHY_M_PS_PAUSE_MSK) {
		case PHY_M_PS_PAUSE_MSK:
			skge->flow_status = FLOW_STAT_SYMMETRIC;
			break;
		case PHY_M_PS_RX_P_EN:
			skge->flow_status = FLOW_STAT_REM_SEND;
			break;
		case PHY_M_PS_TX_P_EN:
			skge->flow_status = FLOW_STAT_LOC_SEND;
			break;
		default:
			skge->flow_status = FLOW_STAT_NONE;
		}

		if (skge->flow_status == FLOW_STAT_NONE ||
		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
		else
			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
		yukon_link_up(skge);
		return;
	}

	if (istatus & PHY_M_IS_LSP_CHANGE)
		skge->speed = yukon_speed(hw, phystat);

	if (istatus & PHY_M_IS_DUP_CHANGE)
		skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
	if (istatus & PHY_M_IS_LST_CHANGE) {
		if (phystat & PHY_M_PS_LINK_UP)
			yukon_link_up(skge);
		else
			yukon_link_down(skge);
	}
	return;
 failed:
	pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);

	/* XXX restart autonegotiation? */
}
static void skge_phy_reset(struct skge_port *skge)
{
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct net_device *dev = hw->dev[port];

	netif_stop_queue(skge->netdev);
	netif_carrier_off(skge->netdev);

	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		genesis_reset(hw, port);
		genesis_mac_init(hw, port);
	} else {
		yukon_reset(hw, port);
		yukon_init(hw, port);
	}
	spin_unlock_bh(&hw->phy_lock);

	skge_set_multicast(dev);
}
/* Basic MII support */
static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int err = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -ENODEV;	/* Phy still in reset */

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u16 val = 0;
		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		else
			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
		spin_unlock_bh(&hw->phy_lock);
		data->val_out = val;
		break;
	}

	case SIOCSMIIREG:
		spin_lock_bh(&hw->phy_lock);
		if (hw->chip_id == CHIP_ID_GENESIS)
			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
				   data->val_in);
		else
			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
				   data->val_in);
		spin_unlock_bh(&hw->phy_lock);
		break;
	}
	return err;
}
static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
	u32 end;

	start /= 8;
	len /= 8;
	end = start + len - 1;

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
	skge_write32(hw, RB_ADDR(q, RB_START), start);
	skge_write32(hw, RB_ADDR(q, RB_WP), start);
	skge_write32(hw, RB_ADDR(q, RB_RP), start);
	skge_write32(hw, RB_ADDR(q, RB_END), end);

	if (q == Q_R1 || q == Q_R2) {
		/* Set thresholds on receive queue's */
		skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
			     start + (2*len)/3);
		skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
			     start + (len)/3);
	} else {
		/* Enable store & forward on Tx queue's because
		 * Tx FIFO is only 4K on Genesis and 1K on Yukon
		 */
		skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
	}

	skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
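/*
 * Editor's note (illustrative, not in the original source): skge_ramset()
 * carves one queue's window out of the on-chip packet RAM; start and len are
 * byte values converted to 8-byte units before being written.  For example,
 * a 24 KB chunk (len = 24576 bytes -> 3072 units) starting at unit 0 would
 * get its receive upper/lower pause thresholds programmed at roughly
 * 2*3072/3 = 2048 and 3072/3 = 1024 units into the buffer.
 */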
/* Setup Bus Memory Interface */
static void skge_qset(struct skge_port *skge, u16 q,
		      const struct skge_element *e)
{
	struct skge_hw *hw = skge->hw;
	u32 watermark = 0x600;
	u64 base = skge->dma + (e->desc - skge->mem);

	/* optimization to reduce window on 32bit/33mhz */
	if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0)
		watermark /= 2;

	skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
	skge_write32(hw, Q_ADDR(q, Q_F), watermark);
	skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
	skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
}
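/*
 * Editor's note (assumption about intent, not original text): the 0x600 byte
 * FIFO watermark appears to be halved on a plain 32-bit/33 MHz PCI bus
 * (neither CS_BUS_CLOCK nor CS_BUS_SLOT_SZ set) so the BMU starts fetching
 * earlier and the slower bus has a smaller window in which to underrun.
 */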
static int skge_up(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	u32 chunk, ram_addr;
	size_t rx_size, tx_size;
	int err;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	netif_info(skge, ifup, skge->netdev, "enabling interface\n");

	if (dev->mtu > RX_BUF_SIZE)
		skge->rx_buf_size = dev->mtu + ETH_HLEN;
	else
		skge->rx_buf_size = RX_BUF_SIZE;


	rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
	tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
	skge->mem_size = tx_size + rx_size;
	skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
	if (!skge->mem)
		return -ENOMEM;

	BUG_ON(skge->dma & 7);

	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
		err = -EINVAL;
		goto free_pci_mem;
	}

	memset(skge->mem, 0, skge->mem_size);

	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
	if (err)
		goto free_pci_mem;

	err = skge_rx_fill(dev);
	if (err)
		goto free_rx_ring;

	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
			      skge->dma + rx_size);
	if (err)
		goto free_rx_ring;

	/* Initialize MAC */
	spin_lock_bh(&hw->phy_lock);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_init(hw, port);
	else
		yukon_mac_init(hw, port);
	spin_unlock_bh(&hw->phy_lock);

	/* Configure RAMbuffers - equally between ports and tx/rx */
	chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
	ram_addr = hw->ram_offset + 2 * chunk * port;

	skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
	skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);

	BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
	skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
	skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);

	/* Start receiver BMU */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
	skge_led(skge, LED_MODE_ON);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);

	napi_enable(&skge->napi);
	return 0;

 free_rx_ring:
	skge_rx_clean(skge);
	kfree(skge->rx_ring.start);
 free_pci_mem:
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;

	return err;
}
/* stop receiver */
static void skge_rx_stop(struct skge_hw *hw, int port)
{
	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);
	skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
}
static int skge_down(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;

	if (skge->mem == NULL)
		return 0;

	netif_info(skge, ifdown, skge->netdev, "disabling interface\n");

	netif_tx_disable(dev);

	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
		del_timer_sync(&skge->link_timer);

	napi_disable(&skge->napi);
	netif_carrier_off(dev);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask &= ~portmask[port];
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);

	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_stop(skge);
	else
		yukon_stop(skge);

	/* Stop transmitter */
	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
		     RB_RST_SET|RB_DIS_OP_MD);


	/* Disable Force Sync bit and Enable Alloc bit */
	skge_write8(hw, SK_REG(port, TXA_CTRL),
		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);

	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
	skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
	skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);

	/* Reset PCI FIFO */
	skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);

	/* Reset the RAM Buffer async Tx queue */
	skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);

	skge_rx_stop(hw, port);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
		skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
	} else {
		skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
	}

	skge_led(skge, LED_MODE_OFF);

	netif_tx_lock_bh(dev);
	skge_tx_clean(dev);
	netif_tx_unlock_bh(dev);

	skge_rx_clean(skge);

	kfree(skge->rx_ring.start);
	kfree(skge->tx_ring.start);
	pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
	skge->mem = NULL;
	return 0;
}
static inline int skge_avail(const struct skge_ring *ring)
{
	smp_mb();
	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
		+ (ring->to_clean - ring->to_use) - 1;
}
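/*
 * Editor's note (worked example, not from the original source): with
 * count = 128, to_use at slot 10 and to_clean at slot 5, to_clean is not
 * past to_use so the first term contributes 128 and the result is
 * 128 + (5 - 10) - 1 = 122 free slots.  One slot is always kept unused so
 * that to_use == to_clean unambiguously means "ring empty".
 */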
static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	struct skge_element *e;
	struct skge_tx_desc *td;
	int i;
	u32 control, len;
	u64 map;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
		return NETDEV_TX_BUSY;

	e = skge->tx_ring.to_use;
	td = e->desc;
	BUG_ON(td->control & BMU_OWN);
	e->skb = skb;
	len = skb_headlen(skb);
	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(e, mapaddr, map);
	pci_unmap_len_set(e, maplen, len);

	td->dma_lo = map;
	td->dma_hi = map >> 32;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const int offset = skb_transport_offset(skb);

		/* This seems backwards, but it is what the sk98lin
		 * does.  Looks like hardware is wrong?
		 */
		if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
		    hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
			control = BMU_TCP_CHECK;
		else
			control = BMU_UDP_CHECK;

		td->csum_offs = 0;
		td->csum_start = offset;
		td->csum_write = offset + skb->csum_offset;
	} else
		control = BMU_CHECK;

	if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
		control |= BMU_EOF | BMU_IRQ_EOF;
	else {
		struct skge_tx_desc *tf = td;

		control |= BMU_STFWD;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			map = pci_map_page(hw->pdev, frag->page, frag->page_offset,
					   frag->size, PCI_DMA_TODEVICE);

			e = e->next;
			e->skb = skb;
			tf = e->desc;
			BUG_ON(tf->control & BMU_OWN);

			tf->dma_lo = map;
			tf->dma_hi = (u64) map >> 32;
			pci_unmap_addr_set(e, mapaddr, map);
			pci_unmap_len_set(e, maplen, frag->size);

			tf->control = BMU_OWN | BMU_SW | control | frag->size;
		}
		tf->control |= BMU_EOF | BMU_IRQ_EOF;
	}
	/* Make sure all the descriptors written */
	wmb();
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
		     "tx queued, slot %td, len %d\n",
		     e - skge->tx_ring.start, skb->len);

	skge->tx_ring.to_use = e->next;
	smp_wmb();

	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
		netdev_dbg(dev, "transmit queue full\n");
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
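/*
 * Editor's note (summary added for clarity, not original text): descriptors
 * are handed to the hardware by setting BMU_OWN, and the ordering above is
 * deliberate: all fragment descriptors are made ready first, a write barrier
 * is issued, and only then does the head descriptor receive BMU_OWN |
 * BMU_STF, so the BMU can never see a start-of-frame descriptor whose
 * continuation descriptors are still being written by the CPU.
 */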
/* Free resources associated with this ring element */
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
			 u32 control)
{
	struct pci_dev *pdev = skge->hw->pdev;

	/* skb header vs. fragment */
	if (control & BMU_STF)
		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_TODEVICE);
	else
		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
			       pci_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);

	if (control & BMU_EOF) {
		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
			     "tx done slot %td\n", e - skge->tx_ring.start);

		dev_kfree_skb(e->skb);
	}
}
/* Free all buffers in transmit ring */
static void skge_tx_clean(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_element *e;

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		skge_tx_free(skge, e, td->control);
		td->control = 0;
	}

	skge->tx_ring.to_clean = e;
}
static void skge_tx_timeout(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
	skge_tx_clean(dev);
	netif_wake_queue(dev);
}
static int skge_change_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		dev->mtu = new_mtu;
		return 0;
	}

	skge_down(dev);

	dev->mtu = new_mtu;

	err = skge_up(dev);
	if (err)
		dev_close(dev);

	return err;
}
static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };

static void genesis_add_filter(u8 filter[8], const u8 *addr)
{
	u32 crc, bit;

	crc = ether_crc_le(ETH_ALEN, addr);
	bit = ~crc & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void genesis_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list;
	u32 mode;
	u8 filter[8];

	mode = xm_read32(hw, port, XM_MODE);
	mode |= XM_MD_ENA_HASH;
	if (dev->flags & IFF_PROMISC)
		mode |= XM_MD_ENA_PROM;
	else
		mode &= ~XM_MD_ENA_PROM;

	if (dev->flags & IFF_ALLMULTI)
		memset(filter, 0xff, sizeof(filter));
	else {
		memset(filter, 0, sizeof(filter));

		if (skge->flow_status == FLOW_STAT_REM_SEND ||
		    skge->flow_status == FLOW_STAT_SYMMETRIC)
			genesis_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(list, dev)
			genesis_add_filter(filter, list->dmi_addr);
	}

	xm_write32(hw, port, XM_MODE, mode);
	xm_outhash(hw, port, XM_HSM, filter);
}
static void yukon_add_filter(u8 filter[8], const u8 *addr)
{
	u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;
	filter[bit/8] |= 1 << (bit%8);
}

static void yukon_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	int port = skge->port;
	struct dev_mc_list *list;
	int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
			skge->flow_status == FLOW_STAT_SYMMETRIC);
	u16 reg;
	u8 filter[8];

	memset(filter, 0, sizeof(filter));

	reg = gma_read16(hw, port, GM_RX_CTRL);
	reg |= GM_RXCR_UCF_ENA;

	if (dev->flags & IFF_PROMISC)		/* promiscuous */
		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
		memset(filter, 0xff, sizeof(filter));
	else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
		reg &= ~GM_RXCR_MCF_ENA;
	else {
		reg |= GM_RXCR_MCF_ENA;

		if (rx_pause)
			yukon_add_filter(filter, pause_mc_addr);

		netdev_for_each_mc_addr(list, dev)
			yukon_add_filter(filter, list->dmi_addr);
	}


	gma_write16(hw, port, GM_MC_ADDR_H1,
			 (u16)filter[0] | ((u16)filter[1] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H2,
			 (u16)filter[2] | ((u16)filter[3] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H3,
			 (u16)filter[4] | ((u16)filter[5] << 8));
	gma_write16(hw, port, GM_MC_ADDR_H4,
			 (u16)filter[6] | ((u16)filter[7] << 8));

	gma_write16(hw, port, GM_RX_CTRL, reg);
}
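/*
 * Editor's note (illustrative example, not from the original source): the
 * Yukon multicast filter is a 64-bit hash.  ether_crc(ETH_ALEN, addr) & 0x3f
 * picks one of 64 bit positions; for instance a hash value of 0x2a (42) sets
 * bit 2 of filter[5] (42/8 = 5, 42%8 = 2).  The eight filter bytes are then
 * packed little-endian into the four 16-bit GM_MC_ADDR_H1..H4 registers
 * written above.
 */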
static inline u16 phy_length(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return status >> XMR_FS_LEN_SHIFT;
	else
		return status >> GMR_FS_LEN_SHIFT;
}

static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
	else
		return (status & GMR_FS_ANY_ERR) ||
			(status & GMR_FS_RX_OK) == 0;
}

static void skge_set_multicast(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_set_multicast(dev);
	else
		yukon_set_multicast(dev);

}
/* Get receive buffer from descriptor.
 * Handles copy of small buffers and reallocation failures
 */
static struct sk_buff *skge_rx_get(struct net_device *dev,
				   struct skge_element *e,
				   u32 control, u32 status, u16 csum)
{
	struct skge_port *skge = netdev_priv(dev);
	struct sk_buff *skb;
	u16 len = control & BMU_BBC;

	netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
		     "rx slot %td status 0x%x len %d\n",
		     e - skge->rx_ring.start, status, len);

	if (len > skge->rx_buf_size)
		goto error;

	if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
		goto error;

	if (bad_phy_status(skge->hw, status))
		goto error;

	if (phy_length(skge->hw, status) != len)
		goto error;

	if (len < RX_COPY_THRESHOLD) {
		skb = netdev_alloc_skb_ip_align(dev, len);
		if (!skb)
			goto resubmit;

		pci_dma_sync_single_for_cpu(skge->hw->pdev,
					    pci_unmap_addr(e, mapaddr),
					    len, PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(e->skb, skb->data, len);
		pci_dma_sync_single_for_device(skge->hw->pdev,
					       pci_unmap_addr(e, mapaddr),
					       len, PCI_DMA_FROMDEVICE);
		skge_rx_reuse(e, skge->rx_buf_size);
	} else {
		struct sk_buff *nskb;

		nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
		if (!nskb)
			goto resubmit;

		pci_unmap_single(skge->hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = e->skb;
		prefetch(skb->data);
		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
	}

	skb_put(skb, len);
	if (skge->rx_csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	skb->protocol = eth_type_trans(skb, dev);

	return skb;
error:

	netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
		     "rx err, slot %td control 0x%x status 0x%x\n",
		     e - skge->rx_ring.start, control, status);

	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
		if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
			dev->stats.rx_length_errors++;
		if (status & XMR_FS_FRA_ERR)
			dev->stats.rx_frame_errors++;
		if (status & XMR_FS_FCS_ERR)
			dev->stats.rx_crc_errors++;
	} else {
		if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
			dev->stats.rx_length_errors++;
		if (status & GMR_FS_FRAGMENT)
			dev->stats.rx_frame_errors++;
		if (status & GMR_FS_CRC_ERR)
			dev->stats.rx_crc_errors++;
	}

resubmit:
	skge_rx_reuse(e, skge->rx_buf_size);
	return NULL;
}
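/*
 * Editor's note (explanatory, not original text): this is a standard
 * "copybreak" receive path.  Frames shorter than RX_COPY_THRESHOLD (128
 * bytes) are copied into a small freshly allocated skb and the original DMA
 * buffer is immediately reposted via skge_rx_reuse(); larger frames are
 * passed up in place and a full-size replacement buffer is installed with
 * skge_rx_setup().  Either allocation failure falls back to resubmitting the
 * old buffer, so the receive ring never loses an element.
 */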
/* Free all buffers in Tx ring which are no longer owned by device */
static void skge_tx_done(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
		u32 control = ((const struct skge_tx_desc *) e->desc)->control;

		if (control & BMU_OWN)
			break;

		skge_tx_free(skge, e, control);
	}
	skge->tx_ring.to_clean = e;

	/* Can run lockless until we need to synchronize to restart queue. */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
		netif_tx_lock(dev);
		if (unlikely(netif_queue_stopped(dev) &&
			     skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
			netif_wake_queue(dev);

		}
		netif_tx_unlock(dev);
	}
}
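/*
 * Editor's note (explanatory, not original text): the wake-up above uses the
 * usual lockless stop/wake pattern.  The memory barrier makes the updated
 * to_clean pointer visible before the queue state is tested, and the
 * netif_queue_stopped + skge_avail check is repeated under netif_tx_lock so
 * a concurrent skge_xmit_frame() cannot stop the queue again between the
 * test and the netif_wake_queue() call.
 */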
static int skge_poll(struct napi_struct *napi, int to_do)
{
	struct skge_port *skge = container_of(napi, struct skge_port, napi);
	struct net_device *dev = skge->netdev;
	struct skge_hw *hw = skge->hw;
	struct skge_ring *ring = &skge->rx_ring;
	struct skge_element *e;
	int work_done = 0;

	skge_tx_done(dev);

	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
		struct skge_rx_desc *rd = e->desc;
		struct sk_buff *skb;
		u32 control;

		rmb();
		control = rd->control;
		if (control & BMU_OWN)
			break;

		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
		if (likely(skb)) {
			netif_receive_skb(skb);

			++work_done;
		}
	}
	ring->to_clean = e;

	/* restart receiver */
	wmb();
	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

	if (work_done < to_do) {
		unsigned long flags;

		spin_lock_irqsave(&hw->hw_lock, flags);
		__napi_complete(napi);
		hw->intr_mask |= napimask[skge->port];
		skge_write32(hw, B0_IMSK, hw->intr_mask);
		skge_read32(hw, B0_IMSK);
		spin_unlock_irqrestore(&hw->hw_lock, flags);
	}

	return work_done;
}
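/*
 * Editor's note (interpretation, not original text): when the budget is not
 * exhausted the poll routine completes NAPI and re-enables this port's
 * bits in B0_IMSK under hw_lock; the trailing skge_read32(hw, B0_IMSK)
 * appears to act as a flush read so the unmask reaches the chip before
 * interrupts are relied upon again.
 */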
/* Parity errors seem to happen when Genesis is connected to a switch
 * with no other ports present. Heartbeat error??
 */
static void skge_mac_parity(struct skge_hw *hw, int port)
{
	struct net_device *dev = hw->dev[port];

	++dev->stats.tx_heartbeat_errors;

	if (hw->chip_id == CHIP_ID_GENESIS)
		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
			     MFF_CLR_PERR);
	else
		/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
		skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
			    (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
			    ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
}
static void skge_mac_intr(struct skge_hw *hw, int port)
{
	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_mac_intr(hw, port);
	else
		yukon_mac_intr(hw, port);
}
/* Handle device specific framing and timeout interrupts */
static void skge_error_irq(struct skge_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);

	if (hw->chip_id == CHIP_ID_GENESIS) {
		/* clear xmac errors */
		if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
			skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
		if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
			skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
	} else {
		/* Timestamp (unused) overflow */
		if (hwstatus & IS_IRQ_TIST_OV)
			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	}

	if (hwstatus & IS_RAM_RD_PAR) {
		dev_err(&pdev->dev, "Ram read data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
	}

	if (hwstatus & IS_RAM_WR_PAR) {
		dev_err(&pdev->dev, "Ram write data parity error\n");
		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
	}

	if (hwstatus & IS_M1_PAR_ERR)
		skge_mac_parity(hw, 0);

	if (hwstatus & IS_M2_PAR_ERR)
		skge_mac_parity(hw, 1);

	if (hwstatus & IS_R1_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[0]->name);
		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & IS_R2_PAR_ERR) {
		dev_err(&pdev->dev, "%s: receive queue parity error\n",
			hw->dev[1]->name);
		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
	}

	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
		u16 pci_status, pci_cmd;

		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		pci_read_config_word(pdev, PCI_STATUS, &pci_status);

		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
			pci_cmd, pci_status);

		/* Write the error bits back to clear them. */
		pci_status &= PCI_STATUS_ERROR_BITS;
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config_word(pdev, PCI_COMMAND,
				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		pci_write_config_word(pdev, PCI_STATUS, pci_status);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

		/* if error still set then just ignore it */
		hwstatus = skge_read32(hw, B0_HWE_ISRC);
		if (hwstatus & IS_IRQ_STAT) {
			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}
	}
}
/*
 * Interrupt from PHY are handled in tasklet (softirq)
 * because accessing phy registers requires spin wait which might
 * cause excess interrupt latency.
 */
static void skge_extirq(unsigned long arg)
{
	struct skge_hw *hw = (struct skge_hw *) arg;
	int port;

	for (port = 0; port < hw->ports; port++) {
		struct net_device *dev = hw->dev[port];

		if (netif_running(dev)) {
			struct skge_port *skge = netdev_priv(dev);

			spin_lock(&hw->phy_lock);
			if (hw->chip_id != CHIP_ID_GENESIS)
				yukon_phy_intr(skge);
			else if (hw->phy_type == SK_PHY_BCOM)
				bcom_phy_intr(skge);
			spin_unlock(&hw->phy_lock);
		}
	}

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= IS_EXT_REG;
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id)
{
	struct skge_hw *hw = dev_id;
	u32 status;
	int handled = 0;

	spin_lock(&hw->hw_lock);
	/* Reading this register masks IRQ */
	status = skge_read32(hw, B0_SP_ISRC);
	if (status == 0 || status == ~0)
		goto out;

	handled = 1;
	status &= hw->intr_mask;
	if (status & IS_EXT_REG) {
		hw->intr_mask &= ~IS_EXT_REG;
		tasklet_schedule(&hw->phy_task);
	}

	if (status & (IS_XA1_F|IS_R1_F)) {
		struct skge_port *skge = netdev_priv(hw->dev[0]);
		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
		napi_schedule(&skge->napi);
	}

	if (status & IS_PA_TO_TX1)
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);

	if (status & IS_PA_TO_RX1) {
		++hw->dev[0]->stats.rx_over_errors;
		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
	}


	if (status & IS_MAC1)
		skge_mac_intr(hw, 0);

	if (hw->dev[1]) {
		struct skge_port *skge = netdev_priv(hw->dev[1]);

		if (status & (IS_XA2_F|IS_R2_F)) {
			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
			napi_schedule(&skge->napi);
		}

		if (status & IS_PA_TO_RX2) {
			++hw->dev[1]->stats.rx_over_errors;
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
		}

		if (status & IS_PA_TO_TX2)
			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);

		if (status & IS_MAC2)
			skge_mac_intr(hw, 1);
	}

	if (status & IS_HW_ERR)
		skge_error_irq(hw);

	skge_write32(hw, B0_IMSK, hw->intr_mask);
	skge_read32(hw, B0_IMSK);
out:
	spin_unlock(&hw->hw_lock);

	return IRQ_RETVAL(handled);
}
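/*
 * Editor's note (explanatory, not original text): reading B0_SP_ISRC both
 * reports and masks the interrupt sources, which is why the handler can run
 * with only hw_lock held and simply rewrites hw->intr_mask at the end to
 * unmask what it still wants.  A status of 0 means the shared IRQ was not
 * ours; ~0 is the usual "hardware has gone away" pattern, and both cases
 * return without claiming the interrupt.
 */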
#ifdef CONFIG_NET_POLL_CONTROLLER
static void skge_netpoll(struct net_device *dev)
{
	struct skge_port *skge = netdev_priv(dev);

	disable_irq(dev->irq);
	skge_intr(dev->irq, skge->hw);
	enable_irq(dev->irq);
}
#endif
static int skge_set_mac_address(struct net_device *dev, void *p)
{
	struct skge_port *skge = netdev_priv(dev);
	struct skge_hw *hw = skge->hw;
	unsigned port = skge->port;
	const struct sockaddr *addr = p;
	u16 ctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev)) {
		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
	} else {
		/* disable Rx */
		spin_lock_bh(&hw->phy_lock);
		ctrl = gma_read16(hw, port, GM_GP_CTRL);
		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);

		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);

		if (hw->chip_id == CHIP_ID_GENESIS)
			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
		else {
			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
		}

		gma_write16(hw, port, GM_GP_CTRL, ctrl);
		spin_unlock_bh(&hw->phy_lock);
	}

	return 0;
}
static const struct {
	u8 id;
	const char *name;
} skge_chips[] = {
	{ CHIP_ID_GENESIS,	"Genesis" },
	{ CHIP_ID_YUKON,	"Yukon" },
	{ CHIP_ID_YUKON_LITE,	"Yukon-Lite"},
	{ CHIP_ID_YUKON_LP,	"Yukon-LP"},
};

static const char *skge_board_name(const struct skge_hw *hw)
{
	int i;
	static char buf[16];

	for (i = 0; i < ARRAY_SIZE(skge_chips); i++)
		if (skge_chips[i].id == hw->chip_id)
			return skge_chips[i].name;

	snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id);
	return buf;
}
/*
 * Setup the board data structure, but don't bring up
 * the port(s)
 */
static int skge_reset(struct skge_hw *hw)
{
	u32 reg;
	u16 ctst, pci_status;
	u8 t8, mac_cfg, pmd_type;
	int i;

	ctst = skge_read16(hw, B0_CTST);

	/* do a SW reset */
	skge_write8(hw, B0_CTST, CS_RST_SET);
	skge_write8(hw, B0_CTST, CS_RST_CLR);

	/* clear PCI errors, if any */
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	skge_write8(hw, B2_TST_CTRL2, 0);

	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
	pci_write_config_word(hw->pdev, PCI_STATUS,
			      pci_status | PCI_STATUS_ERROR_BITS);
	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	skge_write8(hw, B0_CTST, CS_MRST_CLR);

	/* restore CLK_RUN bits (for Yukon-Lite) */
	skge_write16(hw, B0_CTST,
		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));

	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
	pmd_type = skge_read8(hw, B2_PMD_TYP);
	hw->copper = (pmd_type == 'T' || pmd_type == '1');

	switch (hw->chip_id) {
	case CHIP_ID_GENESIS:
		switch (hw->phy_type) {
		case SK_PHY_XMAC:
			hw->phy_addr = PHY_ADDR_XMAC;
			break;
		case SK_PHY_BCOM:
			hw->phy_addr = PHY_ADDR_BCOM;
			break;
		default:
			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
				hw->phy_type);
			return -EOPNOTSUPP;
		}
		break;

	case CHIP_ID_YUKON:
	case CHIP_ID_YUKON_LITE:
	case CHIP_ID_YUKON_LP:
		if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
			hw->copper = 1;

		hw->phy_addr = PHY_ADDR_MARV;
		break;

	default:
		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
			hw->chip_id);
		return -EOPNOTSUPP;
	}

	mac_cfg = skge_read8(hw, B2_MAC_CFG);
	hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
	hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;

	/* read the adapters RAM size */
	t8 = skge_read8(hw, B2_E_0);
	if (hw->chip_id == CHIP_ID_GENESIS) {
		if (t8 == 3) {
			/* special case: 4 x 64k x 36, offset = 0x80000 */
			hw->ram_size = 0x100000;
			hw->ram_offset = 0x80000;
		} else
			hw->ram_size = t8 * 512;
	} else if (t8 == 0)
		hw->ram_size = 0x20000;
	else
		hw->ram_size = t8 * 4096;

	hw->intr_mask = IS_HW_ERR;

	/* Use PHY IRQ for all but fiber based Genesis board */
	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
		hw->intr_mask |= IS_EXT_REG;

	if (hw->chip_id == CHIP_ID_GENESIS)
		genesis_init(hw);
	else {
		/* switch power to VCC (WA for VAUX problem) */
		skge_write8(hw, B0_POWER_CTRL,
			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);

		/* avoid boards with stuck Hardware error bits */
		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
			dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
			hw->intr_mask &= ~IS_HW_ERR;
		}

		/* Clear PHY COMA */
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
		reg &= ~PCI_PHY_COMA;
		pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);


		for (i = 0; i < hw->ports; i++) {
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
		}
	}

	/* turn off hardware timer (unused) */
	skge_write8(hw, B2_TI_CTRL, TIM_STOP);
	skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
	skge_write8(hw, B0_LED, LED_STAT_ON);

	/* enable the Tx Arbiters */
	for (i = 0; i < hw->ports; i++)
		skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);

	/* Initialize ram interface */
	skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);

	skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53);
	skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53);

	skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK);

	/* Set interrupt moderation for Transmit only
	 * Receive interrupts avoided by NAPI
	 */
	skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F);
	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
	skge_write32(hw, B2_IRQM_CTRL, TIM_START);

	skge_write32(hw, B0_IMSK, hw->intr_mask);

	for (i = 0; i < hw->ports; i++) {
		if (hw->chip_id == CHIP_ID_GENESIS)
			genesis_reset(hw, i);
		else
			yukon_reset(hw, i);
	}

	return 0;
}
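/*
 * Editor's note (worked example, not original text): the RAM size read from
 * B2_E_0 is interpreted differently per family.  On Genesis t8 counts
 * 512-byte units, except for the t8 == 3 special case (1 MB with the usable
 * half at offset 0x80000); on Yukon t8 == 0 means a 128 KB buffer and
 * otherwise t8 counts 4 KB blocks, so a value of t8 = 16 would mean 64 KB of
 * packet RAM shared by both ports.
 */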
#ifdef CONFIG_SKGE_DEBUG

static struct dentry *skge_debug;

static int skge_debug_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	const struct skge_port *skge = netdev_priv(dev);
	const struct skge_hw *hw = skge->hw;
	const struct skge_element *e;

	if (!netif_running(dev))
		return -ENETDOWN;

	seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
		   skge_read32(hw, B0_IMSK));

	seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		const struct skge_tx_desc *t = e->desc;
		seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
			   t->control, t->dma_hi, t->dma_lo, t->status,
			   t->csum_offs, t->csum_write, t->csum_start);
	}

	seq_printf(seq, "\nRx Ring: \n");
	for (e = skge->rx_ring.to_clean; ; e = e->next) {
		const struct skge_rx_desc *r = e->desc;

		if (r->control & BMU_OWN)
			break;

		seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
			   r->control, r->dma_hi, r->dma_lo, r->status,
			   r->timestamp, r->csum1, r->csum1_start);
	}

	return 0;
}

static int skge_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, skge_debug_show, inode->i_private);
}

static const struct file_operations skge_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= skge_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Use network device events to create/remove/rename
 * debugfs file entries
 */
static int skge_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct skge_port *skge;
	struct dentry *d;

	if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
		goto done;

	skge = netdev_priv(dev);
	switch (event) {
	case NETDEV_CHANGENAME:
		if (skge->debugfs) {
			d = debugfs_rename(skge_debug, skge->debugfs,
					   skge_debug, dev->name);
			if (d)
				skge->debugfs = d;
			else {
				netdev_info(dev, "rename failed\n");
				debugfs_remove(skge->debugfs);
			}
		}
		break;

	case NETDEV_GOING_DOWN:
		if (skge->debugfs) {
			debugfs_remove(skge->debugfs);
			skge->debugfs = NULL;
		}
		break;

	case NETDEV_UP:
		d = debugfs_create_file(dev->name, S_IRUGO,
					skge_debug, dev,
					&skge_debug_fops);
		if (!d || IS_ERR(d))
			netdev_info(dev, "debugfs create failed\n");
		else
			skge->debugfs = d;
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block skge_notifier = {
	.notifier_call = skge_device_event,
};


static __init void skge_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("skge", NULL);
	if (!ent || IS_ERR(ent)) {
		pr_info("debugfs create directory failed\n");
		return;
	}

	skge_debug = ent;
	register_netdevice_notifier(&skge_notifier);
}

static __exit void skge_debug_cleanup(void)
{
	if (skge_debug) {
		unregister_netdevice_notifier(&skge_notifier);
		debugfs_remove(skge_debug);
		skge_debug = NULL;
	}
}

#else
#define skge_debug_init()
#define skge_debug_cleanup()
#endif
static const struct net_device_ops skge_netdev_ops = {
	.ndo_open		= skge_up,
	.ndo_stop		= skge_down,
	.ndo_start_xmit		= skge_xmit_frame,
	.ndo_do_ioctl		= skge_ioctl,
	.ndo_get_stats		= skge_get_stats,
	.ndo_tx_timeout		= skge_tx_timeout,
	.ndo_change_mtu		= skge_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= skge_set_multicast,
	.ndo_set_mac_address	= skge_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= skge_netpoll,
#endif
};
/* Initialize network device */
static struct net_device *skge_devinit(struct skge_hw *hw, int port,
				       int highmem)
{
	struct skge_port *skge;
	struct net_device *dev = alloc_etherdev(sizeof(*skge));

	if (!dev) {
		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, &hw->pdev->dev);
	dev->netdev_ops = &skge_netdev_ops;
	dev->ethtool_ops = &skge_ethtool_ops;
	dev->watchdog_timeo = TX_WATCHDOG;
	dev->irq = hw->pdev->irq;

	if (highmem)
		dev->features |= NETIF_F_HIGHDMA;

	skge = netdev_priv(dev);
	netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
	skge->netdev = dev;
	skge->hw = hw;
	skge->msg_enable = netif_msg_init(debug, default_msg);

	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;

	/* Auto speed and flow control */
	skge->autoneg = AUTONEG_ENABLE;
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	skge->duplex = -1;
	skge->speed = -1;
	skge->advertising = skge_supported_modes(hw);

	if (device_can_wakeup(&hw->pdev->dev)) {
		skge->wol = wol_supported(hw) & WAKE_MAGIC;
		device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
	}

	hw->dev[port] = dev;

	skge->port = port;

	/* Only used for Genesis XMAC */
	setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);

	if (hw->chip_id != CHIP_ID_GENESIS) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		skge->rx_csum = 1;
	}

	/* read the mac address */
	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* device is off until link detection */
	netif_carrier_off(dev);
	netif_stop_queue(dev);

	return dev;
}
static void __devinit skge_show_addr(struct net_device *dev)
{
	const struct skge_port *skge = netdev_priv(dev);

	netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
static int __devinit skge_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *dev, *dev1;
	struct skge_hw *hw;
	int err, using_dac = 0;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_out_free_regions;
	}

#ifdef __BIG_ENDIAN
	/* byte swap descriptors in hardware */
	{
		u32 reg;

		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
		reg |= PCI_REV_DESC;
		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
	}
#endif

	err = -ENOMEM;
	/* space for skge@pci:0000:04:00.0 */
	hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
		     + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
	if (!hw) {
		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
		goto err_out_free_regions;
	}
	sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));

	hw->pdev = pdev;
	spin_lock_init(&hw->hw_lock);
	spin_lock_init(&hw->phy_lock);
	tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);

	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
	if (!hw->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		goto err_out_free_hw;
	}

	err = skge_reset(hw);
	if (err)
		goto err_out_iounmap;

	pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
		DRV_VERSION,
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		skge_board_name(hw), hw->chip_rev);

	dev = skge_devinit(hw, 0, using_dac);
	if (!dev)
		goto err_out_led_off;

	/* Some motherboards are broken and has zero in ROM. */
	if (!is_valid_ether_addr(dev->dev_addr))
		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "cannot register net device\n");
		goto err_out_free_netdev;
	}

	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
	if (err) {
		dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
			dev->name, pdev->irq);
		goto err_out_unregister;
	}
	skge_show_addr(dev);

	if (hw->ports > 1) {
		dev1 = skge_devinit(hw, 1, using_dac);
		if (dev1 && register_netdev(dev1) == 0)
			skge_show_addr(dev1);
		else {
			/* Failure to register second port need not be fatal */
			dev_warn(&pdev->dev, "register of second port failed\n");
			hw->dev[1] = NULL;
			if (dev1)
				free_netdev(dev1);
		}
	}
	pci_set_drvdata(pdev, hw);

	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_free_netdev:
	free_netdev(dev);
err_out_led_off:
	skge_write16(hw, B0_LED, LED_STAT_OFF);
err_out_iounmap:
	iounmap(hw->regs);
err_out_free_hw:
	kfree(hw);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit skge_remove(struct pci_dev *pdev)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	struct net_device *dev0, *dev1;

	if (!hw)
		return;

	flush_scheduled_work();

	dev1 = hw->dev[1];
	if (dev1)
		unregister_netdev(dev1);
	dev0 = hw->dev[0];
	unregister_netdev(dev0);

	tasklet_disable(&hw->phy_task);

	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask = 0;
	skge_write32(hw, B0_IMSK, 0);
	skge_read32(hw, B0_IMSK);
	spin_unlock_irq(&hw->hw_lock);

	skge_write16(hw, B0_LED, LED_STAT_OFF);
	skge_write8(hw, B0_CTST, CS_RST_SET);

	free_irq(pdev->irq, hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	if (dev1)
		free_netdev(dev1);
	free_netdev(dev0);

	iounmap(hw->regs);
	kfree(hw);
	pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i, err, wol = 0;

	if (!hw)
		return 0;

	err = pci_save_state(pdev);
	if (err)
		return err;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (netif_running(dev))
			skge_down(dev);
		if (skge->wol)
			skge_wol_init(skge);

		wol |= skge->wol;
	}

	skge_write32(hw, B0_IMSK, 0);

	pci_prepare_to_sleep(pdev);

	return 0;
}
static int skge_resume(struct pci_dev *pdev)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i, err;

	if (!hw)
		return 0;

	err = pci_back_from_sleep(pdev);
	if (err)
		goto out;

	err = pci_restore_state(pdev);
	if (err)
		goto out;

	err = skge_reset(hw);
	if (err)
		goto out;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];

		if (netif_running(dev)) {
			err = skge_up(dev);

			if (err) {
				netdev_err(dev, "could not up: %d\n", err);
				dev_close(dev);
				goto out;
			}
		}
	}
out:
	return err;
}
#endif
static void skge_shutdown(struct pci_dev *pdev)
{
	struct skge_hw *hw  = pci_get_drvdata(pdev);
	int i, wol = 0;

	if (!hw)
		return;

	for (i = 0; i < hw->ports; i++) {
		struct net_device *dev = hw->dev[i];
		struct skge_port *skge = netdev_priv(dev);

		if (skge->wol)
			skge_wol_init(skge);
		wol |= skge->wol;
	}

	if (pci_enable_wake(pdev, PCI_D3cold, wol))
		pci_enable_wake(pdev, PCI_D3hot, wol);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
static struct pci_driver skge_driver = {
	.name		= DRV_NAME,
	.id_table	= skge_id_table,
	.probe		= skge_probe,
	.remove		= __devexit_p(skge_remove),
#ifdef CONFIG_PM
	.suspend	= skge_suspend,
	.resume		= skge_resume,
#endif
	.shutdown	= skge_shutdown,
};

static int __init skge_init_module(void)
{
	skge_debug_init();
	return pci_register_driver(&skge_driver);
}

static void __exit skge_cleanup_module(void)
{
	pci_unregister_driver(&skge_driver);
	skge_debug_cleanup();
}

module_init(skge_init_module);
module_exit(skge_cleanup_module);