/* niu.c: Neptune ethernet driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/log2.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/of_device.h>

#include "niu.h"

#define DRV_MODULE_NAME		"niu"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.9"
#define DRV_MODULE_RELDATE	"May 4, 2008"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("NIU ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#ifndef DMA_44BIT_MASK
#define DMA_44BIT_MASK	0x00000fffffffffffULL
#endif
#ifndef readq
static u64 readq(void __iomem *reg)
{
	return (((u64) readl(reg + 0x4UL) << 32) |
		(u64) readl(reg));
}

static void writeq(u64 val, void __iomem *reg)
{
	writel(val & 0xffffffff, reg);
	writel(val >> 32, reg + 0x4UL);
}
#endif
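/* Note on the helpers above: each 64-bit register access is composed of two
 * 32-bit PIO accesses (low word at reg, high word at reg + 0x4UL), so the
 * two halves are not read or written atomically.  A minimal usage sketch,
 * assuming a hypothetical 64-bit register offset FOO_REG (not a real niu.h
 * define):
 *
 *	u64 snap = readq(np->regs + FOO_REG);
 *	writeq(snap | 0x1, np->regs + FOO_REG);
 *
 * Any register whose value can change between the two halves needs external
 * serialization by the caller.
 */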
static struct pci_device_id niu_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
	{}
};

MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
#define NIU_TX_TIMEOUT			(5 * HZ)

#define nr64(reg)		readq(np->regs + (reg))
#define nw64(reg, val)		writeq((val), np->regs + (reg))

#define nr64_mac(reg)		readq(np->mac_regs + (reg))
#define nw64_mac(reg, val)	writeq((val), np->mac_regs + (reg))

#define nr64_ipp(reg)		readq(np->regs + np->ipp_off + (reg))
#define nw64_ipp(reg, val)	writeq((val), np->regs + np->ipp_off + (reg))

#define nr64_pcs(reg)		readq(np->regs + np->pcs_off + (reg))
#define nw64_pcs(reg, val)	writeq((val), np->regs + np->pcs_off + (reg))

#define nr64_xpcs(reg)		readq(np->regs + np->xpcs_off + (reg))
#define nw64_xpcs(reg, val)	writeq((val), np->regs + np->xpcs_off + (reg))

#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");

#define niudbg(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_DEBUG PFX f, ## a); \
} while (0)

#define niuinfo(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_INFO PFX f, ## a); \
} while (0)

#define niuwarn(TYPE, f, a...) \
do {	if ((np)->msg_enable & NETIF_MSG_##TYPE) \
		printk(KERN_WARNING PFX f, ## a); \
} while (0)

#define niu_lock_parent(np, flags) \
	spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
	spin_unlock_irqrestore(&np->parent->lock, flags)
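/* Register access convention used throughout this file: nr64()/nw64() hit
 * the main PIO BAR (np->regs); nr64_mac()/nw64_mac() hit the per-port MAC
 * BAR (np->mac_regs); the _ipp/_pcs/_xpcs variants add the per-port block
 * offsets (np->ipp_off, np->pcs_off, np->xpcs_off) set up at probe time.
 * All of these macros expect a local "struct niu *np" to be in scope,
 * which is why they only appear inside functions that take np.
 */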
static int serdes_init_10g_serdes(struct niu *np);

static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_mac(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;

	nw64_mac(reg, bits);
	err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_mac(reg));

	return err;
}

#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
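/* The ({ ... }) statement-expression wrapper above exists only to add a
 * compile-time sanity check: BUILD_BUG_ON() rejects a non-positive LIMIT or
 * a negative DELAY when the arguments are compile-time constants, and the
 * call then falls through to the __ helper.  An illustrative call (register
 * and bit names here are illustrative, not taken from this excerpt):
 *
 *	err = niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
 *					 XTXMAC_SW_RST_REG_RS,
 *					 1000, 100, "XTXMAC_SW_RST");
 */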
static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
				     u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64_ipp(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay,
					const char *reg_name)
{
	int err;
	u64 val;

	val = nr64_ipp(reg);
	val |= bits;
	nw64_ipp(reg, val);

	err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64_ipp(reg));

	return err;
}

#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
				 u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;
	return 0;
}

#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
})

static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
				    u64 bits, int limit, int delay,
				    const char *reg_name)
{
	int err;

	nw64(reg, bits);
	err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
	if (err)
		dev_err(np->device, PFX "%s: bits (%llx) of register %s "
			"would not clear, val[%llx]\n",
			np->dev->name, (unsigned long long) bits, reg_name,
			(unsigned long long) nr64(reg));

	return err;
}

#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
({	BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
	__niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
})
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
	u64 val = (u64) lp->timer;

	if (on)
		val |= LDG_IMGMT_ARM;

	nw64(LDG_IMGMT(lp->ldg_num), val);
}
static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg, bits;
	u64 val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;
	else
		val |= bits;
	nw64(mask_reg, val);

	return 0;
}
static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
{
	struct niu_parent *parent = np->parent;
	int i, err;

	for (i = 0; i <= LDN_MAX; i++) {
		if (parent->ldg_map[i] != lp->ldg_num)
			continue;

		err = niu_ldn_irq_enable(np, i, on);
		if (err)
			return err;
	}
	return 0;
}
static int niu_enable_interrupts(struct niu *np, int on)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];
		int err;

		err = niu_enable_ldn_in_ldg(np, lp, on);
		if (err)
			return err;
	}
	for (i = 0; i < np->num_ldg; i++)
		niu_ldg_rearm(np, &np->ldg[i], on);

	return 0;
}
static u32 phy_encode(u32 type, int port)
{
	return (type << (port * 2));
}

static u32 phy_decode(u32 val, int port)
{
	return (val >> (port * 2)) & PORT_TYPE_MASK;
}
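/* phy_encode()/phy_decode() pack one 2-bit port-type code per port into a
 * single u32: port N occupies bits [2N+1:2N], and phy_decode() masks the
 * result with PORT_TYPE_MASK.  A small worked example, assuming the
 * PORT_TYPE_10G and PORT_TYPE_1G codes used elsewhere in the driver:
 *
 *	u32 map = phy_encode(PORT_TYPE_10G, 0) | phy_encode(PORT_TYPE_1G, 1);
 *	// phy_decode(map, 0) == PORT_TYPE_10G
 *	// phy_decode(map, 1) == PORT_TYPE_1G
 */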
static int mdio_wait(struct niu *np)
{
	int limit = 1000;
	u64 val;

	while (--limit > 0) {
		val = nr64(MIF_FRAME_OUTPUT);
		if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
			return val & MIF_FRAME_OUTPUT_DATA;

		udelay(10);
	}

	return -ENODEV;
}

static int mdio_read(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);
}

static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}

static int mii_read(struct niu *np, int port, int reg)
{
	nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
	return mdio_wait(np);
}

static int mii_write(struct niu *np, int port, int reg, int data)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
	err = mdio_wait(np);
	if (err < 0)
		return err;

	return 0;
}
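/* Framing note for the helpers above: the mdio_*() routines issue IEEE
 * 802.3 clause 45 frames (an address cycle via MDIO_ADDR_OP followed by a
 * read or write cycle), which is what the 10G PHYs and SERDES blocks
 * expect, while the mii_*() routines issue plain clause 22 frames for 1G
 * copper PHYs.  Both poll MIF_FRAME_OUTPUT through mdio_wait() until the
 * turn-around bit signals completion and then return the data field.
 */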
static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel),
				 val >> 16);
	return err;
}

static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_RX_CFG_L(channel),
			 val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_RX_CFG_H(channel),
				 val >> 16);
	return err;
}
/* Mode is always 10G fiber.  */
static int serdes_init_niu(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u32 tx_cfg, rx_cfg;
	unsigned long i;

	tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
	rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
		  PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
		  PLL_RX_CFG_EQ_LP_ADAPTIVE);

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;

		mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			   ESR2_TI_PLL_TEST_CFG_L, test_cfg);

		tx_cfg |= PLL_TX_CFG_ENTEST;
		rx_cfg |= PLL_RX_CFG_ENTEST;
	}

	/* Initialize all 4 lanes of the SERDES.  */
	for (i = 0; i < 4; i++) {
		int err = esr2_set_tx_cfg(np, i, tx_cfg);
		if (err)
			return err;
	}

	for (i = 0; i < 4; i++) {
		int err = esr2_set_rx_cfg(np, i, rx_cfg);
		if (err)
			return err;
	}

	return 0;
}
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_GLUE_CTRL0_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_GLUE_CTRL0_H(chan));
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}

static int esr_read_reset(struct niu *np, u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
			ESR_RXTX_RESET_CTRL_L);
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_RESET_CTRL_H);
		if (err >= 0) {
			*val |= ((err & 0xffff) << 16);
			err = 0;
		}
	}
	return err;
}
static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_CTRL_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_RXTX_CTRL_H(chan), (val >> 16));
	return err;
}

static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_GLUE_CTRL0_L(chan), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
				 ESR_GLUE_CTRL0_H(chan), (val >> 16));
	return err;
}
static int esr_reset(struct niu *np)
{
	u32 reset;
	int err;

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0x0000);
	if (err)
		return err;
	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_L, 0xffff);
	if (err)
		return err;
	udelay(200);

	err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
			 ESR_RXTX_RESET_CTRL_H, 0x0000);
	if (err)
		return err;
	udelay(200);

	err = esr_read_reset(np, &reset);
	if (err)
		return err;
	if (reset != 0) {
		dev_err(np->device, PFX "Port %u ESR_RESET "
			"did not clear [%08x]\n",
			np->port, reset);
		return -ENODEV;
	}

	return 0;
}
563 static int serdes_init_10g(struct niu
*np
)
565 struct niu_link_config
*lp
= &np
->link_config
;
566 unsigned long ctrl_reg
, test_cfg_reg
, i
;
567 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
572 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
573 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
576 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
577 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
583 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
584 ENET_SERDES_CTRL_SDET_1
|
585 ENET_SERDES_CTRL_SDET_2
|
586 ENET_SERDES_CTRL_SDET_3
|
587 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
588 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
589 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
590 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
591 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
592 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
593 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
594 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
597 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
598 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
599 ENET_SERDES_TEST_MD_0_SHIFT
) |
600 (ENET_TEST_MD_PAD_LOOPBACK
<<
601 ENET_SERDES_TEST_MD_1_SHIFT
) |
602 (ENET_TEST_MD_PAD_LOOPBACK
<<
603 ENET_SERDES_TEST_MD_2_SHIFT
) |
604 (ENET_TEST_MD_PAD_LOOPBACK
<<
605 ENET_SERDES_TEST_MD_3_SHIFT
));
608 nw64(ctrl_reg
, ctrl_val
);
609 nw64(test_cfg_reg
, test_cfg_val
);
611 /* Initialize all 4 lanes of the SERDES. */
612 for (i
= 0; i
< 4; i
++) {
613 u32 rxtx_ctrl
, glue0
;
615 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
618 err
= esr_read_glue0(np
, i
, &glue0
);
622 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
623 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
624 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
626 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
627 ESR_GLUE_CTRL0_THCNT
|
628 ESR_GLUE_CTRL0_BLTIME
);
629 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
630 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
631 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
632 (BLTIME_300_CYCLES
<<
633 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
635 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
638 err
= esr_write_glue0(np
, i
, glue0
);
647 sig
= nr64(ESR_INT_SIGNALS
);
650 mask
= ESR_INT_SIGNALS_P0_BITS
;
651 val
= (ESR_INT_SRDY0_P0
|
661 mask
= ESR_INT_SIGNALS_P1_BITS
;
662 val
= (ESR_INT_SRDY0_P1
|
675 if ((sig
& mask
) != val
) {
676 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
677 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
680 dev_err(np
->device
, PFX
"Port %u signal bits [%08x] are not "
681 "[%08x]\n", np
->port
, (int) (sig
& mask
), (int) val
);
684 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
)
685 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
689 static int serdes_init_1g(struct niu
*np
)
693 val
= nr64(ENET_SERDES_1_PLL_CFG
);
694 val
&= ~ENET_SERDES_PLL_FBDIV2
;
697 val
|= ENET_SERDES_PLL_HRATE0
;
700 val
|= ENET_SERDES_PLL_HRATE1
;
703 val
|= ENET_SERDES_PLL_HRATE2
;
706 val
|= ENET_SERDES_PLL_HRATE3
;
711 nw64(ENET_SERDES_1_PLL_CFG
, val
);
716 static int serdes_init_1g_serdes(struct niu
*np
)
718 struct niu_link_config
*lp
= &np
->link_config
;
719 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
720 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
722 u64 reset_val
, val_rd
;
724 val
= ENET_SERDES_PLL_HRATE0
| ENET_SERDES_PLL_HRATE1
|
725 ENET_SERDES_PLL_HRATE2
| ENET_SERDES_PLL_HRATE3
|
726 ENET_SERDES_PLL_FBDIV0
;
729 reset_val
= ENET_SERDES_RESET_0
;
730 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
731 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
732 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
735 reset_val
= ENET_SERDES_RESET_1
;
736 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
737 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
738 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
744 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
745 ENET_SERDES_CTRL_SDET_1
|
746 ENET_SERDES_CTRL_SDET_2
|
747 ENET_SERDES_CTRL_SDET_3
|
748 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
749 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
750 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
751 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
752 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
753 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
754 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
755 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
758 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
759 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
760 ENET_SERDES_TEST_MD_0_SHIFT
) |
761 (ENET_TEST_MD_PAD_LOOPBACK
<<
762 ENET_SERDES_TEST_MD_1_SHIFT
) |
763 (ENET_TEST_MD_PAD_LOOPBACK
<<
764 ENET_SERDES_TEST_MD_2_SHIFT
) |
765 (ENET_TEST_MD_PAD_LOOPBACK
<<
766 ENET_SERDES_TEST_MD_3_SHIFT
));
769 nw64(ENET_SERDES_RESET
, reset_val
);
771 val_rd
= nr64(ENET_SERDES_RESET
);
772 val_rd
&= ~reset_val
;
774 nw64(ctrl_reg
, ctrl_val
);
775 nw64(test_cfg_reg
, test_cfg_val
);
776 nw64(ENET_SERDES_RESET
, val_rd
);
779 /* Initialize all 4 lanes of the SERDES. */
780 for (i
= 0; i
< 4; i
++) {
781 u32 rxtx_ctrl
, glue0
;
783 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
786 err
= esr_read_glue0(np
, i
, &glue0
);
790 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
791 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
792 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
794 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
795 ESR_GLUE_CTRL0_THCNT
|
796 ESR_GLUE_CTRL0_BLTIME
);
797 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
798 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
799 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
800 (BLTIME_300_CYCLES
<<
801 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
803 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
806 err
= esr_write_glue0(np
, i
, glue0
);
812 sig
= nr64(ESR_INT_SIGNALS
);
815 val
= (ESR_INT_SRDY0_P0
| ESR_INT_DET0_P0
);
820 val
= (ESR_INT_SRDY0_P1
| ESR_INT_DET0_P1
);
828 if ((sig
& mask
) != val
) {
829 dev_err(np
->device
, PFX
"Port %u signal bits [%08x] are not "
830 "[%08x]\n", np
->port
, (int) (sig
& mask
), (int) val
);
837 static int link_status_1g_serdes(struct niu
*np
, int *link_up_p
)
839 struct niu_link_config
*lp
= &np
->link_config
;
847 current_speed
= SPEED_INVALID
;
848 current_duplex
= DUPLEX_INVALID
;
850 spin_lock_irqsave(&np
->lock
, flags
);
852 val
= nr64_pcs(PCS_MII_STAT
);
854 if (val
& PCS_MII_STAT_LINK_STATUS
) {
856 current_speed
= SPEED_1000
;
857 current_duplex
= DUPLEX_FULL
;
860 lp
->active_speed
= current_speed
;
861 lp
->active_duplex
= current_duplex
;
862 spin_unlock_irqrestore(&np
->lock
, flags
);
864 *link_up_p
= link_up
;
868 static int link_status_10g_serdes(struct niu
*np
, int *link_up_p
)
871 struct niu_link_config
*lp
= &np
->link_config
;
878 if (!(np
->flags
& NIU_FLAGS_10G
))
879 return link_status_1g_serdes(np
, link_up_p
);
881 current_speed
= SPEED_INVALID
;
882 current_duplex
= DUPLEX_INVALID
;
883 spin_lock_irqsave(&np
->lock
, flags
);
885 val
= nr64_xpcs(XPCS_STATUS(0));
886 val2
= nr64_mac(XMAC_INTER2
);
887 if (val2
& 0x01000000)
890 if ((val
& 0x1000ULL
) && link_ok
) {
892 current_speed
= SPEED_10000
;
893 current_duplex
= DUPLEX_FULL
;
895 lp
->active_speed
= current_speed
;
896 lp
->active_duplex
= current_duplex
;
897 spin_unlock_irqrestore(&np
->lock
, flags
);
898 *link_up_p
= link_up
;
902 static int link_status_1g_rgmii(struct niu
*np
, int *link_up_p
)
904 struct niu_link_config
*lp
= &np
->link_config
;
905 u16 current_speed
, bmsr
;
911 current_speed
= SPEED_INVALID
;
912 current_duplex
= DUPLEX_INVALID
;
914 spin_lock_irqsave(&np
->lock
, flags
);
918 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
923 if (bmsr
& BMSR_LSTATUS
) {
924 u16 adv
, lpa
, common
, estat
;
926 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
931 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
938 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
943 current_speed
= SPEED_1000
;
944 current_duplex
= DUPLEX_FULL
;
947 lp
->active_speed
= current_speed
;
948 lp
->active_duplex
= current_duplex
;
952 spin_unlock_irqrestore(&np
->lock
, flags
);
954 *link_up_p
= link_up
;
958 static int bcm8704_reset(struct niu
*np
)
962 err
= mdio_read(np
, np
->phy_addr
,
963 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
967 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
973 while (--limit
>= 0) {
974 err
= mdio_read(np
, np
->phy_addr
,
975 BCM8704_PHYXS_DEV_ADDR
, MII_BMCR
);
978 if (!(err
& BMCR_RESET
))
982 dev_err(np
->device
, PFX
"Port %u PHY will not reset "
983 "(bmcr=%04x)\n", np
->port
, (err
& 0xffff));
989 /* When written, certain PHY registers need to be read back twice
990 * in order for the bits to settle properly.
992 static int bcm8704_user_dev3_readback(struct niu
*np
, int reg
)
994 int err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
997 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, reg
);
1003 static int bcm8706_init_user_dev3(struct niu
*np
)
1008 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1009 BCM8704_USER_OPT_DIGITAL_CTRL
);
1012 err
&= ~USER_ODIG_CTRL_GPIOS
;
1013 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1014 err
|= USER_ODIG_CTRL_RESV2
;
1015 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1016 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1025 static int bcm8704_init_user_dev3(struct niu
*np
)
1029 err
= mdio_write(np
, np
->phy_addr
,
1030 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_CONTROL
,
1031 (USER_CONTROL_OPTXRST_LVL
|
1032 USER_CONTROL_OPBIASFLT_LVL
|
1033 USER_CONTROL_OBTMPFLT_LVL
|
1034 USER_CONTROL_OPPRFLT_LVL
|
1035 USER_CONTROL_OPTXFLT_LVL
|
1036 USER_CONTROL_OPRXLOS_LVL
|
1037 USER_CONTROL_OPRXFLT_LVL
|
1038 USER_CONTROL_OPTXON_LVL
|
1039 (0x3f << USER_CONTROL_RES1_SHIFT
)));
1043 err
= mdio_write(np
, np
->phy_addr
,
1044 BCM8704_USER_DEV3_ADDR
, BCM8704_USER_PMD_TX_CONTROL
,
1045 (USER_PMD_TX_CTL_XFP_CLKEN
|
1046 (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH
) |
1047 (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH
) |
1048 USER_PMD_TX_CTL_TSCK_LPWREN
));
1052 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_CONTROL
);
1055 err
= bcm8704_user_dev3_readback(np
, BCM8704_USER_PMD_TX_CONTROL
);
1059 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1060 BCM8704_USER_OPT_DIGITAL_CTRL
);
1063 err
&= ~USER_ODIG_CTRL_GPIOS
;
1064 err
|= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT
);
1065 err
= mdio_write(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1066 BCM8704_USER_OPT_DIGITAL_CTRL
, err
);
1075 static int mrvl88x2011_act_led(struct niu
*np
, int val
)
1079 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1080 MRVL88X2011_LED_8_TO_11_CTL
);
1084 err
&= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT
,MRVL88X2011_LED_CTL_MASK
);
1085 err
|= MRVL88X2011_LED(MRVL88X2011_LED_ACT
,val
);
1087 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1088 MRVL88X2011_LED_8_TO_11_CTL
, err
);
1091 static int mrvl88x2011_led_blink_rate(struct niu
*np
, int rate
)
1095 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1096 MRVL88X2011_LED_BLINK_CTL
);
1098 err
&= ~MRVL88X2011_LED_BLKRATE_MASK
;
1101 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV2_ADDR
,
1102 MRVL88X2011_LED_BLINK_CTL
, err
);
1108 static int xcvr_init_10g_mrvl88x2011(struct niu
*np
)
1112 /* Set LED functions */
1113 err
= mrvl88x2011_led_blink_rate(np
, MRVL88X2011_LED_BLKRATE_134MS
);
1118 err
= mrvl88x2011_act_led(np
, MRVL88X2011_LED_CTL_OFF
);
1122 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1123 MRVL88X2011_GENERAL_CTL
);
1127 err
|= MRVL88X2011_ENA_XFPREFCLK
;
1129 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1130 MRVL88X2011_GENERAL_CTL
, err
);
1134 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1135 MRVL88X2011_PMA_PMD_CTL_1
);
1139 if (np
->link_config
.loopback_mode
== LOOPBACK_MAC
)
1140 err
|= MRVL88X2011_LOOPBACK
;
1142 err
&= ~MRVL88X2011_LOOPBACK
;
1144 err
= mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1145 MRVL88X2011_PMA_PMD_CTL_1
, err
);
1150 return mdio_write(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1151 MRVL88X2011_10G_PMD_TX_DIS
, MRVL88X2011_ENA_PMDTX
);
1155 static int xcvr_diag_bcm870x(struct niu
*np
)
1157 u16 analog_stat0
, tx_alarm_status
;
1161 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1165 pr_info(PFX
"Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
1168 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
, 0x20);
1171 pr_info(PFX
"Port %u USER_DEV3(0x20) [%04x]\n",
1174 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1178 pr_info(PFX
"Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
1182 /* XXX dig this out it might not be so useful XXX */
1183 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1184 BCM8704_USER_ANALOG_STATUS0
);
1187 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1188 BCM8704_USER_ANALOG_STATUS0
);
1193 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1194 BCM8704_USER_TX_ALARM_STATUS
);
1197 err
= mdio_read(np
, np
->phy_addr
, BCM8704_USER_DEV3_ADDR
,
1198 BCM8704_USER_TX_ALARM_STATUS
);
1201 tx_alarm_status
= err
;
1203 if (analog_stat0
!= 0x03fc) {
1204 if ((analog_stat0
== 0x43bc) && (tx_alarm_status
!= 0)) {
1205 pr_info(PFX
"Port %u cable not connected "
1206 "or bad cable.\n", np
->port
);
1207 } else if (analog_stat0
== 0x639c) {
1208 pr_info(PFX
"Port %u optical module is bad "
1209 "or missing.\n", np
->port
);
1216 static int xcvr_10g_set_lb_bcm870x(struct niu
*np
)
1218 struct niu_link_config
*lp
= &np
->link_config
;
1221 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1226 err
&= ~BMCR_LOOPBACK
;
1228 if (lp
->loopback_mode
== LOOPBACK_MAC
)
1229 err
|= BMCR_LOOPBACK
;
1231 err
= mdio_write(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1239 static int xcvr_init_10g_bcm8706(struct niu
*np
)
1244 if ((np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) &&
1245 (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) == 0)
1248 val
= nr64_mac(XMAC_CONFIG
);
1249 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1250 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1251 nw64_mac(XMAC_CONFIG
, val
);
1253 val
= nr64(MIF_CONFIG
);
1254 val
|= MIF_CONFIG_INDIRECT_MODE
;
1255 nw64(MIF_CONFIG
, val
);
1257 err
= bcm8704_reset(np
);
1261 err
= xcvr_10g_set_lb_bcm870x(np
);
1265 err
= bcm8706_init_user_dev3(np
);
1269 err
= xcvr_diag_bcm870x(np
);
1276 static int xcvr_init_10g_bcm8704(struct niu
*np
)
1280 err
= bcm8704_reset(np
);
1284 err
= bcm8704_init_user_dev3(np
);
1288 err
= xcvr_10g_set_lb_bcm870x(np
);
1292 err
= xcvr_diag_bcm870x(np
);
1299 static int xcvr_init_10g(struct niu
*np
)
1304 val
= nr64_mac(XMAC_CONFIG
);
1305 val
&= ~XMAC_CONFIG_LED_POLARITY
;
1306 val
|= XMAC_CONFIG_FORCE_LED_ON
;
1307 nw64_mac(XMAC_CONFIG
, val
);
1309 /* XXX shared resource, lock parent XXX */
1310 val
= nr64(MIF_CONFIG
);
1311 val
|= MIF_CONFIG_INDIRECT_MODE
;
1312 nw64(MIF_CONFIG
, val
);
1314 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
1315 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
1317 /* handle different phy types */
1318 switch (phy_id
& NIU_PHY_ID_MASK
) {
1319 case NIU_PHY_ID_MRVL88X2011
:
1320 err
= xcvr_init_10g_mrvl88x2011(np
);
1323 default: /* bcom 8704 */
1324 err
= xcvr_init_10g_bcm8704(np
);
1331 static int mii_reset(struct niu
*np
)
1335 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, BMCR_RESET
);
1340 while (--limit
>= 0) {
1342 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1345 if (!(err
& BMCR_RESET
))
1349 dev_err(np
->device
, PFX
"Port %u MII would not reset, "
1350 "bmcr[%04x]\n", np
->port
, err
);
1357 static int xcvr_init_1g_rgmii(struct niu
*np
)
1361 u16 bmcr
, bmsr
, estat
;
1363 val
= nr64(MIF_CONFIG
);
1364 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1365 nw64(MIF_CONFIG
, val
);
1367 err
= mii_reset(np
);
1371 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1377 if (bmsr
& BMSR_ESTATEN
) {
1378 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1385 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1389 if (bmsr
& BMSR_ESTATEN
) {
1392 if (estat
& ESTATUS_1000_TFULL
)
1393 ctrl1000
|= ADVERTISE_1000FULL
;
1394 err
= mii_write(np
, np
->phy_addr
, MII_CTRL1000
, ctrl1000
);
1399 bmcr
= (BMCR_SPEED1000
| BMCR_FULLDPLX
);
1401 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1405 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1408 bmcr
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1410 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1417 static int mii_init_common(struct niu
*np
)
1419 struct niu_link_config
*lp
= &np
->link_config
;
1420 u16 bmcr
, bmsr
, adv
, estat
;
1423 err
= mii_reset(np
);
1427 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1433 if (bmsr
& BMSR_ESTATEN
) {
1434 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1441 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1445 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
1446 bmcr
|= BMCR_LOOPBACK
;
1447 if (lp
->active_speed
== SPEED_1000
)
1448 bmcr
|= BMCR_SPEED1000
;
1449 if (lp
->active_duplex
== DUPLEX_FULL
)
1450 bmcr
|= BMCR_FULLDPLX
;
1453 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
1456 aux
= (BCM5464R_AUX_CTL_EXT_LB
|
1457 BCM5464R_AUX_CTL_WRITE_1
);
1458 err
= mii_write(np
, np
->phy_addr
, BCM5464R_AUX_CTL
, aux
);
1463 /* XXX configurable XXX */
1464 /* XXX for now don't advertise half-duplex or asym pause... XXX */
1465 adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1466 if (bmsr
& BMSR_10FULL
)
1467 adv
|= ADVERTISE_10FULL
;
1468 if (bmsr
& BMSR_100FULL
)
1469 adv
|= ADVERTISE_100FULL
;
1470 err
= mii_write(np
, np
->phy_addr
, MII_ADVERTISE
, adv
);
1474 if (bmsr
& BMSR_ESTATEN
) {
1477 if (estat
& ESTATUS_1000_TFULL
)
1478 ctrl1000
|= ADVERTISE_1000FULL
;
1479 err
= mii_write(np
, np
->phy_addr
, MII_CTRL1000
, ctrl1000
);
1483 bmcr
|= (BMCR_ANENABLE
| BMCR_ANRESTART
);
1485 err
= mii_write(np
, np
->phy_addr
, MII_BMCR
, bmcr
);
1489 err
= mii_read(np
, np
->phy_addr
, MII_BMCR
);
1492 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1496 pr_info(PFX
"Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
1497 np
->port
, bmcr
, bmsr
);
1503 static int xcvr_init_1g(struct niu
*np
)
1507 /* XXX shared resource, lock parent XXX */
1508 val
= nr64(MIF_CONFIG
);
1509 val
&= ~MIF_CONFIG_INDIRECT_MODE
;
1510 nw64(MIF_CONFIG
, val
);
1512 return mii_init_common(np
);
1515 static int niu_xcvr_init(struct niu
*np
)
1517 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1522 err
= ops
->xcvr_init(np
);
1527 static int niu_serdes_init(struct niu
*np
)
1529 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1533 if (ops
->serdes_init
)
1534 err
= ops
->serdes_init(np
);
1539 static void niu_init_xif(struct niu
*);
1540 static void niu_handle_led(struct niu
*, int status
);
1542 static int niu_link_status_common(struct niu
*np
, int link_up
)
1544 struct niu_link_config
*lp
= &np
->link_config
;
1545 struct net_device
*dev
= np
->dev
;
1546 unsigned long flags
;
1548 if (!netif_carrier_ok(dev
) && link_up
) {
1549 niuinfo(LINK
, "%s: Link is up at %s, %s duplex\n",
1551 (lp
->active_speed
== SPEED_10000
?
1553 (lp
->active_speed
== SPEED_1000
?
1555 (lp
->active_speed
== SPEED_100
?
1556 "100Mbit/sec" : "10Mbit/sec"))),
1557 (lp
->active_duplex
== DUPLEX_FULL
?
1560 spin_lock_irqsave(&np
->lock
, flags
);
1562 niu_handle_led(np
, 1);
1563 spin_unlock_irqrestore(&np
->lock
, flags
);
1565 netif_carrier_on(dev
);
1566 } else if (netif_carrier_ok(dev
) && !link_up
) {
1567 niuwarn(LINK
, "%s: Link is down\n", dev
->name
);
1568 spin_lock_irqsave(&np
->lock
, flags
);
1569 niu_handle_led(np
, 0);
1570 spin_unlock_irqrestore(&np
->lock
, flags
);
1571 netif_carrier_off(dev
);
1577 static int link_status_10g_mrvl(struct niu
*np
, int *link_up_p
)
1579 int err
, link_up
, pma_status
, pcs_status
;
1583 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1584 MRVL88X2011_10G_PMD_STATUS_2
);
1588 /* Check PMA/PMD Register: 1.0001.2 == 1 */
1589 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV1_ADDR
,
1590 MRVL88X2011_PMA_PMD_STATUS_1
);
1594 pma_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1596 /* Check PMC Register : 3.0001.2 == 1: read twice */
1597 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1598 MRVL88X2011_PMA_PMD_STATUS_1
);
1602 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV3_ADDR
,
1603 MRVL88X2011_PMA_PMD_STATUS_1
);
1607 pcs_status
= ((err
& MRVL88X2011_LNK_STATUS_OK
) ? 1 : 0);
1609 /* Check XGXS Register : 4.0018.[0-3,12] */
1610 err
= mdio_read(np
, np
->phy_addr
, MRVL88X2011_USER_DEV4_ADDR
,
1611 MRVL88X2011_10G_XGXS_LANE_STAT
);
1615 if (err
== (PHYXS_XGXS_LANE_STAT_ALINGED
| PHYXS_XGXS_LANE_STAT_LANE3
|
1616 PHYXS_XGXS_LANE_STAT_LANE2
| PHYXS_XGXS_LANE_STAT_LANE1
|
1617 PHYXS_XGXS_LANE_STAT_LANE0
| PHYXS_XGXS_LANE_STAT_MAGIC
|
1619 link_up
= (pma_status
&& pcs_status
) ? 1 : 0;
1621 np
->link_config
.active_speed
= SPEED_10000
;
1622 np
->link_config
.active_duplex
= DUPLEX_FULL
;
1625 mrvl88x2011_act_led(np
, (link_up
?
1626 MRVL88X2011_LED_CTL_PCS_ACT
:
1627 MRVL88X2011_LED_CTL_OFF
));
1629 *link_up_p
= link_up
;
1633 static int link_status_10g_bcm8706(struct niu
*np
, int *link_up_p
)
1638 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1639 BCM8704_PMD_RCV_SIGDET
);
1642 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
1647 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1648 BCM8704_PCS_10G_R_STATUS
);
1652 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
1657 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1658 BCM8704_PHYXS_XGXS_LANE_STAT
);
1661 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
1662 PHYXS_XGXS_LANE_STAT_MAGIC
|
1663 PHYXS_XGXS_LANE_STAT_PATTEST
|
1664 PHYXS_XGXS_LANE_STAT_LANE3
|
1665 PHYXS_XGXS_LANE_STAT_LANE2
|
1666 PHYXS_XGXS_LANE_STAT_LANE1
|
1667 PHYXS_XGXS_LANE_STAT_LANE0
)) {
1669 np
->link_config
.active_speed
= SPEED_INVALID
;
1670 np
->link_config
.active_duplex
= DUPLEX_INVALID
;
1675 np
->link_config
.active_speed
= SPEED_10000
;
1676 np
->link_config
.active_duplex
= DUPLEX_FULL
;
1680 *link_up_p
= link_up
;
1681 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
)
1686 static int link_status_10g_bcom(struct niu
*np
, int *link_up_p
)
1692 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PMA_PMD_DEV_ADDR
,
1693 BCM8704_PMD_RCV_SIGDET
);
1696 if (!(err
& PMD_RCV_SIGDET_GLOBAL
)) {
1701 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PCS_DEV_ADDR
,
1702 BCM8704_PCS_10G_R_STATUS
);
1705 if (!(err
& PCS_10G_R_STATUS_BLK_LOCK
)) {
1710 err
= mdio_read(np
, np
->phy_addr
, BCM8704_PHYXS_DEV_ADDR
,
1711 BCM8704_PHYXS_XGXS_LANE_STAT
);
1715 if (err
!= (PHYXS_XGXS_LANE_STAT_ALINGED
|
1716 PHYXS_XGXS_LANE_STAT_MAGIC
|
1717 PHYXS_XGXS_LANE_STAT_LANE3
|
1718 PHYXS_XGXS_LANE_STAT_LANE2
|
1719 PHYXS_XGXS_LANE_STAT_LANE1
|
1720 PHYXS_XGXS_LANE_STAT_LANE0
)) {
1726 np
->link_config
.active_speed
= SPEED_10000
;
1727 np
->link_config
.active_duplex
= DUPLEX_FULL
;
1731 *link_up_p
= link_up
;
1735 static int link_status_10g(struct niu
*np
, int *link_up_p
)
1737 unsigned long flags
;
1740 spin_lock_irqsave(&np
->lock
, flags
);
1742 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
1745 phy_id
= phy_decode(np
->parent
->port_phy
, np
->port
);
1746 phy_id
= np
->parent
->phy_probe_info
.phy_id
[phy_id
][np
->port
];
1748 /* handle different phy types */
1749 switch (phy_id
& NIU_PHY_ID_MASK
) {
1750 case NIU_PHY_ID_MRVL88X2011
:
1751 err
= link_status_10g_mrvl(np
, link_up_p
);
1754 default: /* bcom 8704 */
1755 err
= link_status_10g_bcom(np
, link_up_p
);
1760 spin_unlock_irqrestore(&np
->lock
, flags
);
1765 static int niu_10g_phy_present(struct niu
*np
)
1769 sig
= nr64(ESR_INT_SIGNALS
);
1772 mask
= ESR_INT_SIGNALS_P0_BITS
;
1773 val
= (ESR_INT_SRDY0_P0
|
1776 ESR_INT_XDP_P0_CH3
|
1777 ESR_INT_XDP_P0_CH2
|
1778 ESR_INT_XDP_P0_CH1
|
1779 ESR_INT_XDP_P0_CH0
);
1783 mask
= ESR_INT_SIGNALS_P1_BITS
;
1784 val
= (ESR_INT_SRDY0_P1
|
1787 ESR_INT_XDP_P1_CH3
|
1788 ESR_INT_XDP_P1_CH2
|
1789 ESR_INT_XDP_P1_CH1
|
1790 ESR_INT_XDP_P1_CH0
);
1797 if ((sig
& mask
) != val
)
1802 static int link_status_10g_hotplug(struct niu
*np
, int *link_up_p
)
1804 unsigned long flags
;
1807 int phy_present_prev
;
1809 spin_lock_irqsave(&np
->lock
, flags
);
1811 if (np
->link_config
.loopback_mode
== LOOPBACK_DISABLED
) {
1812 phy_present_prev
= (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
) ?
1814 phy_present
= niu_10g_phy_present(np
);
1815 if (phy_present
!= phy_present_prev
) {
1818 np
->flags
|= NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
1819 if (np
->phy_ops
->xcvr_init
)
1820 err
= np
->phy_ops
->xcvr_init(np
);
1823 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
1826 np
->flags
&= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT
;
1828 niuwarn(LINK
, "%s: Hotplug PHY Removed\n",
1832 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY_PRESENT
)
1833 err
= link_status_10g_bcm8706(np
, link_up_p
);
1836 spin_unlock_irqrestore(&np
->lock
, flags
);
1841 static int link_status_1g(struct niu
*np
, int *link_up_p
)
1843 struct niu_link_config
*lp
= &np
->link_config
;
1844 u16 current_speed
, bmsr
;
1845 unsigned long flags
;
1850 current_speed
= SPEED_INVALID
;
1851 current_duplex
= DUPLEX_INVALID
;
1853 spin_lock_irqsave(&np
->lock
, flags
);
1856 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
1859 err
= mii_read(np
, np
->phy_addr
, MII_BMSR
);
1864 if (bmsr
& BMSR_LSTATUS
) {
1865 u16 adv
, lpa
, common
, estat
;
1867 err
= mii_read(np
, np
->phy_addr
, MII_ADVERTISE
);
1872 err
= mii_read(np
, np
->phy_addr
, MII_LPA
);
1879 err
= mii_read(np
, np
->phy_addr
, MII_ESTATUS
);
1885 if (estat
& (ESTATUS_1000_TFULL
| ESTATUS_1000_THALF
)) {
1886 current_speed
= SPEED_1000
;
1887 if (estat
& ESTATUS_1000_TFULL
)
1888 current_duplex
= DUPLEX_FULL
;
1890 current_duplex
= DUPLEX_HALF
;
1892 if (common
& ADVERTISE_100BASE4
) {
1893 current_speed
= SPEED_100
;
1894 current_duplex
= DUPLEX_HALF
;
1895 } else if (common
& ADVERTISE_100FULL
) {
1896 current_speed
= SPEED_100
;
1897 current_duplex
= DUPLEX_FULL
;
1898 } else if (common
& ADVERTISE_100HALF
) {
1899 current_speed
= SPEED_100
;
1900 current_duplex
= DUPLEX_HALF
;
1901 } else if (common
& ADVERTISE_10FULL
) {
1902 current_speed
= SPEED_10
;
1903 current_duplex
= DUPLEX_FULL
;
1904 } else if (common
& ADVERTISE_10HALF
) {
1905 current_speed
= SPEED_10
;
1906 current_duplex
= DUPLEX_HALF
;
1911 lp
->active_speed
= current_speed
;
1912 lp
->active_duplex
= current_duplex
;
1916 spin_unlock_irqrestore(&np
->lock
, flags
);
1918 *link_up_p
= link_up
;
1922 static int niu_link_status(struct niu
*np
, int *link_up_p
)
1924 const struct niu_phy_ops
*ops
= np
->phy_ops
;
1928 if (ops
->link_status
)
1929 err
= ops
->link_status(np
, link_up_p
);
1934 static void niu_timer(unsigned long __opaque
)
1936 struct niu
*np
= (struct niu
*) __opaque
;
1940 err
= niu_link_status(np
, &link_up
);
1942 niu_link_status_common(np
, link_up
);
1944 if (netif_carrier_ok(np
->dev
))
1948 np
->timer
.expires
= jiffies
+ off
;
1950 add_timer(&np
->timer
);
1953 static const struct niu_phy_ops phy_ops_10g_serdes
= {
1954 .serdes_init
= serdes_init_10g_serdes
,
1955 .link_status
= link_status_10g_serdes
,
1958 static const struct niu_phy_ops phy_ops_1g_rgmii
= {
1959 .xcvr_init
= xcvr_init_1g_rgmii
,
1960 .link_status
= link_status_1g_rgmii
,
1963 static const struct niu_phy_ops phy_ops_10g_fiber_niu
= {
1964 .serdes_init
= serdes_init_niu
,
1965 .xcvr_init
= xcvr_init_10g
,
1966 .link_status
= link_status_10g
,
1969 static const struct niu_phy_ops phy_ops_10g_fiber
= {
1970 .serdes_init
= serdes_init_10g
,
1971 .xcvr_init
= xcvr_init_10g
,
1972 .link_status
= link_status_10g
,
1975 static const struct niu_phy_ops phy_ops_10g_fiber_hotplug
= {
1976 .serdes_init
= serdes_init_10g
,
1977 .xcvr_init
= xcvr_init_10g_bcm8706
,
1978 .link_status
= link_status_10g_hotplug
,
1981 static const struct niu_phy_ops phy_ops_10g_copper
= {
1982 .serdes_init
= serdes_init_10g
,
1983 .link_status
= link_status_10g
, /* XXX */
1986 static const struct niu_phy_ops phy_ops_1g_fiber
= {
1987 .serdes_init
= serdes_init_1g
,
1988 .xcvr_init
= xcvr_init_1g
,
1989 .link_status
= link_status_1g
,
1992 static const struct niu_phy_ops phy_ops_1g_copper
= {
1993 .xcvr_init
= xcvr_init_1g
,
1994 .link_status
= link_status_1g
,
1997 struct niu_phy_template
{
1998 const struct niu_phy_ops
*ops
;
2002 static const struct niu_phy_template phy_template_niu
= {
2003 .ops
= &phy_ops_10g_fiber_niu
,
2004 .phy_addr_base
= 16,
2007 static const struct niu_phy_template phy_template_10g_fiber
= {
2008 .ops
= &phy_ops_10g_fiber
,
2012 static const struct niu_phy_template phy_template_10g_fiber_hotplug
= {
2013 .ops
= &phy_ops_10g_fiber_hotplug
,
2017 static const struct niu_phy_template phy_template_10g_copper
= {
2018 .ops
= &phy_ops_10g_copper
,
2019 .phy_addr_base
= 10,
2022 static const struct niu_phy_template phy_template_1g_fiber
= {
2023 .ops
= &phy_ops_1g_fiber
,
2027 static const struct niu_phy_template phy_template_1g_copper
= {
2028 .ops
= &phy_ops_1g_copper
,
2032 static const struct niu_phy_template phy_template_1g_rgmii
= {
2033 .ops
= &phy_ops_1g_rgmii
,
2037 static const struct niu_phy_template phy_template_10g_serdes
= {
2038 .ops
= &phy_ops_10g_serdes
,
2042 static int niu_atca_port_num
[4] = {
2046 static int serdes_init_10g_serdes(struct niu
*np
)
2048 struct niu_link_config
*lp
= &np
->link_config
;
2049 unsigned long ctrl_reg
, test_cfg_reg
, pll_cfg
, i
;
2050 u64 ctrl_val
, test_cfg_val
, sig
, mask
, val
;
2056 reset_val
= ENET_SERDES_RESET_0
;
2057 ctrl_reg
= ENET_SERDES_0_CTRL_CFG
;
2058 test_cfg_reg
= ENET_SERDES_0_TEST_CFG
;
2059 pll_cfg
= ENET_SERDES_0_PLL_CFG
;
2062 reset_val
= ENET_SERDES_RESET_1
;
2063 ctrl_reg
= ENET_SERDES_1_CTRL_CFG
;
2064 test_cfg_reg
= ENET_SERDES_1_TEST_CFG
;
2065 pll_cfg
= ENET_SERDES_1_PLL_CFG
;
2071 ctrl_val
= (ENET_SERDES_CTRL_SDET_0
|
2072 ENET_SERDES_CTRL_SDET_1
|
2073 ENET_SERDES_CTRL_SDET_2
|
2074 ENET_SERDES_CTRL_SDET_3
|
2075 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT
) |
2076 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT
) |
2077 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT
) |
2078 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT
) |
2079 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT
) |
2080 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT
) |
2081 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT
) |
2082 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT
));
2085 if (lp
->loopback_mode
== LOOPBACK_PHY
) {
2086 test_cfg_val
|= ((ENET_TEST_MD_PAD_LOOPBACK
<<
2087 ENET_SERDES_TEST_MD_0_SHIFT
) |
2088 (ENET_TEST_MD_PAD_LOOPBACK
<<
2089 ENET_SERDES_TEST_MD_1_SHIFT
) |
2090 (ENET_TEST_MD_PAD_LOOPBACK
<<
2091 ENET_SERDES_TEST_MD_2_SHIFT
) |
2092 (ENET_TEST_MD_PAD_LOOPBACK
<<
2093 ENET_SERDES_TEST_MD_3_SHIFT
));
2097 nw64(pll_cfg
, ENET_SERDES_PLL_FBDIV2
);
2098 nw64(ctrl_reg
, ctrl_val
);
2099 nw64(test_cfg_reg
, test_cfg_val
);
2101 /* Initialize all 4 lanes of the SERDES. */
2102 for (i
= 0; i
< 4; i
++) {
2103 u32 rxtx_ctrl
, glue0
;
2105 err
= esr_read_rxtx_ctrl(np
, i
, &rxtx_ctrl
);
2108 err
= esr_read_glue0(np
, i
, &glue0
);
2112 rxtx_ctrl
&= ~(ESR_RXTX_CTRL_VMUXLO
);
2113 rxtx_ctrl
|= (ESR_RXTX_CTRL_ENSTRETCH
|
2114 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT
));
2116 glue0
&= ~(ESR_GLUE_CTRL0_SRATE
|
2117 ESR_GLUE_CTRL0_THCNT
|
2118 ESR_GLUE_CTRL0_BLTIME
);
2119 glue0
|= (ESR_GLUE_CTRL0_RXLOSENAB
|
2120 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT
) |
2121 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT
) |
2122 (BLTIME_300_CYCLES
<<
2123 ESR_GLUE_CTRL0_BLTIME_SHIFT
));
2125 err
= esr_write_rxtx_ctrl(np
, i
, rxtx_ctrl
);
2128 err
= esr_write_glue0(np
, i
, glue0
);
2134 sig
= nr64(ESR_INT_SIGNALS
);
2137 mask
= ESR_INT_SIGNALS_P0_BITS
;
2138 val
= (ESR_INT_SRDY0_P0
|
2141 ESR_INT_XDP_P0_CH3
|
2142 ESR_INT_XDP_P0_CH2
|
2143 ESR_INT_XDP_P0_CH1
|
2144 ESR_INT_XDP_P0_CH0
);
2148 mask
= ESR_INT_SIGNALS_P1_BITS
;
2149 val
= (ESR_INT_SRDY0_P1
|
2152 ESR_INT_XDP_P1_CH3
|
2153 ESR_INT_XDP_P1_CH2
|
2154 ESR_INT_XDP_P1_CH1
|
2155 ESR_INT_XDP_P1_CH0
);
2162 if ((sig
& mask
) != val
) {
2164 err
= serdes_init_1g_serdes(np
);
2166 np
->flags
&= ~NIU_FLAGS_10G
;
2167 np
->mac_xcvr
= MAC_XCVR_PCS
;
2169 dev_err(np
->device
, PFX
"Port %u 10G/1G SERDES Link Failed \n",
2178 static int niu_determine_phy_disposition(struct niu
*np
)
2180 struct niu_parent
*parent
= np
->parent
;
2181 u8 plat_type
= parent
->plat_type
;
2182 const struct niu_phy_template
*tp
;
2183 u32 phy_addr_off
= 0;
2185 if (plat_type
== PLAT_TYPE_NIU
) {
2186 tp
= &phy_template_niu
;
2187 phy_addr_off
+= np
->port
;
2192 NIU_FLAGS_XCVR_SERDES
)) {
2195 tp
= &phy_template_1g_copper
;
2196 if (plat_type
== PLAT_TYPE_VF_P0
)
2198 else if (plat_type
== PLAT_TYPE_VF_P1
)
2201 phy_addr_off
+= (np
->port
^ 0x3);
2206 tp
= &phy_template_1g_copper
;
2209 case NIU_FLAGS_FIBER
:
2211 tp
= &phy_template_1g_fiber
;
2214 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
2216 tp
= &phy_template_10g_fiber
;
2217 if (plat_type
== PLAT_TYPE_VF_P0
||
2218 plat_type
== PLAT_TYPE_VF_P1
)
2220 phy_addr_off
+= np
->port
;
2221 if (np
->flags
& NIU_FLAGS_HOTPLUG_PHY
) {
2222 tp
= &phy_template_10g_fiber_hotplug
;
2230 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
2231 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
2232 case NIU_FLAGS_XCVR_SERDES
:
2236 tp
= &phy_template_10g_serdes
;
2240 tp
= &phy_template_1g_rgmii
;
2246 phy_addr_off
= niu_atca_port_num
[np
->port
];
2254 np
->phy_ops
= tp
->ops
;
2255 np
->phy_addr
= tp
->phy_addr_base
+ phy_addr_off
;
2260 static int niu_init_link(struct niu
*np
)
2262 struct niu_parent
*parent
= np
->parent
;
2265 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
2266 err
= niu_xcvr_init(np
);
2271 err
= niu_serdes_init(np
);
2275 err
= niu_xcvr_init(np
);
2277 niu_link_status(np
, &ignore
);
static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ADDR0, reg0);
		nw64_mac(XMAC_ADDR1, reg1);
		nw64_mac(XMAC_ADDR2, reg2);
	} else {
		nw64_mac(BMAC_ADDR0, reg0);
		nw64_mac(BMAC_ADDR1, reg1);
		nw64_mac(BMAC_ADDR2, reg2);
	}
}

static int niu_num_alt_addr(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		return XMAC_NUM_ALT_ADDR;
	else
		return BMAC_NUM_ALT_ADDR;
}
static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
{
	u16 reg0 = addr[4] << 8 | addr[5];
	u16 reg1 = addr[2] << 8 | addr[3];
	u16 reg2 = addr[0] << 8 | addr[1];

	if (index >= niu_num_alt_addr(np))
		return -EINVAL;

	if (np->flags & NIU_FLAGS_XMAC) {
		nw64_mac(XMAC_ALT_ADDR0(index), reg0);
		nw64_mac(XMAC_ALT_ADDR1(index), reg1);
		nw64_mac(XMAC_ALT_ADDR2(index), reg2);
	} else {
		nw64_mac(BMAC_ALT_ADDR0(index), reg0);
		nw64_mac(BMAC_ALT_ADDR1(index), reg1);
		nw64_mac(BMAC_ALT_ADDR2(index), reg2);
	}

	return 0;
}
2328 static int niu_enable_alt_mac(struct niu
*np
, int index
, int on
)
2333 if (index
>= niu_num_alt_addr(np
))
2336 if (np
->flags
& NIU_FLAGS_XMAC
) {
2337 reg
= XMAC_ADDR_CMPEN
;
2340 reg
= BMAC_ADDR_CMPEN
;
2341 mask
= 1 << (index
+ 1);
2344 val
= nr64_mac(reg
);
2354 static void __set_rdc_table_num_hw(struct niu
*np
, unsigned long reg
,
2355 int num
, int mac_pref
)
2357 u64 val
= nr64_mac(reg
);
2358 val
&= ~(HOST_INFO_MACRDCTBLN
| HOST_INFO_MPR
);
2361 val
|= HOST_INFO_MPR
;
2365 static int __set_rdc_table_num(struct niu
*np
,
2366 int xmac_index
, int bmac_index
,
2367 int rdc_table_num
, int mac_pref
)
2371 if (rdc_table_num
& ~HOST_INFO_MACRDCTBLN
)
2373 if (np
->flags
& NIU_FLAGS_XMAC
)
2374 reg
= XMAC_HOST_INFO(xmac_index
);
2376 reg
= BMAC_HOST_INFO(bmac_index
);
2377 __set_rdc_table_num_hw(np
, reg
, rdc_table_num
, mac_pref
);
2381 static int niu_set_primary_mac_rdc_table(struct niu
*np
, int table_num
,
2384 return __set_rdc_table_num(np
, 17, 0, table_num
, mac_pref
);
2387 static int niu_set_multicast_mac_rdc_table(struct niu
*np
, int table_num
,
2390 return __set_rdc_table_num(np
, 16, 8, table_num
, mac_pref
);
2393 static int niu_set_alt_mac_rdc_table(struct niu
*np
, int idx
,
2394 int table_num
, int mac_pref
)
2396 if (idx
>= niu_num_alt_addr(np
))
2398 return __set_rdc_table_num(np
, idx
, idx
+ 1, table_num
, mac_pref
);
static u64 vlan_entry_set_parity(u64 reg_val)
{
	u64 port01_mask;
	u64 port23_mask;

	port01_mask = 0x00ff;
	port23_mask = 0xff00;

	if (hweight64(reg_val & port01_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY0;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY0;

	if (hweight64(reg_val & port23_mask) & 1)
		reg_val |= ENET_VLAN_TBL_PARITY1;
	else
		reg_val &= ~ENET_VLAN_TBL_PARITY1;

	return reg_val;
}

static void vlan_tbl_write(struct niu *np, unsigned long index,
			   int port, int vpr, int rdc_table)
{
	u64 reg_val = nr64(ENET_VLAN_TBL(index));

	reg_val &= ~((ENET_VLAN_TBL_VPR |
		      ENET_VLAN_TBL_VLANRDCTBLN) <<
		     ENET_VLAN_TBL_SHIFT(port));
	if (vpr)
		reg_val |= (ENET_VLAN_TBL_VPR <<
			    ENET_VLAN_TBL_SHIFT(port));
	reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));

	reg_val = vlan_entry_set_parity(reg_val);

	nw64(ENET_VLAN_TBL(index), reg_val);
}
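/* Parity scheme used by vlan_entry_set_parity(): ENET_VLAN_TBL_PARITY0
 * covers the low byte of the entry (ports 0/1) and ENET_VLAN_TBL_PARITY1
 * the next byte (ports 2/3), each computed with hweight64() over its byte.
 * E.g. if the low byte has an odd number of bits set, PARITY0 is set so
 * that (assuming the parity bit itself lies outside the covered byte) the
 * byte plus its parity bit always carries even weight.
 */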
2440 static void vlan_tbl_clear(struct niu
*np
)
2444 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++)
2445 nw64(ENET_VLAN_TBL(i
), 0);
2448 static int tcam_wait_bit(struct niu
*np
, u64 bit
)
2452 while (--limit
> 0) {
2453 if (nr64(TCAM_CTL
) & bit
)
2463 static int tcam_flush(struct niu
*np
, int index
)
2465 nw64(TCAM_KEY_0
, 0x00);
2466 nw64(TCAM_KEY_MASK_0
, 0xff);
2467 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_WRITE
| index
));
2469 return tcam_wait_bit(np
, TCAM_CTL_STAT
);
2473 static int tcam_read(struct niu
*np
, int index
,
2474 u64
*key
, u64
*mask
)
2478 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_READ
| index
));
2479 err
= tcam_wait_bit(np
, TCAM_CTL_STAT
);
2481 key
[0] = nr64(TCAM_KEY_0
);
2482 key
[1] = nr64(TCAM_KEY_1
);
2483 key
[2] = nr64(TCAM_KEY_2
);
2484 key
[3] = nr64(TCAM_KEY_3
);
2485 mask
[0] = nr64(TCAM_KEY_MASK_0
);
2486 mask
[1] = nr64(TCAM_KEY_MASK_1
);
2487 mask
[2] = nr64(TCAM_KEY_MASK_2
);
2488 mask
[3] = nr64(TCAM_KEY_MASK_3
);
2494 static int tcam_write(struct niu
*np
, int index
,
2495 u64
*key
, u64
*mask
)
2497 nw64(TCAM_KEY_0
, key
[0]);
2498 nw64(TCAM_KEY_1
, key
[1]);
2499 nw64(TCAM_KEY_2
, key
[2]);
2500 nw64(TCAM_KEY_3
, key
[3]);
2501 nw64(TCAM_KEY_MASK_0
, mask
[0]);
2502 nw64(TCAM_KEY_MASK_1
, mask
[1]);
2503 nw64(TCAM_KEY_MASK_2
, mask
[2]);
2504 nw64(TCAM_KEY_MASK_3
, mask
[3]);
2505 nw64(TCAM_CTL
, (TCAM_CTL_RWC_TCAM_WRITE
| index
));
2507 return tcam_wait_bit(np
, TCAM_CTL_STAT
);
2511 static int tcam_assoc_read(struct niu
*np
, int index
, u64
*data
)
2515 nw64(TCAM_CTL
, (TCAM_CTL_RWC_RAM_READ
| index
));
2516 err
= tcam_wait_bit(np
, TCAM_CTL_STAT
);
2518 *data
= nr64(TCAM_KEY_1
);
2524 static int tcam_assoc_write(struct niu
*np
, int index
, u64 assoc_data
)
2526 nw64(TCAM_KEY_1
, assoc_data
);
2527 nw64(TCAM_CTL
, (TCAM_CTL_RWC_RAM_WRITE
| index
));
2529 return tcam_wait_bit(np
, TCAM_CTL_STAT
);
2532 static void tcam_enable(struct niu
*np
, int on
)
2534 u64 val
= nr64(FFLP_CFG_1
);
2537 val
&= ~FFLP_CFG_1_TCAM_DIS
;
2539 val
|= FFLP_CFG_1_TCAM_DIS
;
2540 nw64(FFLP_CFG_1
, val
);
2543 static void tcam_set_lat_and_ratio(struct niu
*np
, u64 latency
, u64 ratio
)
2545 u64 val
= nr64(FFLP_CFG_1
);
2547 val
&= ~(FFLP_CFG_1_FFLPINITDONE
|
2549 FFLP_CFG_1_CAMRATIO
);
2550 val
|= (latency
<< FFLP_CFG_1_CAMLAT_SHIFT
);
2551 val
|= (ratio
<< FFLP_CFG_1_CAMRATIO_SHIFT
);
2552 nw64(FFLP_CFG_1
, val
);
2554 val
= nr64(FFLP_CFG_1
);
2555 val
|= FFLP_CFG_1_FFLPINITDONE
;
2556 nw64(FFLP_CFG_1
, val
);
2559 static int tcam_user_eth_class_enable(struct niu
*np
, unsigned long class,
2565 if (class < CLASS_CODE_ETHERTYPE1
||
2566 class > CLASS_CODE_ETHERTYPE2
)
2569 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2581 static int tcam_user_eth_class_set(struct niu
*np
, unsigned long class,
2587 if (class < CLASS_CODE_ETHERTYPE1
||
2588 class > CLASS_CODE_ETHERTYPE2
||
2589 (ether_type
& ~(u64
)0xffff) != 0)
2592 reg
= L2_CLS(class - CLASS_CODE_ETHERTYPE1
);
2594 val
&= ~L2_CLS_ETYPE
;
2595 val
|= (ether_type
<< L2_CLS_ETYPE_SHIFT
);
2602 static int tcam_user_ip_class_enable(struct niu
*np
, unsigned long class,
2608 if (class < CLASS_CODE_USER_PROG1
||
2609 class > CLASS_CODE_USER_PROG4
)
2612 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2615 val
|= L3_CLS_VALID
;
2617 val
&= ~L3_CLS_VALID
;
2624 static int tcam_user_ip_class_set(struct niu
*np
, unsigned long class,
2625 int ipv6
, u64 protocol_id
,
2626 u64 tos_mask
, u64 tos_val
)
2631 if (class < CLASS_CODE_USER_PROG1
||
2632 class > CLASS_CODE_USER_PROG4
||
2633 (protocol_id
& ~(u64
)0xff) != 0 ||
2634 (tos_mask
& ~(u64
)0xff) != 0 ||
2635 (tos_val
& ~(u64
)0xff) != 0)
2638 reg
= L3_CLS(class - CLASS_CODE_USER_PROG1
);
2640 val
&= ~(L3_CLS_IPVER
| L3_CLS_PID
|
2641 L3_CLS_TOSMASK
| L3_CLS_TOS
);
2643 val
|= L3_CLS_IPVER
;
2644 val
|= (protocol_id
<< L3_CLS_PID_SHIFT
);
2645 val
|= (tos_mask
<< L3_CLS_TOSMASK_SHIFT
);
2646 val
|= (tos_val
<< L3_CLS_TOS_SHIFT
);
2653 static int tcam_early_init(struct niu
*np
)
2659 tcam_set_lat_and_ratio(np
,
2660 DEFAULT_TCAM_LATENCY
,
2661 DEFAULT_TCAM_ACCESS_RATIO
);
2662 for (i
= CLASS_CODE_ETHERTYPE1
; i
<= CLASS_CODE_ETHERTYPE2
; i
++) {
2663 err
= tcam_user_eth_class_enable(np
, i
, 0);
2667 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_USER_PROG4
; i
++) {
2668 err
= tcam_user_ip_class_enable(np
, i
, 0);
2676 static int tcam_flush_all(struct niu
*np
)
2680 for (i
= 0; i
< np
->parent
->tcam_num_entries
; i
++) {
2681 int err
= tcam_flush(np
, i
);
2688 static u64
hash_addr_regval(unsigned long index
, unsigned long num_entries
)
2690 return ((u64
)index
| (num_entries
== 1 ?
2691 HASH_TBL_ADDR_AUTOINC
: 0));
2695 static int hash_read(struct niu
*np
, unsigned long partition
,
2696 unsigned long index
, unsigned long num_entries
,
2699 u64 val
= hash_addr_regval(index
, num_entries
);
2702 if (partition
>= FCRAM_NUM_PARTITIONS
||
2703 index
+ num_entries
> FCRAM_SIZE
)
2706 nw64(HASH_TBL_ADDR(partition
), val
);
2707 for (i
= 0; i
< num_entries
; i
++)
2708 data
[i
] = nr64(HASH_TBL_DATA(partition
));
2714 static int hash_write(struct niu
*np
, unsigned long partition
,
2715 unsigned long index
, unsigned long num_entries
,
2718 u64 val
= hash_addr_regval(index
, num_entries
);
2721 if (partition
>= FCRAM_NUM_PARTITIONS
||
2722 index
+ (num_entries
* 8) > FCRAM_SIZE
)
2725 nw64(HASH_TBL_ADDR(partition
), val
);
2726 for (i
= 0; i
< num_entries
; i
++)
2727 nw64(HASH_TBL_DATA(partition
), data
[i
]);
2732 static void fflp_reset(struct niu
*np
)
2736 nw64(FFLP_CFG_1
, FFLP_CFG_1_PIO_FIO_RST
);
2738 nw64(FFLP_CFG_1
, 0);
2740 val
= FFLP_CFG_1_FCRAMOUTDR_NORMAL
| FFLP_CFG_1_FFLPINITDONE
;
2741 nw64(FFLP_CFG_1
, val
);
2744 static void fflp_set_timings(struct niu
*np
)
2746 u64 val
= nr64(FFLP_CFG_1
);
2748 val
&= ~FFLP_CFG_1_FFLPINITDONE
;
2749 val
|= (DEFAULT_FCRAMRATIO
<< FFLP_CFG_1_FCRAMRATIO_SHIFT
);
2750 nw64(FFLP_CFG_1
, val
);
2752 val
= nr64(FFLP_CFG_1
);
2753 val
|= FFLP_CFG_1_FFLPINITDONE
;
2754 nw64(FFLP_CFG_1
, val
);
2756 val
= nr64(FCRAM_REF_TMR
);
2757 val
&= ~(FCRAM_REF_TMR_MAX
| FCRAM_REF_TMR_MIN
);
2758 val
|= (DEFAULT_FCRAM_REFRESH_MAX
<< FCRAM_REF_TMR_MAX_SHIFT
);
2759 val
|= (DEFAULT_FCRAM_REFRESH_MIN
<< FCRAM_REF_TMR_MIN_SHIFT
);
2760 nw64(FCRAM_REF_TMR
, val
);
2763 static int fflp_set_partition(struct niu
*np
, u64 partition
,
2764 u64 mask
, u64 base
, int enable
)
2769 if (partition
>= FCRAM_NUM_PARTITIONS
||
2770 (mask
& ~(u64
)0x1f) != 0 ||
2771 (base
& ~(u64
)0x1f) != 0)
2774 reg
= FLW_PRT_SEL(partition
);
2777 val
&= ~(FLW_PRT_SEL_EXT
| FLW_PRT_SEL_MASK
| FLW_PRT_SEL_BASE
);
2778 val
|= (mask
<< FLW_PRT_SEL_MASK_SHIFT
);
2779 val
|= (base
<< FLW_PRT_SEL_BASE_SHIFT
);
2781 val
|= FLW_PRT_SEL_EXT
;
2787 static int fflp_disable_all_partitions(struct niu
*np
)
2791 for (i
= 0; i
< FCRAM_NUM_PARTITIONS
; i
++) {
2792 int err
= fflp_set_partition(np
, 0, 0, 0, 0);
2799 static void fflp_llcsnap_enable(struct niu
*np
, int on
)
2801 u64 val
= nr64(FFLP_CFG_1
);
2804 val
|= FFLP_CFG_1_LLCSNAP
;
2806 val
&= ~FFLP_CFG_1_LLCSNAP
;
2807 nw64(FFLP_CFG_1
, val
);
2810 static void fflp_errors_enable(struct niu
*np
, int on
)
2812 u64 val
= nr64(FFLP_CFG_1
);
2815 val
&= ~FFLP_CFG_1_ERRORDIS
;
2817 val
|= FFLP_CFG_1_ERRORDIS
;
2818 nw64(FFLP_CFG_1
, val
);
2821 static int fflp_hash_clear(struct niu
*np
)
2823 struct fcram_hash_ipv4 ent
;
2826 /* IPV4 hash entry with valid bit clear, rest is don't care. */
2827 memset(&ent
, 0, sizeof(ent
));
2828 ent
.header
= HASH_HEADER_EXT
;
2830 for (i
= 0; i
< FCRAM_SIZE
; i
+= sizeof(ent
)) {
2831 int err
= hash_write(np
, 0, i
, 1, (u64
*) &ent
);
2838 static int fflp_early_init(struct niu
*np
)
2840 struct niu_parent
*parent
;
2841 unsigned long flags
;
2844 niu_lock_parent(np
, flags
);
2846 parent
= np
->parent
;
2848 if (!(parent
->flags
& PARENT_FLGS_CLS_HWINIT
)) {
2849 niudbg(PROBE
, "fflp_early_init: Initting hw on port %u\n",
2851 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
2853 fflp_set_timings(np
);
2854 err
= fflp_disable_all_partitions(np
);
2856 niudbg(PROBE
, "fflp_disable_all_partitions "
2857 "failed, err=%d\n", err
);
2862 err
= tcam_early_init(np
);
2864 niudbg(PROBE
, "tcam_early_init failed, err=%d\n",
2868 fflp_llcsnap_enable(np
, 1);
2869 fflp_errors_enable(np
, 0);
2873 err
= tcam_flush_all(np
);
2875 niudbg(PROBE
, "tcam_flush_all failed, err=%d\n",
2879 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
2880 err
= fflp_hash_clear(np
);
2882 niudbg(PROBE
, "fflp_hash_clear failed, "
2890 niudbg(PROBE
, "fflp_early_init: Success\n");
2891 parent
->flags
|= PARENT_FLGS_CLS_HWINIT
;
2894 niu_unlock_parent(np
, flags
);
2898 static int niu_set_flow_key(struct niu
*np
, unsigned long class_code
, u64 key
)
2900 if (class_code
< CLASS_CODE_USER_PROG1
||
2901 class_code
> CLASS_CODE_SCTP_IPV6
)
2904 nw64(FLOW_KEY(class_code
- CLASS_CODE_USER_PROG1
), key
);
2908 static int niu_set_tcam_key(struct niu
*np
, unsigned long class_code
, u64 key
)
2910 if (class_code
< CLASS_CODE_USER_PROG1
||
2911 class_code
> CLASS_CODE_SCTP_IPV6
)
2914 nw64(TCAM_KEY(class_code
- CLASS_CODE_USER_PROG1
), key
);
2918 static void niu_rx_skb_append(struct sk_buff
*skb
, struct page
*page
,
2919 u32 offset
, u32 size
)
2921 int i
= skb_shinfo(skb
)->nr_frags
;
2922 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2925 frag
->page_offset
= offset
;
2929 skb
->data_len
+= size
;
2930 skb
->truesize
+= size
;
2932 skb_shinfo(skb
)->nr_frags
= i
+ 1;
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
	a >>= PAGE_SHIFT;
	a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

	return (a & (MAX_RBR_RING_SIZE - 1));
}
static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
				    struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}

	return p;
}
static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
{
	unsigned int h = niu_hash_rxaddr(rp, base);

	page->index = base;
	page->mapping = (struct address_space *) rp->rxhash[h];
	rp->rxhash[h] = page;
}
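/* niu_hash_rxaddr(), niu_hash_page() and niu_find_rxpage() above form a
 * small open-hashing scheme for RX pages: the page's DMA address is
 * folded down to a bucket index, the page is pushed onto that bucket
 * using the otherwise-unused page->mapping field as the "next" pointer,
 * and lookup returns a pointer-to-link so the caller can unlink the
 * page in O(1).  Below is a minimal sketch of the same pattern with
 * hypothetical names and a plain struct instead of struct page; it is
 * illustrative only and not part of the driver.
 */
#if 0
struct demo_node {
	unsigned long index;		/* key, like page->index */
	struct demo_node *next;		/* like page->mapping reused as a link */
};

static struct demo_node *demo_hash[16];

static void demo_insert(struct demo_node *n)
{
	unsigned int h = n->index & 15;

	n->next = demo_hash[h];
	demo_hash[h] = n;
}

static struct demo_node *demo_find(unsigned long key, struct demo_node ***link)
{
	struct demo_node *p, **pp = &demo_hash[key & 15];

	for (; (p = *pp) != NULL; pp = &p->next) {
		if (p->index == key) {
			*link = pp;	/* caller can unlink via **link = p->next */
			break;
		}
	}
	return p;
}
#endif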
static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
			    gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		atomic_add(rp->rbr_blocks_per_page - 1,
			   &compound_head(page)->_count);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}

	return 0;
}
static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int index = rp->rbr_index;

	rp->rbr_pending++;
	if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
		int err = niu_rbr_add_page(np, rp, mask, index);

		if (unlikely(err)) {
			rp->rbr_pending--;
			return;
		}

		rp->rbr_index += rp->rbr_blocks_per_page;
		BUG_ON(rp->rbr_index > rp->rbr_table_size);
		if (rp->rbr_index == rp->rbr_table_size)
			rp->rbr_index = 0;

		if (rp->rbr_pending >= rp->rbr_kick_thresh) {
			nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
			rp->rbr_pending = 0;
		}
	}
}
3024 static int niu_rx_pkt_ignore(struct niu
*np
, struct rx_ring_info
*rp
)
3026 unsigned int index
= rp
->rcr_index
;
3031 struct page
*page
, **link
;
3037 val
= le64_to_cpup(&rp
->rcr
[index
]);
3038 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3039 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3040 page
= niu_find_rxpage(rp
, addr
, &link
);
3042 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3043 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3044 if ((page
->index
+ PAGE_SIZE
) - rcr_size
== addr
) {
3045 *link
= (struct page
*) page
->mapping
;
3046 np
->ops
->unmap_page(np
->device
, page
->index
,
3047 PAGE_SIZE
, DMA_FROM_DEVICE
);
3049 page
->mapping
= NULL
;
3051 rp
->rbr_refill_pending
++;
3054 index
= NEXT_RCR(rp
, index
);
3055 if (!(val
& RCR_ENTRY_MULTI
))
3059 rp
->rcr_index
= index
;
3064 static int niu_process_rx_pkt(struct niu
*np
, struct rx_ring_info
*rp
)
3066 unsigned int index
= rp
->rcr_index
;
3067 struct sk_buff
*skb
;
3070 skb
= netdev_alloc_skb(np
->dev
, RX_SKB_ALLOC_SIZE
);
3072 return niu_rx_pkt_ignore(np
, rp
);
3076 struct page
*page
, **link
;
3077 u32 rcr_size
, append_size
;
3082 val
= le64_to_cpup(&rp
->rcr
[index
]);
3084 len
= (val
& RCR_ENTRY_L2_LEN
) >>
3085 RCR_ENTRY_L2_LEN_SHIFT
;
3088 addr
= (val
& RCR_ENTRY_PKT_BUF_ADDR
) <<
3089 RCR_ENTRY_PKT_BUF_ADDR_SHIFT
;
3090 page
= niu_find_rxpage(rp
, addr
, &link
);
3092 rcr_size
= rp
->rbr_sizes
[(val
& RCR_ENTRY_PKTBUFSZ
) >>
3093 RCR_ENTRY_PKTBUFSZ_SHIFT
];
3095 off
= addr
& ~PAGE_MASK
;
3096 append_size
= rcr_size
;
3103 ptype
= (val
>> RCR_ENTRY_PKT_TYPE_SHIFT
);
3104 if ((ptype
== RCR_PKT_TYPE_TCP
||
3105 ptype
== RCR_PKT_TYPE_UDP
) &&
3106 !(val
& (RCR_ENTRY_NOPORT
|
3108 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3110 skb
->ip_summed
= CHECKSUM_NONE
;
3112 if (!(val
& RCR_ENTRY_MULTI
))
3113 append_size
= len
- skb
->len
;
3115 niu_rx_skb_append(skb
, page
, off
, append_size
);
3116 if ((page
->index
+ rp
->rbr_block_size
) - rcr_size
== addr
) {
3117 *link
= (struct page
*) page
->mapping
;
3118 np
->ops
->unmap_page(np
->device
, page
->index
,
3119 PAGE_SIZE
, DMA_FROM_DEVICE
);
3121 page
->mapping
= NULL
;
3122 rp
->rbr_refill_pending
++;
3126 index
= NEXT_RCR(rp
, index
);
3127 if (!(val
& RCR_ENTRY_MULTI
))
3131 rp
->rcr_index
= index
;
3133 skb_reserve(skb
, NET_IP_ALIGN
);
3134 __pskb_pull_tail(skb
, min(len
, NIU_RXPULL_MAX
));
3137 rp
->rx_bytes
+= skb
->len
;
3139 skb
->protocol
= eth_type_trans(skb
, np
->dev
);
3140 netif_receive_skb(skb
);
3142 np
->dev
->last_rx
= jiffies
;
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
	int blocks_per_page = rp->rbr_blocks_per_page;
	int err, index = rp->rbr_index;

	err = 0;
	while (index < (rp->rbr_table_size - blocks_per_page)) {
		err = niu_rbr_add_page(np, rp, mask, index);
		if (err)
			break;

		index += blocks_per_page;
	}

	rp->rbr_index = index;

	return err;
}
3165 static void niu_rbr_free(struct niu
*np
, struct rx_ring_info
*rp
)
3169 for (i
= 0; i
< MAX_RBR_RING_SIZE
; i
++) {
3172 page
= rp
->rxhash
[i
];
3174 struct page
*next
= (struct page
*) page
->mapping
;
3175 u64 base
= page
->index
;
3177 np
->ops
->unmap_page(np
->device
, base
, PAGE_SIZE
,
3180 page
->mapping
= NULL
;
3188 for (i
= 0; i
< rp
->rbr_table_size
; i
++)
3189 rp
->rbr
[i
] = cpu_to_le32(0);
3193 static int release_tx_packet(struct niu
*np
, struct tx_ring_info
*rp
, int idx
)
3195 struct tx_buff_info
*tb
= &rp
->tx_buffs
[idx
];
3196 struct sk_buff
*skb
= tb
->skb
;
3197 struct tx_pkt_hdr
*tp
;
3201 tp
= (struct tx_pkt_hdr
*) skb
->data
;
3202 tx_flags
= le64_to_cpup(&tp
->flags
);
3205 rp
->tx_bytes
+= (((tx_flags
& TXHDR_LEN
) >> TXHDR_LEN_SHIFT
) -
3206 ((tx_flags
& TXHDR_PAD
) / 2));
3208 len
= skb_headlen(skb
);
3209 np
->ops
->unmap_single(np
->device
, tb
->mapping
,
3210 len
, DMA_TO_DEVICE
);
3212 if (le64_to_cpu(rp
->descr
[idx
]) & TX_DESC_MARK
)
3217 idx
= NEXT_TX(rp
, idx
);
3218 len
-= MAX_TX_DESC_LEN
;
3221 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3222 tb
= &rp
->tx_buffs
[idx
];
3223 BUG_ON(tb
->skb
!= NULL
);
3224 np
->ops
->unmap_page(np
->device
, tb
->mapping
,
3225 skb_shinfo(skb
)->frags
[i
].size
,
3227 idx
= NEXT_TX(rp
, idx
);
#define NIU_TX_WAKEUP_THRESH(rp)		((rp)->pending / 4)

static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
	struct netdev_queue *txq;
	u16 pkt_cnt, tmp;
	int cons, index;
	u64 cs;

	index = (rp - np->tx_rings);
	txq = netdev_get_tx_queue(np->dev, index);

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		goto out;

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

	rp->last_pkt_cnt = tmp;

	cons = rp->cons;

	niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
	       np->dev->name, pkt_cnt, cons);

	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);

	rp->cons = cons;
	smp_mb();

out:
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
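/* The tail of niu_tx_work() wakes a stopped TX queue only when more
 * than NIU_TX_WAKEUP_THRESH(rp) descriptors (a quarter of the ring, so
 * 64 slots for a 256-entry ring) are free again, and it re-checks the
 * condition under the TX queue lock to avoid racing the transmit path.
 * A minimal sketch of that stop/wake hysteresis pattern, with a
 * hypothetical helper (illustrative only, not part of the driver):
 */
#if 0
static void demo_maybe_wake(struct netdev_queue *txq, int free, int pending)
{
	if (netif_tx_queue_stopped(txq) && free > pending / 4) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check: the xmit path may have consumed the freed
		 * slots and stopped the queue again while we were
		 * unlocked.
		 */
		if (netif_tx_queue_stopped(txq) && free > pending / 4)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
#endif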
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
	int qlen, rcr_done = 0, work_done = 0;
	struct rxdma_mailbox *mbox = rp->mbox;
	u64 stat;

#if 1
	stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
	qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
	stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
	qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
	mbox->rx_dma_ctl_stat = 0;
	mbox->rcrstat_a = 0;

	niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
	       np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);

	rcr_done = work_done = 0;
	qlen = min(qlen, budget);
	while (work_done < qlen) {
		rcr_done += niu_process_rx_pkt(np, rp);
		work_done++;
	}

	if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
		int i;

		for (i = 0; i < rp->rbr_refill_pending; i++)
			niu_rbr_refill(np, rp, GFP_ATOMIC);
		rp->rbr_refill_pending = 0;
	}

	stat = (RX_DMA_CTL_STAT_MEX |
		((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
		((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

	return work_done;
}
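/* niu_rx_work() acknowledges what it consumed by writing the packet and
 * RCR-entry counts back into RX_DMA_CTL_STAT along with the MEX bit.
 * A worked example of that write-back, assuming 5 packets were consumed
 * across 7 RCR entries (illustrative only, not part of the driver):
 */
#if 0
static void demo_ack_rcr(struct niu *np, struct rx_ring_info *rp)
{
	u64 ack = (RX_DMA_CTL_STAT_MEX |
		   ((u64)5 << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |	/* packets */
		   ((u64)7 << RX_DMA_CTL_STAT_PTRREAD_SHIFT));	/* RCR entries */

	nw64(RX_DMA_CTL_STAT(rp->rx_channel), ack);
}
#endif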
3322 static int niu_poll_core(struct niu
*np
, struct niu_ldg
*lp
, int budget
)
3325 u32 tx_vec
= (v0
>> 32);
3326 u32 rx_vec
= (v0
& 0xffffffff);
3327 int i
, work_done
= 0;
3329 niudbg(INTR
, "%s: niu_poll_core() v0[%016llx]\n",
3330 np
->dev
->name
, (unsigned long long) v0
);
3332 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3333 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3334 if (tx_vec
& (1 << rp
->tx_channel
))
3335 niu_tx_work(np
, rp
);
3336 nw64(LD_IM0(LDN_TXDMA(rp
->tx_channel
)), 0);
3339 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3340 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3342 if (rx_vec
& (1 << rp
->rx_channel
)) {
3345 this_work_done
= niu_rx_work(np
, rp
,
3348 budget
-= this_work_done
;
3349 work_done
+= this_work_done
;
3351 nw64(LD_IM0(LDN_RXDMA(rp
->rx_channel
)), 0);
3357 static int niu_poll(struct napi_struct
*napi
, int budget
)
3359 struct niu_ldg
*lp
= container_of(napi
, struct niu_ldg
, napi
);
3360 struct niu
*np
= lp
->np
;
3363 work_done
= niu_poll_core(np
, lp
, budget
);
3365 if (work_done
< budget
) {
3366 netif_rx_complete(np
->dev
, napi
);
3367 niu_ldg_rearm(np
, lp
, 1);
3372 static void niu_log_rxchan_errors(struct niu
*np
, struct rx_ring_info
*rp
,
3375 dev_err(np
->device
, PFX
"%s: RX channel %u errors ( ",
3376 np
->dev
->name
, rp
->rx_channel
);
3378 if (stat
& RX_DMA_CTL_STAT_RBR_TMOUT
)
3379 printk("RBR_TMOUT ");
3380 if (stat
& RX_DMA_CTL_STAT_RSP_CNT_ERR
)
3382 if (stat
& RX_DMA_CTL_STAT_BYTE_EN_BUS
)
3383 printk("BYTE_EN_BUS ");
3384 if (stat
& RX_DMA_CTL_STAT_RSP_DAT_ERR
)
3386 if (stat
& RX_DMA_CTL_STAT_RCR_ACK_ERR
)
3388 if (stat
& RX_DMA_CTL_STAT_RCR_SHA_PAR
)
3389 printk("RCR_SHA_PAR ");
3390 if (stat
& RX_DMA_CTL_STAT_RBR_PRE_PAR
)
3391 printk("RBR_PRE_PAR ");
3392 if (stat
& RX_DMA_CTL_STAT_CONFIG_ERR
)
3394 if (stat
& RX_DMA_CTL_STAT_RCRINCON
)
3395 printk("RCRINCON ");
3396 if (stat
& RX_DMA_CTL_STAT_RCRFULL
)
3398 if (stat
& RX_DMA_CTL_STAT_RBRFULL
)
3400 if (stat
& RX_DMA_CTL_STAT_RBRLOGPAGE
)
3401 printk("RBRLOGPAGE ");
3402 if (stat
& RX_DMA_CTL_STAT_CFIGLOGPAGE
)
3403 printk("CFIGLOGPAGE ");
3404 if (stat
& RX_DMA_CTL_STAT_DC_FIFO_ERR
)
3410 static int niu_rx_error(struct niu
*np
, struct rx_ring_info
*rp
)
3412 u64 stat
= nr64(RX_DMA_CTL_STAT(rp
->rx_channel
));
3416 if (stat
& (RX_DMA_CTL_STAT_CHAN_FATAL
|
3417 RX_DMA_CTL_STAT_PORT_FATAL
))
3421 dev_err(np
->device
, PFX
"%s: RX channel %u error, stat[%llx]\n",
3422 np
->dev
->name
, rp
->rx_channel
,
3423 (unsigned long long) stat
);
3425 niu_log_rxchan_errors(np
, rp
, stat
);
3428 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3429 stat
& RX_DMA_CTL_WRITE_CLEAR_ERRS
);
3434 static void niu_log_txchan_errors(struct niu
*np
, struct tx_ring_info
*rp
,
3437 dev_err(np
->device
, PFX
"%s: TX channel %u errors ( ",
3438 np
->dev
->name
, rp
->tx_channel
);
3440 if (cs
& TX_CS_MBOX_ERR
)
3442 if (cs
& TX_CS_PKT_SIZE_ERR
)
3443 printk("PKT_SIZE ");
3444 if (cs
& TX_CS_TX_RING_OFLOW
)
3445 printk("TX_RING_OFLOW ");
3446 if (cs
& TX_CS_PREF_BUF_PAR_ERR
)
3447 printk("PREF_BUF_PAR ");
3448 if (cs
& TX_CS_NACK_PREF
)
3449 printk("NACK_PREF ");
3450 if (cs
& TX_CS_NACK_PKT_RD
)
3451 printk("NACK_PKT_RD ");
3452 if (cs
& TX_CS_CONF_PART_ERR
)
3453 printk("CONF_PART ");
3454 if (cs
& TX_CS_PKT_PRT_ERR
)
3460 static int niu_tx_error(struct niu
*np
, struct tx_ring_info
*rp
)
3464 cs
= nr64(TX_CS(rp
->tx_channel
));
3465 logh
= nr64(TX_RNG_ERR_LOGH(rp
->tx_channel
));
3466 logl
= nr64(TX_RNG_ERR_LOGL(rp
->tx_channel
));
3468 dev_err(np
->device
, PFX
"%s: TX channel %u error, "
3469 "cs[%llx] logh[%llx] logl[%llx]\n",
3470 np
->dev
->name
, rp
->tx_channel
,
3471 (unsigned long long) cs
,
3472 (unsigned long long) logh
,
3473 (unsigned long long) logl
);
3475 niu_log_txchan_errors(np
, rp
, cs
);
3480 static int niu_mif_interrupt(struct niu
*np
)
3482 u64 mif_status
= nr64(MIF_STATUS
);
3485 if (np
->flags
& NIU_FLAGS_XMAC
) {
3486 u64 xrxmac_stat
= nr64_mac(XRXMAC_STATUS
);
3488 if (xrxmac_stat
& XRXMAC_STATUS_PHY_MDINT
)
3492 dev_err(np
->device
, PFX
"%s: MIF interrupt, "
3493 "stat[%llx] phy_mdint(%d)\n",
3494 np
->dev
->name
, (unsigned long long) mif_status
, phy_mdint
);
3499 static void niu_xmac_interrupt(struct niu
*np
)
3501 struct niu_xmac_stats
*mp
= &np
->mac_stats
.xmac
;
3504 val
= nr64_mac(XTXMAC_STATUS
);
3505 if (val
& XTXMAC_STATUS_FRAME_CNT_EXP
)
3506 mp
->tx_frames
+= TXMAC_FRM_CNT_COUNT
;
3507 if (val
& XTXMAC_STATUS_BYTE_CNT_EXP
)
3508 mp
->tx_bytes
+= TXMAC_BYTE_CNT_COUNT
;
3509 if (val
& XTXMAC_STATUS_TXFIFO_XFR_ERR
)
3510 mp
->tx_fifo_errors
++;
3511 if (val
& XTXMAC_STATUS_TXMAC_OFLOW
)
3512 mp
->tx_overflow_errors
++;
3513 if (val
& XTXMAC_STATUS_MAX_PSIZE_ERR
)
3514 mp
->tx_max_pkt_size_errors
++;
3515 if (val
& XTXMAC_STATUS_TXMAC_UFLOW
)
3516 mp
->tx_underflow_errors
++;
3518 val
= nr64_mac(XRXMAC_STATUS
);
3519 if (val
& XRXMAC_STATUS_LCL_FLT_STATUS
)
3520 mp
->rx_local_faults
++;
3521 if (val
& XRXMAC_STATUS_RFLT_DET
)
3522 mp
->rx_remote_faults
++;
3523 if (val
& XRXMAC_STATUS_LFLT_CNT_EXP
)
3524 mp
->rx_link_faults
+= LINK_FAULT_CNT_COUNT
;
3525 if (val
& XRXMAC_STATUS_ALIGNERR_CNT_EXP
)
3526 mp
->rx_align_errors
+= RXMAC_ALIGN_ERR_CNT_COUNT
;
3527 if (val
& XRXMAC_STATUS_RXFRAG_CNT_EXP
)
3528 mp
->rx_frags
+= RXMAC_FRAG_CNT_COUNT
;
3529 if (val
& XRXMAC_STATUS_RXMULTF_CNT_EXP
)
3530 mp
->rx_mcasts
+= RXMAC_MC_FRM_CNT_COUNT
;
3531 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3532 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3533 if (val
& XRXMAC_STATUS_RXBCAST_CNT_EXP
)
3534 mp
->rx_bcasts
+= RXMAC_BC_FRM_CNT_COUNT
;
3535 if (val
& XRXMAC_STATUS_RXHIST1_CNT_EXP
)
3536 mp
->rx_hist_cnt1
+= RXMAC_HIST_CNT1_COUNT
;
3537 if (val
& XRXMAC_STATUS_RXHIST2_CNT_EXP
)
3538 mp
->rx_hist_cnt2
+= RXMAC_HIST_CNT2_COUNT
;
3539 if (val
& XRXMAC_STATUS_RXHIST3_CNT_EXP
)
3540 mp
->rx_hist_cnt3
+= RXMAC_HIST_CNT3_COUNT
;
3541 if (val
& XRXMAC_STATUS_RXHIST4_CNT_EXP
)
3542 mp
->rx_hist_cnt4
+= RXMAC_HIST_CNT4_COUNT
;
3543 if (val
& XRXMAC_STATUS_RXHIST5_CNT_EXP
)
3544 mp
->rx_hist_cnt5
+= RXMAC_HIST_CNT5_COUNT
;
3545 if (val
& XRXMAC_STATUS_RXHIST6_CNT_EXP
)
3546 mp
->rx_hist_cnt6
+= RXMAC_HIST_CNT6_COUNT
;
3547 if (val
& XRXMAC_STATUS_RXHIST7_CNT_EXP
)
3548 mp
->rx_hist_cnt7
+= RXMAC_HIST_CNT7_COUNT
;
3549 if (val
& XRXMAC_STAT_MSK_RXOCTET_CNT_EXP
)
3550 mp
->rx_octets
+= RXMAC_BT_CNT_COUNT
;
3551 if (val
& XRXMAC_STATUS_CVIOLERR_CNT_EXP
)
3552 mp
->rx_code_violations
+= RXMAC_CD_VIO_CNT_COUNT
;
3553 if (val
& XRXMAC_STATUS_LENERR_CNT_EXP
)
3554 mp
->rx_len_errors
+= RXMAC_MPSZER_CNT_COUNT
;
3555 if (val
& XRXMAC_STATUS_CRCERR_CNT_EXP
)
3556 mp
->rx_crc_errors
+= RXMAC_CRC_ER_CNT_COUNT
;
3557 if (val
& XRXMAC_STATUS_RXUFLOW
)
3558 mp
->rx_underflows
++;
3559 if (val
& XRXMAC_STATUS_RXOFLOW
)
3562 val
= nr64_mac(XMAC_FC_STAT
);
3563 if (val
& XMAC_FC_STAT_TX_MAC_NPAUSE
)
3564 mp
->pause_off_state
++;
3565 if (val
& XMAC_FC_STAT_TX_MAC_PAUSE
)
3566 mp
->pause_on_state
++;
3567 if (val
& XMAC_FC_STAT_RX_MAC_RPAUSE
)
3568 mp
->pause_received
++;
3571 static void niu_bmac_interrupt(struct niu
*np
)
3573 struct niu_bmac_stats
*mp
= &np
->mac_stats
.bmac
;
3576 val
= nr64_mac(BTXMAC_STATUS
);
3577 if (val
& BTXMAC_STATUS_UNDERRUN
)
3578 mp
->tx_underflow_errors
++;
3579 if (val
& BTXMAC_STATUS_MAX_PKT_ERR
)
3580 mp
->tx_max_pkt_size_errors
++;
3581 if (val
& BTXMAC_STATUS_BYTE_CNT_EXP
)
3582 mp
->tx_bytes
+= BTXMAC_BYTE_CNT_COUNT
;
3583 if (val
& BTXMAC_STATUS_FRAME_CNT_EXP
)
3584 mp
->tx_frames
+= BTXMAC_FRM_CNT_COUNT
;
3586 val
= nr64_mac(BRXMAC_STATUS
);
3587 if (val
& BRXMAC_STATUS_OVERFLOW
)
3589 if (val
& BRXMAC_STATUS_FRAME_CNT_EXP
)
3590 mp
->rx_frames
+= BRXMAC_FRAME_CNT_COUNT
;
3591 if (val
& BRXMAC_STATUS_ALIGN_ERR_EXP
)
3592 mp
->rx_align_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
3593 if (val
& BRXMAC_STATUS_CRC_ERR_EXP
)
3594 mp
->rx_crc_errors
+= BRXMAC_ALIGN_ERR_CNT_COUNT
;
3595 if (val
& BRXMAC_STATUS_LEN_ERR_EXP
)
3596 mp
->rx_len_errors
+= BRXMAC_CODE_VIOL_ERR_CNT_COUNT
;
3598 val
= nr64_mac(BMAC_CTRL_STATUS
);
3599 if (val
& BMAC_CTRL_STATUS_NOPAUSE
)
3600 mp
->pause_off_state
++;
3601 if (val
& BMAC_CTRL_STATUS_PAUSE
)
3602 mp
->pause_on_state
++;
3603 if (val
& BMAC_CTRL_STATUS_PAUSE_RECV
)
3604 mp
->pause_received
++;
3607 static int niu_mac_interrupt(struct niu
*np
)
3609 if (np
->flags
& NIU_FLAGS_XMAC
)
3610 niu_xmac_interrupt(np
);
3612 niu_bmac_interrupt(np
);
3617 static void niu_log_device_error(struct niu
*np
, u64 stat
)
3619 dev_err(np
->device
, PFX
"%s: Core device errors ( ",
3622 if (stat
& SYS_ERR_MASK_META2
)
3624 if (stat
& SYS_ERR_MASK_META1
)
3626 if (stat
& SYS_ERR_MASK_PEU
)
3628 if (stat
& SYS_ERR_MASK_TXC
)
3630 if (stat
& SYS_ERR_MASK_RDMC
)
3632 if (stat
& SYS_ERR_MASK_TDMC
)
3634 if (stat
& SYS_ERR_MASK_ZCP
)
3636 if (stat
& SYS_ERR_MASK_FFLP
)
3638 if (stat
& SYS_ERR_MASK_IPP
)
3640 if (stat
& SYS_ERR_MASK_MAC
)
3642 if (stat
& SYS_ERR_MASK_SMX
)
3648 static int niu_device_error(struct niu
*np
)
3650 u64 stat
= nr64(SYS_ERR_STAT
);
3652 dev_err(np
->device
, PFX
"%s: Core device error, stat[%llx]\n",
3653 np
->dev
->name
, (unsigned long long) stat
);
3655 niu_log_device_error(np
, stat
);
3660 static int niu_slowpath_interrupt(struct niu
*np
, struct niu_ldg
*lp
,
3661 u64 v0
, u64 v1
, u64 v2
)
3670 if (v1
& 0x00000000ffffffffULL
) {
3671 u32 rx_vec
= (v1
& 0xffffffff);
3673 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3674 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3676 if (rx_vec
& (1 << rp
->rx_channel
)) {
3677 int r
= niu_rx_error(np
, rp
);
3682 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
),
3683 RX_DMA_CTL_STAT_MEX
);
3688 if (v1
& 0x7fffffff00000000ULL
) {
3689 u32 tx_vec
= (v1
>> 32) & 0x7fffffff;
3691 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3692 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3694 if (tx_vec
& (1 << rp
->tx_channel
)) {
3695 int r
= niu_tx_error(np
, rp
);
3701 if ((v0
| v1
) & 0x8000000000000000ULL
) {
3702 int r
= niu_mif_interrupt(np
);
3708 int r
= niu_mac_interrupt(np
);
3713 int r
= niu_device_error(np
);
3720 niu_enable_interrupts(np
, 0);
3725 static void niu_rxchan_intr(struct niu
*np
, struct rx_ring_info
*rp
,
3728 struct rxdma_mailbox
*mbox
= rp
->mbox
;
3729 u64 stat_write
, stat
= le64_to_cpup(&mbox
->rx_dma_ctl_stat
);
3731 stat_write
= (RX_DMA_CTL_STAT_RCRTHRES
|
3732 RX_DMA_CTL_STAT_RCRTO
);
3733 nw64(RX_DMA_CTL_STAT(rp
->rx_channel
), stat_write
);
3735 niudbg(INTR
, "%s: rxchan_intr stat[%llx]\n",
3736 np
->dev
->name
, (unsigned long long) stat
);
3739 static void niu_txchan_intr(struct niu
*np
, struct tx_ring_info
*rp
,
3742 rp
->tx_cs
= nr64(TX_CS(rp
->tx_channel
));
3744 niudbg(INTR
, "%s: txchan_intr cs[%llx]\n",
3745 np
->dev
->name
, (unsigned long long) rp
->tx_cs
);
3748 static void __niu_fastpath_interrupt(struct niu
*np
, int ldg
, u64 v0
)
3750 struct niu_parent
*parent
= np
->parent
;
3754 tx_vec
= (v0
>> 32);
3755 rx_vec
= (v0
& 0xffffffff);
3757 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3758 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3759 int ldn
= LDN_RXDMA(rp
->rx_channel
);
3761 if (parent
->ldg_map
[ldn
] != ldg
)
3764 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
3765 if (rx_vec
& (1 << rp
->rx_channel
))
3766 niu_rxchan_intr(np
, rp
, ldn
);
3769 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3770 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3771 int ldn
= LDN_TXDMA(rp
->tx_channel
);
3773 if (parent
->ldg_map
[ldn
] != ldg
)
3776 nw64(LD_IM0(ldn
), LD_IM0_MASK
);
3777 if (tx_vec
& (1 << rp
->tx_channel
))
3778 niu_txchan_intr(np
, rp
, ldn
);
3782 static void niu_schedule_napi(struct niu
*np
, struct niu_ldg
*lp
,
3783 u64 v0
, u64 v1
, u64 v2
)
3785 if (likely(netif_rx_schedule_prep(np
->dev
, &lp
->napi
))) {
3789 __niu_fastpath_interrupt(np
, lp
->ldg_num
, v0
);
3790 __netif_rx_schedule(np
->dev
, &lp
->napi
);
3794 static irqreturn_t
niu_interrupt(int irq
, void *dev_id
)
3796 struct niu_ldg
*lp
= dev_id
;
3797 struct niu
*np
= lp
->np
;
3798 int ldg
= lp
->ldg_num
;
3799 unsigned long flags
;
3802 if (netif_msg_intr(np
))
3803 printk(KERN_DEBUG PFX
"niu_interrupt() ldg[%p](%d) ",
3806 spin_lock_irqsave(&np
->lock
, flags
);
3808 v0
= nr64(LDSV0(ldg
));
3809 v1
= nr64(LDSV1(ldg
));
3810 v2
= nr64(LDSV2(ldg
));
3812 if (netif_msg_intr(np
))
3813 printk("v0[%llx] v1[%llx] v2[%llx]\n",
3814 (unsigned long long) v0
,
3815 (unsigned long long) v1
,
3816 (unsigned long long) v2
);
3818 if (unlikely(!v0
&& !v1
&& !v2
)) {
3819 spin_unlock_irqrestore(&np
->lock
, flags
);
3823 if (unlikely((v0
& ((u64
)1 << LDN_MIF
)) || v1
|| v2
)) {
3824 int err
= niu_slowpath_interrupt(np
, lp
, v0
, v1
, v2
);
3828 if (likely(v0
& ~((u64
)1 << LDN_MIF
)))
3829 niu_schedule_napi(np
, lp
, v0
, v1
, v2
);
3831 niu_ldg_rearm(np
, lp
, 1);
3833 spin_unlock_irqrestore(&np
->lock
, flags
);
3838 static void niu_free_rx_ring_info(struct niu
*np
, struct rx_ring_info
*rp
)
3841 np
->ops
->free_coherent(np
->device
,
3842 sizeof(struct rxdma_mailbox
),
3843 rp
->mbox
, rp
->mbox_dma
);
3847 np
->ops
->free_coherent(np
->device
,
3848 MAX_RCR_RING_SIZE
* sizeof(__le64
),
3849 rp
->rcr
, rp
->rcr_dma
);
3851 rp
->rcr_table_size
= 0;
3855 niu_rbr_free(np
, rp
);
3857 np
->ops
->free_coherent(np
->device
,
3858 MAX_RBR_RING_SIZE
* sizeof(__le32
),
3859 rp
->rbr
, rp
->rbr_dma
);
3861 rp
->rbr_table_size
= 0;
3868 static void niu_free_tx_ring_info(struct niu
*np
, struct tx_ring_info
*rp
)
3871 np
->ops
->free_coherent(np
->device
,
3872 sizeof(struct txdma_mailbox
),
3873 rp
->mbox
, rp
->mbox_dma
);
3879 for (i
= 0; i
< MAX_TX_RING_SIZE
; i
++) {
3880 if (rp
->tx_buffs
[i
].skb
)
3881 (void) release_tx_packet(np
, rp
, i
);
3884 np
->ops
->free_coherent(np
->device
,
3885 MAX_TX_RING_SIZE
* sizeof(__le64
),
3886 rp
->descr
, rp
->descr_dma
);
3895 static void niu_free_channels(struct niu
*np
)
3900 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
3901 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
3903 niu_free_rx_ring_info(np
, rp
);
3905 kfree(np
->rx_rings
);
3906 np
->rx_rings
= NULL
;
3907 np
->num_rx_rings
= 0;
3911 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
3912 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
3914 niu_free_tx_ring_info(np
, rp
);
3916 kfree(np
->tx_rings
);
3917 np
->tx_rings
= NULL
;
3918 np
->num_tx_rings
= 0;
3922 static int niu_alloc_rx_ring_info(struct niu
*np
,
3923 struct rx_ring_info
*rp
)
3925 BUILD_BUG_ON(sizeof(struct rxdma_mailbox
) != 64);
3927 rp
->rxhash
= kzalloc(MAX_RBR_RING_SIZE
* sizeof(struct page
*),
3932 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
3933 sizeof(struct rxdma_mailbox
),
3934 &rp
->mbox_dma
, GFP_KERNEL
);
3937 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
3938 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3939 "RXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
3943 rp
->rcr
= np
->ops
->alloc_coherent(np
->device
,
3944 MAX_RCR_RING_SIZE
* sizeof(__le64
),
3945 &rp
->rcr_dma
, GFP_KERNEL
);
3948 if ((unsigned long)rp
->rcr
& (64UL - 1)) {
3949 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3950 "RXDMA RCR table %p\n", np
->dev
->name
, rp
->rcr
);
3953 rp
->rcr_table_size
= MAX_RCR_RING_SIZE
;
3956 rp
->rbr
= np
->ops
->alloc_coherent(np
->device
,
3957 MAX_RBR_RING_SIZE
* sizeof(__le32
),
3958 &rp
->rbr_dma
, GFP_KERNEL
);
3961 if ((unsigned long)rp
->rbr
& (64UL - 1)) {
3962 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3963 "RXDMA RBR table %p\n", np
->dev
->name
, rp
->rbr
);
3966 rp
->rbr_table_size
= MAX_RBR_RING_SIZE
;
3968 rp
->rbr_pending
= 0;
static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
{
	int mtu = np->dev->mtu;

	/* These values are recommended by the HW designers for fair
	 * utilization of DRR amongst the rings.
	 */
	rp->max_burst = mtu + 32;
	if (rp->max_burst > 4096)
		rp->max_burst = 4096;
}
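/* Worked example of the burst sizing above: a standard 1500-byte MTU
 * yields a per-ring DRR burst of 1532 bytes, while a 9000-byte jumbo
 * MTU would compute 9032 and is clamped to the 4096-byte cap.
 * Illustrative only, not part of the driver:
 */
#if 0
static u32 demo_max_burst(int mtu)
{
	u32 burst = mtu + 32;

	return burst > 4096 ? 4096 : burst;	/* demo_max_burst(1500) == 1532 */
}
#endif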
3985 static int niu_alloc_tx_ring_info(struct niu
*np
,
3986 struct tx_ring_info
*rp
)
3988 BUILD_BUG_ON(sizeof(struct txdma_mailbox
) != 64);
3990 rp
->mbox
= np
->ops
->alloc_coherent(np
->device
,
3991 sizeof(struct txdma_mailbox
),
3992 &rp
->mbox_dma
, GFP_KERNEL
);
3995 if ((unsigned long)rp
->mbox
& (64UL - 1)) {
3996 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
3997 "TXDMA mailbox %p\n", np
->dev
->name
, rp
->mbox
);
4001 rp
->descr
= np
->ops
->alloc_coherent(np
->device
,
4002 MAX_TX_RING_SIZE
* sizeof(__le64
),
4003 &rp
->descr_dma
, GFP_KERNEL
);
4006 if ((unsigned long)rp
->descr
& (64UL - 1)) {
4007 dev_err(np
->device
, PFX
"%s: Coherent alloc gives misaligned "
4008 "TXDMA descr table %p\n", np
->dev
->name
, rp
->descr
);
4012 rp
->pending
= MAX_TX_RING_SIZE
;
4017 /* XXX make these configurable... XXX */
4018 rp
->mark_freq
= rp
->pending
/ 4;
4020 niu_set_max_burst(np
, rp
);
static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
{
	u16 bss;

	bss = min(PAGE_SHIFT, 15);

	rp->rbr_block_size = 1 << bss;
	rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);

	rp->rbr_sizes[0] = 256;
	rp->rbr_sizes[1] = 1024;
	if (np->dev->mtu > ETH_DATA_LEN) {
		switch (PAGE_SIZE) {
		case 4 * 1024:
			rp->rbr_sizes[2] = 4096;
			break;

		default:
			rp->rbr_sizes[2] = 8192;
			break;
		}
	} else {
		rp->rbr_sizes[2] = 2048;
	}
	rp->rbr_sizes[3] = rp->rbr_block_size;
}
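/* Worked example of the block sizing above: with 4KB pages
 * (PAGE_SHIFT == 12) bss is 12, so each RBR block is 4096 bytes and one
 * block fits per page; with 64KB pages (PAGE_SHIFT == 16) bss is capped
 * at 15, giving 32KB blocks and two blocks per page.  Illustrative
 * only, not part of the driver:
 */
#if 0
static u32 demo_rbr_blocks_per_page(int page_shift)
{
	int bss = page_shift < 15 ? page_shift : 15;
	u32 block_size = 1 << bss;		/* 4096 or 32768 */

	return (1 << page_shift) / block_size;	/* 1 or 2 */
}
#endif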
4052 static int niu_alloc_channels(struct niu
*np
)
4054 struct niu_parent
*parent
= np
->parent
;
4055 int first_rx_channel
, first_tx_channel
;
4059 first_rx_channel
= first_tx_channel
= 0;
4060 for (i
= 0; i
< port
; i
++) {
4061 first_rx_channel
+= parent
->rxchan_per_port
[i
];
4062 first_tx_channel
+= parent
->txchan_per_port
[i
];
4065 np
->num_rx_rings
= parent
->rxchan_per_port
[port
];
4066 np
->num_tx_rings
= parent
->txchan_per_port
[port
];
4068 np
->dev
->real_num_tx_queues
= np
->num_tx_rings
;
4070 np
->rx_rings
= kzalloc(np
->num_rx_rings
* sizeof(struct rx_ring_info
),
4076 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4077 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4080 rp
->rx_channel
= first_rx_channel
+ i
;
4082 err
= niu_alloc_rx_ring_info(np
, rp
);
4086 niu_size_rbr(np
, rp
);
4088 /* XXX better defaults, configurable, etc... XXX */
4089 rp
->nonsyn_window
= 64;
4090 rp
->nonsyn_threshold
= rp
->rcr_table_size
- 64;
4091 rp
->syn_window
= 64;
4092 rp
->syn_threshold
= rp
->rcr_table_size
- 64;
4093 rp
->rcr_pkt_threshold
= 16;
4094 rp
->rcr_timeout
= 8;
4095 rp
->rbr_kick_thresh
= RBR_REFILL_MIN
;
4096 if (rp
->rbr_kick_thresh
< rp
->rbr_blocks_per_page
)
4097 rp
->rbr_kick_thresh
= rp
->rbr_blocks_per_page
;
4099 err
= niu_rbr_fill(np
, rp
, GFP_KERNEL
);
4104 np
->tx_rings
= kzalloc(np
->num_tx_rings
* sizeof(struct tx_ring_info
),
4110 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
4111 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
4114 rp
->tx_channel
= first_tx_channel
+ i
;
4116 err
= niu_alloc_tx_ring_info(np
, rp
);
4124 niu_free_channels(np
);
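/* The loop near the top of niu_alloc_channels() derives this port's
 * first RX/TX channel numbers by summing the channel counts of all
 * lower-numbered ports.  For a hypothetical layout of
 * rxchan_per_port[] = { 8, 8, 4, 4 }, port 2 would own RX channels
 * 16..19.  A minimal sketch of that computation (illustrative only,
 * not part of the driver):
 */
#if 0
static int demo_first_channel(const int *chan_per_port, int port)
{
	int i, first = 0;

	for (i = 0; i < port; i++)
		first += chan_per_port[i];
	return first;	/* { 8, 8, 4, 4 }, port 2 -> 16 */
}
#endif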
4128 static int niu_tx_cs_sng_poll(struct niu
*np
, int channel
)
4132 while (--limit
> 0) {
4133 u64 val
= nr64(TX_CS(channel
));
4134 if (val
& TX_CS_SNG_STATE
)
4140 static int niu_tx_channel_stop(struct niu
*np
, int channel
)
4142 u64 val
= nr64(TX_CS(channel
));
4144 val
|= TX_CS_STOP_N_GO
;
4145 nw64(TX_CS(channel
), val
);
4147 return niu_tx_cs_sng_poll(np
, channel
);
4150 static int niu_tx_cs_reset_poll(struct niu
*np
, int channel
)
4154 while (--limit
> 0) {
4155 u64 val
= nr64(TX_CS(channel
));
4156 if (!(val
& TX_CS_RST
))
4162 static int niu_tx_channel_reset(struct niu
*np
, int channel
)
4164 u64 val
= nr64(TX_CS(channel
));
4168 nw64(TX_CS(channel
), val
);
4170 err
= niu_tx_cs_reset_poll(np
, channel
);
4172 nw64(TX_RING_KICK(channel
), 0);
4177 static int niu_tx_channel_lpage_init(struct niu
*np
, int channel
)
4181 nw64(TX_LOG_MASK1(channel
), 0);
4182 nw64(TX_LOG_VAL1(channel
), 0);
4183 nw64(TX_LOG_MASK2(channel
), 0);
4184 nw64(TX_LOG_VAL2(channel
), 0);
4185 nw64(TX_LOG_PAGE_RELO1(channel
), 0);
4186 nw64(TX_LOG_PAGE_RELO2(channel
), 0);
4187 nw64(TX_LOG_PAGE_HDL(channel
), 0);
4189 val
= (u64
)np
->port
<< TX_LOG_PAGE_VLD_FUNC_SHIFT
;
4190 val
|= (TX_LOG_PAGE_VLD_PAGE0
| TX_LOG_PAGE_VLD_PAGE1
);
4191 nw64(TX_LOG_PAGE_VLD(channel
), val
);
4193 /* XXX TXDMA 32bit mode? XXX */
4198 static void niu_txc_enable_port(struct niu
*np
, int on
)
4200 unsigned long flags
;
4203 niu_lock_parent(np
, flags
);
4204 val
= nr64(TXC_CONTROL
);
4205 mask
= (u64
)1 << np
->port
;
4207 val
|= TXC_CONTROL_ENABLE
| mask
;
4210 if ((val
& ~TXC_CONTROL_ENABLE
) == 0)
4211 val
&= ~TXC_CONTROL_ENABLE
;
4213 nw64(TXC_CONTROL
, val
);
4214 niu_unlock_parent(np
, flags
);
4217 static void niu_txc_set_imask(struct niu
*np
, u64 imask
)
4219 unsigned long flags
;
4222 niu_lock_parent(np
, flags
);
4223 val
= nr64(TXC_INT_MASK
);
4224 val
&= ~TXC_INT_MASK_VAL(np
->port
);
4225 val
|= (imask
<< TXC_INT_MASK_VAL_SHIFT(np
->port
));
4226 niu_unlock_parent(np
, flags
);
4229 static void niu_txc_port_dma_enable(struct niu
*np
, int on
)
4236 for (i
= 0; i
< np
->num_tx_rings
; i
++)
4237 val
|= (1 << np
->tx_rings
[i
].tx_channel
);
4239 nw64(TXC_PORT_DMA(np
->port
), val
);
static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
{
	int err, channel = rp->tx_channel;
	u64 val, ring_len;

	err = niu_tx_channel_stop(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_reset(np, channel);
	if (err)
		return err;

	err = niu_tx_channel_lpage_init(np, channel);
	if (err)
		return err;

	nw64(TXC_DMA_MAX(channel), rp->max_burst);
	nw64(TX_ENT_MSK(channel), 0);

	if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
			      TX_RNG_CFIG_STADDR)) {
		dev_err(np->device, PFX "%s: TX ring channel %d "
			"DMA addr (%llx) is not aligned.\n",
			np->dev->name, channel,
			(unsigned long long) rp->descr_dma);
		return -EINVAL;
	}

	/* The length field in TX_RNG_CFIG is measured in 64-byte
	 * blocks.  rp->pending is the number of TX descriptors in
	 * our ring, 8 bytes each, thus we divide by 8 bytes more
	 * to get the proper value the chip wants.
	 */
	ring_len = (rp->pending / 8);

	val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
	       rp->descr_dma);
	nw64(TX_RNG_CFIG(channel), val);

	if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
	    ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
		dev_err(np->device, PFX "%s: TX ring channel %d "
			"MBOX addr (%llx) has illegal bits.\n",
			np->dev->name, channel,
			(unsigned long long) rp->mbox_dma);
		return -EINVAL;
	}
	nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
	nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);

	nw64(TX_CS(channel), 0);

	rp->last_pkt_cnt = 0;

	return 0;
}
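/* Worked example of the TX_RNG_CFIG length computation above: a ring of
 * 256 descriptors occupies 256 * 8 = 2048 bytes, i.e. 2048 / 64 = 32
 * 64-byte blocks, which is exactly rp->pending / 8.  Illustrative only,
 * not part of the driver:
 */
#if 0
static u64 demo_tx_ring_len_field(unsigned int num_descr)
{
	return ((u64)num_descr * 8) / 64;	/* == num_descr / 8 */
}
#endif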
4300 static void niu_init_rdc_groups(struct niu
*np
)
4302 struct niu_rdc_tables
*tp
= &np
->parent
->rdc_group_cfg
[np
->port
];
4303 int i
, first_table_num
= tp
->first_table_num
;
4305 for (i
= 0; i
< tp
->num_tables
; i
++) {
4306 struct rdc_table
*tbl
= &tp
->tables
[i
];
4307 int this_table
= first_table_num
+ i
;
4310 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++)
4311 nw64(RDC_TBL(this_table
, slot
),
4312 tbl
->rxdma_channel
[slot
]);
4315 nw64(DEF_RDC(np
->port
), np
->parent
->rdc_default
[np
->port
]);
4318 static void niu_init_drr_weight(struct niu
*np
)
4320 int type
= phy_decode(np
->parent
->port_phy
, np
->port
);
4325 val
= PT_DRR_WEIGHT_DEFAULT_10G
;
4330 val
= PT_DRR_WEIGHT_DEFAULT_1G
;
4333 nw64(PT_DRR_WT(np
->port
), val
);
4336 static int niu_init_hostinfo(struct niu
*np
)
4338 struct niu_parent
*parent
= np
->parent
;
4339 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
4340 int i
, err
, num_alt
= niu_num_alt_addr(np
);
4341 int first_rdc_table
= tp
->first_table_num
;
4343 err
= niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
4347 err
= niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
4351 for (i
= 0; i
< num_alt
; i
++) {
4352 err
= niu_set_alt_mac_rdc_table(np
, i
, first_rdc_table
, 1);
4360 static int niu_rx_channel_reset(struct niu
*np
, int channel
)
4362 return niu_set_and_wait_clear(np
, RXDMA_CFIG1(channel
),
4363 RXDMA_CFIG1_RST
, 1000, 10,
4367 static int niu_rx_channel_lpage_init(struct niu
*np
, int channel
)
4371 nw64(RX_LOG_MASK1(channel
), 0);
4372 nw64(RX_LOG_VAL1(channel
), 0);
4373 nw64(RX_LOG_MASK2(channel
), 0);
4374 nw64(RX_LOG_VAL2(channel
), 0);
4375 nw64(RX_LOG_PAGE_RELO1(channel
), 0);
4376 nw64(RX_LOG_PAGE_RELO2(channel
), 0);
4377 nw64(RX_LOG_PAGE_HDL(channel
), 0);
4379 val
= (u64
)np
->port
<< RX_LOG_PAGE_VLD_FUNC_SHIFT
;
4380 val
|= (RX_LOG_PAGE_VLD_PAGE0
| RX_LOG_PAGE_VLD_PAGE1
);
4381 nw64(RX_LOG_PAGE_VLD(channel
), val
);
4386 static void niu_rx_channel_wred_init(struct niu
*np
, struct rx_ring_info
*rp
)
4390 val
= (((u64
)rp
->nonsyn_window
<< RDC_RED_PARA_WIN_SHIFT
) |
4391 ((u64
)rp
->nonsyn_threshold
<< RDC_RED_PARA_THRE_SHIFT
) |
4392 ((u64
)rp
->syn_window
<< RDC_RED_PARA_WIN_SYN_SHIFT
) |
4393 ((u64
)rp
->syn_threshold
<< RDC_RED_PARA_THRE_SYN_SHIFT
));
4394 nw64(RDC_RED_PARA(rp
->rx_channel
), val
);
4397 static int niu_compute_rbr_cfig_b(struct rx_ring_info
*rp
, u64
*ret
)
4401 switch (rp
->rbr_block_size
) {
4403 val
|= (RBR_BLKSIZE_4K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4406 val
|= (RBR_BLKSIZE_8K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4409 val
|= (RBR_BLKSIZE_16K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4412 val
|= (RBR_BLKSIZE_32K
<< RBR_CFIG_B_BLKSIZE_SHIFT
);
4417 val
|= RBR_CFIG_B_VLD2
;
4418 switch (rp
->rbr_sizes
[2]) {
4420 val
|= (RBR_BUFSZ2_2K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4423 val
|= (RBR_BUFSZ2_4K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4426 val
|= (RBR_BUFSZ2_8K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4429 val
|= (RBR_BUFSZ2_16K
<< RBR_CFIG_B_BUFSZ2_SHIFT
);
4435 val
|= RBR_CFIG_B_VLD1
;
4436 switch (rp
->rbr_sizes
[1]) {
4438 val
|= (RBR_BUFSZ1_1K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4441 val
|= (RBR_BUFSZ1_2K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4444 val
|= (RBR_BUFSZ1_4K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4447 val
|= (RBR_BUFSZ1_8K
<< RBR_CFIG_B_BUFSZ1_SHIFT
);
4453 val
|= RBR_CFIG_B_VLD0
;
4454 switch (rp
->rbr_sizes
[0]) {
4456 val
|= (RBR_BUFSZ0_256
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4459 val
|= (RBR_BUFSZ0_512
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4462 val
|= (RBR_BUFSZ0_1K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4465 val
|= (RBR_BUFSZ0_2K
<< RBR_CFIG_B_BUFSZ0_SHIFT
);
4476 static int niu_enable_rx_channel(struct niu
*np
, int channel
, int on
)
4478 u64 val
= nr64(RXDMA_CFIG1(channel
));
4482 val
|= RXDMA_CFIG1_EN
;
4484 val
&= ~RXDMA_CFIG1_EN
;
4485 nw64(RXDMA_CFIG1(channel
), val
);
4488 while (--limit
> 0) {
4489 if (nr64(RXDMA_CFIG1(channel
)) & RXDMA_CFIG1_QST
)
4498 static int niu_init_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
4500 int err
, channel
= rp
->rx_channel
;
4503 err
= niu_rx_channel_reset(np
, channel
);
4507 err
= niu_rx_channel_lpage_init(np
, channel
);
4511 niu_rx_channel_wred_init(np
, rp
);
4513 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_RBR_EMPTY
);
4514 nw64(RX_DMA_CTL_STAT(channel
),
4515 (RX_DMA_CTL_STAT_MEX
|
4516 RX_DMA_CTL_STAT_RCRTHRES
|
4517 RX_DMA_CTL_STAT_RCRTO
|
4518 RX_DMA_CTL_STAT_RBR_EMPTY
));
4519 nw64(RXDMA_CFIG1(channel
), rp
->mbox_dma
>> 32);
4520 nw64(RXDMA_CFIG2(channel
), (rp
->mbox_dma
& 0x00000000ffffffc0));
4521 nw64(RBR_CFIG_A(channel
),
4522 ((u64
)rp
->rbr_table_size
<< RBR_CFIG_A_LEN_SHIFT
) |
4523 (rp
->rbr_dma
& (RBR_CFIG_A_STADDR_BASE
| RBR_CFIG_A_STADDR
)));
4524 err
= niu_compute_rbr_cfig_b(rp
, &val
);
4527 nw64(RBR_CFIG_B(channel
), val
);
4528 nw64(RCRCFIG_A(channel
),
4529 ((u64
)rp
->rcr_table_size
<< RCRCFIG_A_LEN_SHIFT
) |
4530 (rp
->rcr_dma
& (RCRCFIG_A_STADDR_BASE
| RCRCFIG_A_STADDR
)));
4531 nw64(RCRCFIG_B(channel
),
4532 ((u64
)rp
->rcr_pkt_threshold
<< RCRCFIG_B_PTHRES_SHIFT
) |
4534 ((u64
)rp
->rcr_timeout
<< RCRCFIG_B_TIMEOUT_SHIFT
));
4536 err
= niu_enable_rx_channel(np
, channel
, 1);
4540 nw64(RBR_KICK(channel
), rp
->rbr_index
);
4542 val
= nr64(RX_DMA_CTL_STAT(channel
));
4543 val
|= RX_DMA_CTL_STAT_RBR_EMPTY
;
4544 nw64(RX_DMA_CTL_STAT(channel
), val
);
4549 static int niu_init_rx_channels(struct niu
*np
)
4551 unsigned long flags
;
4552 u64 seed
= jiffies_64
;
4555 niu_lock_parent(np
, flags
);
4556 nw64(RX_DMA_CK_DIV
, np
->parent
->rxdma_clock_divider
);
4557 nw64(RED_RAN_INIT
, RED_RAN_INIT_OPMODE
| (seed
& RED_RAN_INIT_VAL
));
4558 niu_unlock_parent(np
, flags
);
4560 /* XXX RXDMA 32bit mode? XXX */
4562 niu_init_rdc_groups(np
);
4563 niu_init_drr_weight(np
);
4565 err
= niu_init_hostinfo(np
);
4569 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
4570 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
4572 err
= niu_init_one_rx_channel(np
, rp
);
4580 static int niu_set_ip_frag_rule(struct niu
*np
)
4582 struct niu_parent
*parent
= np
->parent
;
4583 struct niu_classifier
*cp
= &np
->clas
;
4584 struct niu_tcam_entry
*tp
;
4587 /* XXX fix this allocation scheme XXX */
4588 index
= cp
->tcam_index
;
4589 tp
= &parent
->tcam
[index
];
4591 /* Note that the noport bit is the same in both ipv4 and
4592 * ipv6 format TCAM entries.
4594 memset(tp
, 0, sizeof(*tp
));
4595 tp
->key
[1] = TCAM_V4KEY1_NOPORT
;
4596 tp
->key_mask
[1] = TCAM_V4KEY1_NOPORT
;
4597 tp
->assoc_data
= (TCAM_ASSOCDATA_TRES_USE_OFFSET
|
4598 ((u64
)0 << TCAM_ASSOCDATA_OFFSET_SHIFT
));
4599 err
= tcam_write(np
, index
, tp
->key
, tp
->key_mask
);
4602 err
= tcam_assoc_write(np
, index
, tp
->assoc_data
);
4609 static int niu_init_classifier_hw(struct niu
*np
)
4611 struct niu_parent
*parent
= np
->parent
;
4612 struct niu_classifier
*cp
= &np
->clas
;
4615 nw64(H1POLY
, cp
->h1_init
);
4616 nw64(H2POLY
, cp
->h2_init
);
4618 err
= niu_init_hostinfo(np
);
4622 for (i
= 0; i
< ENET_VLAN_TBL_NUM_ENTRIES
; i
++) {
4623 struct niu_vlan_rdc
*vp
= &cp
->vlan_mappings
[i
];
4625 vlan_tbl_write(np
, i
, np
->port
,
4626 vp
->vlan_pref
, vp
->rdc_num
);
4629 for (i
= 0; i
< cp
->num_alt_mac_mappings
; i
++) {
4630 struct niu_altmac_rdc
*ap
= &cp
->alt_mac_mappings
[i
];
4632 err
= niu_set_alt_mac_rdc_table(np
, ap
->alt_mac_num
,
4633 ap
->rdc_num
, ap
->mac_pref
);
4638 for (i
= CLASS_CODE_USER_PROG1
; i
<= CLASS_CODE_SCTP_IPV6
; i
++) {
4639 int index
= i
- CLASS_CODE_USER_PROG1
;
4641 err
= niu_set_tcam_key(np
, i
, parent
->tcam_key
[index
]);
4644 err
= niu_set_flow_key(np
, i
, parent
->flow_key
[index
]);
4649 err
= niu_set_ip_frag_rule(np
);
4658 static int niu_zcp_write(struct niu
*np
, int index
, u64
*data
)
4660 nw64(ZCP_RAM_DATA0
, data
[0]);
4661 nw64(ZCP_RAM_DATA1
, data
[1]);
4662 nw64(ZCP_RAM_DATA2
, data
[2]);
4663 nw64(ZCP_RAM_DATA3
, data
[3]);
4664 nw64(ZCP_RAM_DATA4
, data
[4]);
4665 nw64(ZCP_RAM_BE
, ZCP_RAM_BE_VAL
);
4667 (ZCP_RAM_ACC_WRITE
|
4668 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
4669 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
4671 return niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4675 static int niu_zcp_read(struct niu
*np
, int index
, u64
*data
)
4679 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4682 dev_err(np
->device
, PFX
"%s: ZCP read busy won't clear, "
4683 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
4684 (unsigned long long) nr64(ZCP_RAM_ACC
));
4690 (0 << ZCP_RAM_ACC_ZFCID_SHIFT
) |
4691 (ZCP_RAM_SEL_CFIFO(np
->port
) << ZCP_RAM_ACC_RAM_SEL_SHIFT
)));
4693 err
= niu_wait_bits_clear(np
, ZCP_RAM_ACC
, ZCP_RAM_ACC_BUSY
,
4696 dev_err(np
->device
, PFX
"%s: ZCP read busy2 won't clear, "
4697 "ZCP_RAM_ACC[%llx]\n", np
->dev
->name
,
4698 (unsigned long long) nr64(ZCP_RAM_ACC
));
4702 data
[0] = nr64(ZCP_RAM_DATA0
);
4703 data
[1] = nr64(ZCP_RAM_DATA1
);
4704 data
[2] = nr64(ZCP_RAM_DATA2
);
4705 data
[3] = nr64(ZCP_RAM_DATA3
);
4706 data
[4] = nr64(ZCP_RAM_DATA4
);
4711 static void niu_zcp_cfifo_reset(struct niu
*np
)
4713 u64 val
= nr64(RESET_CFIFO
);
4715 val
|= RESET_CFIFO_RST(np
->port
);
4716 nw64(RESET_CFIFO
, val
);
4719 val
&= ~RESET_CFIFO_RST(np
->port
);
4720 nw64(RESET_CFIFO
, val
);
4723 static int niu_init_zcp(struct niu
*np
)
4725 u64 data
[5], rbuf
[5];
4728 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
4729 if (np
->port
== 0 || np
->port
== 1)
4730 max
= ATLAS_P0_P1_CFIFO_ENTRIES
;
4732 max
= ATLAS_P2_P3_CFIFO_ENTRIES
;
4734 max
= NIU_CFIFO_ENTRIES
;
4742 for (i
= 0; i
< max
; i
++) {
4743 err
= niu_zcp_write(np
, i
, data
);
4746 err
= niu_zcp_read(np
, i
, rbuf
);
4751 niu_zcp_cfifo_reset(np
);
4752 nw64(CFIFO_ECC(np
->port
), 0);
4753 nw64(ZCP_INT_STAT
, ZCP_INT_STAT_ALL
);
4754 (void) nr64(ZCP_INT_STAT
);
4755 nw64(ZCP_INT_MASK
, ZCP_INT_MASK_ALL
);
4760 static void niu_ipp_write(struct niu
*np
, int index
, u64
*data
)
4762 u64 val
= nr64_ipp(IPP_CFIG
);
4764 nw64_ipp(IPP_CFIG
, val
| IPP_CFIG_DFIFO_PIO_W
);
4765 nw64_ipp(IPP_DFIFO_WR_PTR
, index
);
4766 nw64_ipp(IPP_DFIFO_WR0
, data
[0]);
4767 nw64_ipp(IPP_DFIFO_WR1
, data
[1]);
4768 nw64_ipp(IPP_DFIFO_WR2
, data
[2]);
4769 nw64_ipp(IPP_DFIFO_WR3
, data
[3]);
4770 nw64_ipp(IPP_DFIFO_WR4
, data
[4]);
4771 nw64_ipp(IPP_CFIG
, val
& ~IPP_CFIG_DFIFO_PIO_W
);
4774 static void niu_ipp_read(struct niu
*np
, int index
, u64
*data
)
4776 nw64_ipp(IPP_DFIFO_RD_PTR
, index
);
4777 data
[0] = nr64_ipp(IPP_DFIFO_RD0
);
4778 data
[1] = nr64_ipp(IPP_DFIFO_RD1
);
4779 data
[2] = nr64_ipp(IPP_DFIFO_RD2
);
4780 data
[3] = nr64_ipp(IPP_DFIFO_RD3
);
4781 data
[4] = nr64_ipp(IPP_DFIFO_RD4
);
4784 static int niu_ipp_reset(struct niu
*np
)
4786 return niu_set_and_wait_clear_ipp(np
, IPP_CFIG
, IPP_CFIG_SOFT_RST
,
4787 1000, 100, "IPP_CFIG");
4790 static int niu_init_ipp(struct niu
*np
)
4792 u64 data
[5], rbuf
[5], val
;
4795 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
4796 if (np
->port
== 0 || np
->port
== 1)
4797 max
= ATLAS_P0_P1_DFIFO_ENTRIES
;
4799 max
= ATLAS_P2_P3_DFIFO_ENTRIES
;
4801 max
= NIU_DFIFO_ENTRIES
;
4809 for (i
= 0; i
< max
; i
++) {
4810 niu_ipp_write(np
, i
, data
);
4811 niu_ipp_read(np
, i
, rbuf
);
4814 (void) nr64_ipp(IPP_INT_STAT
);
4815 (void) nr64_ipp(IPP_INT_STAT
);
4817 err
= niu_ipp_reset(np
);
4821 (void) nr64_ipp(IPP_PKT_DIS
);
4822 (void) nr64_ipp(IPP_BAD_CS_CNT
);
4823 (void) nr64_ipp(IPP_ECC
);
4825 (void) nr64_ipp(IPP_INT_STAT
);
4827 nw64_ipp(IPP_MSK
, ~IPP_MSK_ALL
);
4829 val
= nr64_ipp(IPP_CFIG
);
4830 val
&= ~IPP_CFIG_IP_MAX_PKT
;
4831 val
|= (IPP_CFIG_IPP_ENABLE
|
4832 IPP_CFIG_DFIFO_ECC_EN
|
4833 IPP_CFIG_DROP_BAD_CRC
|
4835 (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT
));
4836 nw64_ipp(IPP_CFIG
, val
);
4841 static void niu_handle_led(struct niu
*np
, int status
)
4844 val
= nr64_mac(XMAC_CONFIG
);
4846 if ((np
->flags
& NIU_FLAGS_10G
) != 0 &&
4847 (np
->flags
& NIU_FLAGS_FIBER
) != 0) {
4849 val
|= XMAC_CONFIG_LED_POLARITY
;
4850 val
&= ~XMAC_CONFIG_FORCE_LED_ON
;
4852 val
|= XMAC_CONFIG_FORCE_LED_ON
;
4853 val
&= ~XMAC_CONFIG_LED_POLARITY
;
4857 nw64_mac(XMAC_CONFIG
, val
);
4860 static void niu_init_xif_xmac(struct niu
*np
)
4862 struct niu_link_config
*lp
= &np
->link_config
;
4865 if (np
->flags
& NIU_FLAGS_XCVR_SERDES
) {
4866 val
= nr64(MIF_CONFIG
);
4867 val
|= MIF_CONFIG_ATCA_GE
;
4868 nw64(MIF_CONFIG
, val
);
4871 val
= nr64_mac(XMAC_CONFIG
);
4872 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
4874 val
|= XMAC_CONFIG_TX_OUTPUT_EN
;
4876 if (lp
->loopback_mode
== LOOPBACK_MAC
) {
4877 val
&= ~XMAC_CONFIG_SEL_POR_CLK_SRC
;
4878 val
|= XMAC_CONFIG_LOOPBACK
;
4880 val
&= ~XMAC_CONFIG_LOOPBACK
;
4883 if (np
->flags
& NIU_FLAGS_10G
) {
4884 val
&= ~XMAC_CONFIG_LFS_DISABLE
;
4886 val
|= XMAC_CONFIG_LFS_DISABLE
;
4887 if (!(np
->flags
& NIU_FLAGS_FIBER
) &&
4888 !(np
->flags
& NIU_FLAGS_XCVR_SERDES
))
4889 val
|= XMAC_CONFIG_1G_PCS_BYPASS
;
4891 val
&= ~XMAC_CONFIG_1G_PCS_BYPASS
;
4894 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
4896 if (lp
->active_speed
== SPEED_100
)
4897 val
|= XMAC_CONFIG_SEL_CLK_25MHZ
;
4899 val
&= ~XMAC_CONFIG_SEL_CLK_25MHZ
;
4901 nw64_mac(XMAC_CONFIG
, val
);
4903 val
= nr64_mac(XMAC_CONFIG
);
4904 val
&= ~XMAC_CONFIG_MODE_MASK
;
4905 if (np
->flags
& NIU_FLAGS_10G
) {
4906 val
|= XMAC_CONFIG_MODE_XGMII
;
4908 if (lp
->active_speed
== SPEED_100
)
4909 val
|= XMAC_CONFIG_MODE_MII
;
4911 val
|= XMAC_CONFIG_MODE_GMII
;
4914 nw64_mac(XMAC_CONFIG
, val
);
4917 static void niu_init_xif_bmac(struct niu
*np
)
4919 struct niu_link_config
*lp
= &np
->link_config
;
4922 val
= BMAC_XIF_CONFIG_TX_OUTPUT_EN
;
4924 if (lp
->loopback_mode
== LOOPBACK_MAC
)
4925 val
|= BMAC_XIF_CONFIG_MII_LOOPBACK
;
4927 val
&= ~BMAC_XIF_CONFIG_MII_LOOPBACK
;
4929 if (lp
->active_speed
== SPEED_1000
)
4930 val
|= BMAC_XIF_CONFIG_GMII_MODE
;
4932 val
&= ~BMAC_XIF_CONFIG_GMII_MODE
;
4934 val
&= ~(BMAC_XIF_CONFIG_LINK_LED
|
4935 BMAC_XIF_CONFIG_LED_POLARITY
);
4937 if (!(np
->flags
& NIU_FLAGS_10G
) &&
4938 !(np
->flags
& NIU_FLAGS_FIBER
) &&
4939 lp
->active_speed
== SPEED_100
)
4940 val
|= BMAC_XIF_CONFIG_25MHZ_CLOCK
;
4942 val
&= ~BMAC_XIF_CONFIG_25MHZ_CLOCK
;
4944 nw64_mac(BMAC_XIF_CONFIG
, val
);
4947 static void niu_init_xif(struct niu
*np
)
4949 if (np
->flags
& NIU_FLAGS_XMAC
)
4950 niu_init_xif_xmac(np
);
4952 niu_init_xif_bmac(np
);
4955 static void niu_pcs_mii_reset(struct niu
*np
)
4958 u64 val
= nr64_pcs(PCS_MII_CTL
);
4959 val
|= PCS_MII_CTL_RST
;
4960 nw64_pcs(PCS_MII_CTL
, val
);
4961 while ((--limit
>= 0) && (val
& PCS_MII_CTL_RST
)) {
4963 val
= nr64_pcs(PCS_MII_CTL
);
4967 static void niu_xpcs_reset(struct niu
*np
)
4970 u64 val
= nr64_xpcs(XPCS_CONTROL1
);
4971 val
|= XPCS_CONTROL1_RESET
;
4972 nw64_xpcs(XPCS_CONTROL1
, val
);
4973 while ((--limit
>= 0) && (val
& XPCS_CONTROL1_RESET
)) {
4975 val
= nr64_xpcs(XPCS_CONTROL1
);
4979 static int niu_init_pcs(struct niu
*np
)
4981 struct niu_link_config
*lp
= &np
->link_config
;
4984 switch (np
->flags
& (NIU_FLAGS_10G
|
4986 NIU_FLAGS_XCVR_SERDES
)) {
4987 case NIU_FLAGS_FIBER
:
4989 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
4990 nw64_pcs(PCS_DPATH_MODE
, 0);
4991 niu_pcs_mii_reset(np
);
4995 case NIU_FLAGS_10G
| NIU_FLAGS_FIBER
:
4996 case NIU_FLAGS_10G
| NIU_FLAGS_XCVR_SERDES
:
4998 if (!(np
->flags
& NIU_FLAGS_XMAC
))
5001 /* 10G copper or fiber */
5002 val
= nr64_mac(XMAC_CONFIG
);
5003 val
&= ~XMAC_CONFIG_10G_XPCS_BYPASS
;
5004 nw64_mac(XMAC_CONFIG
, val
);
5008 val
= nr64_xpcs(XPCS_CONTROL1
);
5009 if (lp
->loopback_mode
== LOOPBACK_PHY
)
5010 val
|= XPCS_CONTROL1_LOOPBACK
;
5012 val
&= ~XPCS_CONTROL1_LOOPBACK
;
5013 nw64_xpcs(XPCS_CONTROL1
, val
);
5015 nw64_xpcs(XPCS_DESKEW_ERR_CNT
, 0);
5016 (void) nr64_xpcs(XPCS_SYMERR_CNT01
);
5017 (void) nr64_xpcs(XPCS_SYMERR_CNT23
);
5021 case NIU_FLAGS_XCVR_SERDES
:
5023 niu_pcs_mii_reset(np
);
5024 nw64_pcs(PCS_CONF
, PCS_CONF_MASK
| PCS_CONF_ENABLE
);
5025 nw64_pcs(PCS_DPATH_MODE
, 0);
5030 case NIU_FLAGS_XCVR_SERDES
| NIU_FLAGS_FIBER
:
5031 /* 1G RGMII FIBER */
5032 nw64_pcs(PCS_DPATH_MODE
, PCS_DPATH_MODE_MII
);
5033 niu_pcs_mii_reset(np
);
5043 static int niu_reset_tx_xmac(struct niu
*np
)
5045 return niu_set_and_wait_clear_mac(np
, XTXMAC_SW_RST
,
5046 (XTXMAC_SW_RST_REG_RS
|
5047 XTXMAC_SW_RST_SOFT_RST
),
5048 1000, 100, "XTXMAC_SW_RST");
5051 static int niu_reset_tx_bmac(struct niu
*np
)
5055 nw64_mac(BTXMAC_SW_RST
, BTXMAC_SW_RST_RESET
);
5057 while (--limit
>= 0) {
5058 if (!(nr64_mac(BTXMAC_SW_RST
) & BTXMAC_SW_RST_RESET
))
5063 dev_err(np
->device
, PFX
"Port %u TX BMAC would not reset, "
5064 "BTXMAC_SW_RST[%llx]\n",
5066 (unsigned long long) nr64_mac(BTXMAC_SW_RST
));
5073 static int niu_reset_tx_mac(struct niu
*np
)
5075 if (np
->flags
& NIU_FLAGS_XMAC
)
5076 return niu_reset_tx_xmac(np
);
5078 return niu_reset_tx_bmac(np
);
5081 static void niu_init_tx_xmac(struct niu
*np
, u64 min
, u64 max
)
5085 val
= nr64_mac(XMAC_MIN
);
5086 val
&= ~(XMAC_MIN_TX_MIN_PKT_SIZE
|
5087 XMAC_MIN_RX_MIN_PKT_SIZE
);
5088 val
|= (min
<< XMAC_MIN_RX_MIN_PKT_SIZE_SHFT
);
5089 val
|= (min
<< XMAC_MIN_TX_MIN_PKT_SIZE_SHFT
);
5090 nw64_mac(XMAC_MIN
, val
);
5092 nw64_mac(XMAC_MAX
, max
);
5094 nw64_mac(XTXMAC_STAT_MSK
, ~(u64
)0);
5096 val
= nr64_mac(XMAC_IPG
);
5097 if (np
->flags
& NIU_FLAGS_10G
) {
5098 val
&= ~XMAC_IPG_IPG_XGMII
;
5099 val
|= (IPG_12_15_XGMII
<< XMAC_IPG_IPG_XGMII_SHIFT
);
5101 val
&= ~XMAC_IPG_IPG_MII_GMII
;
5102 val
|= (IPG_12_MII_GMII
<< XMAC_IPG_IPG_MII_GMII_SHIFT
);
5104 nw64_mac(XMAC_IPG
, val
);
5106 val
= nr64_mac(XMAC_CONFIG
);
5107 val
&= ~(XMAC_CONFIG_ALWAYS_NO_CRC
|
5108 XMAC_CONFIG_STRETCH_MODE
|
5109 XMAC_CONFIG_VAR_MIN_IPG_EN
|
5110 XMAC_CONFIG_TX_ENABLE
);
5111 nw64_mac(XMAC_CONFIG
, val
);
5113 nw64_mac(TXMAC_FRM_CNT
, 0);
5114 nw64_mac(TXMAC_BYTE_CNT
, 0);
5117 static void niu_init_tx_bmac(struct niu
*np
, u64 min
, u64 max
)
5121 nw64_mac(BMAC_MIN_FRAME
, min
);
5122 nw64_mac(BMAC_MAX_FRAME
, max
);
5124 nw64_mac(BTXMAC_STATUS_MASK
, ~(u64
)0);
5125 nw64_mac(BMAC_CTRL_TYPE
, 0x8808);
5126 nw64_mac(BMAC_PREAMBLE_SIZE
, 7);
5128 val
= nr64_mac(BTXMAC_CONFIG
);
5129 val
&= ~(BTXMAC_CONFIG_FCS_DISABLE
|
5130 BTXMAC_CONFIG_ENABLE
);
5131 nw64_mac(BTXMAC_CONFIG
, val
);
static void niu_init_tx_mac(struct niu *np)
{
	u64 min, max;

	min = 64;
	if (np->dev->mtu > ETH_DATA_LEN)
		max = 9216;
	else
		max = 1522;

	/* The XMAC_MIN register only accepts values for TX min which
	 * have the low 3 bits cleared.
	 */
	BUILD_BUG_ON(min & 0x7);

	if (np->flags & NIU_FLAGS_XMAC)
		niu_init_tx_xmac(np, min, max);
	else
		niu_init_tx_bmac(np, min, max);
}
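/* The BUILD_BUG_ON() above documents and, when the expression folds to
 * a constant, enforces at compile time that the TX minimum frame size
 * written to XMAC_MIN is a multiple of 8 (low three bits clear).  A
 * hypothetical check of the same style (illustrative only, not part of
 * the driver):
 */
#if 0
static void demo_tx_min_check(void)
{
	BUILD_BUG_ON(64 & 0x7);		/* 64 is a multiple of 8: passes */
	BUILD_BUG_ON(60 & 0x7);		/* 60 is not: would break the build */
}
#endif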
5155 static int niu_reset_rx_xmac(struct niu
*np
)
5159 nw64_mac(XRXMAC_SW_RST
,
5160 XRXMAC_SW_RST_REG_RS
| XRXMAC_SW_RST_SOFT_RST
);
5162 while (--limit
>= 0) {
5163 if (!(nr64_mac(XRXMAC_SW_RST
) & (XRXMAC_SW_RST_REG_RS
|
5164 XRXMAC_SW_RST_SOFT_RST
)))
5169 dev_err(np
->device
, PFX
"Port %u RX XMAC would not reset, "
5170 "XRXMAC_SW_RST[%llx]\n",
5172 (unsigned long long) nr64_mac(XRXMAC_SW_RST
));
5179 static int niu_reset_rx_bmac(struct niu
*np
)
5183 nw64_mac(BRXMAC_SW_RST
, BRXMAC_SW_RST_RESET
);
5185 while (--limit
>= 0) {
5186 if (!(nr64_mac(BRXMAC_SW_RST
) & BRXMAC_SW_RST_RESET
))
5191 dev_err(np
->device
, PFX
"Port %u RX BMAC would not reset, "
5192 "BRXMAC_SW_RST[%llx]\n",
5194 (unsigned long long) nr64_mac(BRXMAC_SW_RST
));
5201 static int niu_reset_rx_mac(struct niu
*np
)
5203 if (np
->flags
& NIU_FLAGS_XMAC
)
5204 return niu_reset_rx_xmac(np
);
5206 return niu_reset_rx_bmac(np
);
5209 static void niu_init_rx_xmac(struct niu
*np
)
5211 struct niu_parent
*parent
= np
->parent
;
5212 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
5213 int first_rdc_table
= tp
->first_table_num
;
5217 nw64_mac(XMAC_ADD_FILT0
, 0);
5218 nw64_mac(XMAC_ADD_FILT1
, 0);
5219 nw64_mac(XMAC_ADD_FILT2
, 0);
5220 nw64_mac(XMAC_ADD_FILT12_MASK
, 0);
5221 nw64_mac(XMAC_ADD_FILT00_MASK
, 0);
5222 for (i
= 0; i
< MAC_NUM_HASH
; i
++)
5223 nw64_mac(XMAC_HASH_TBL(i
), 0);
5224 nw64_mac(XRXMAC_STAT_MSK
, ~(u64
)0);
5225 niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
5226 niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
5228 val
= nr64_mac(XMAC_CONFIG
);
5229 val
&= ~(XMAC_CONFIG_RX_MAC_ENABLE
|
5230 XMAC_CONFIG_PROMISCUOUS
|
5231 XMAC_CONFIG_PROMISC_GROUP
|
5232 XMAC_CONFIG_ERR_CHK_DIS
|
5233 XMAC_CONFIG_RX_CRC_CHK_DIS
|
5234 XMAC_CONFIG_RESERVED_MULTICAST
|
5235 XMAC_CONFIG_RX_CODEV_CHK_DIS
|
5236 XMAC_CONFIG_ADDR_FILTER_EN
|
5237 XMAC_CONFIG_RCV_PAUSE_ENABLE
|
5238 XMAC_CONFIG_STRIP_CRC
|
5239 XMAC_CONFIG_PASS_FLOW_CTRL
|
5240 XMAC_CONFIG_MAC2IPP_PKT_CNT_EN
);
5241 val
|= (XMAC_CONFIG_HASH_FILTER_EN
);
5242 nw64_mac(XMAC_CONFIG
, val
);
5244 nw64_mac(RXMAC_BT_CNT
, 0);
5245 nw64_mac(RXMAC_BC_FRM_CNT
, 0);
5246 nw64_mac(RXMAC_MC_FRM_CNT
, 0);
5247 nw64_mac(RXMAC_FRAG_CNT
, 0);
5248 nw64_mac(RXMAC_HIST_CNT1
, 0);
5249 nw64_mac(RXMAC_HIST_CNT2
, 0);
5250 nw64_mac(RXMAC_HIST_CNT3
, 0);
5251 nw64_mac(RXMAC_HIST_CNT4
, 0);
5252 nw64_mac(RXMAC_HIST_CNT5
, 0);
5253 nw64_mac(RXMAC_HIST_CNT6
, 0);
5254 nw64_mac(RXMAC_HIST_CNT7
, 0);
5255 nw64_mac(RXMAC_MPSZER_CNT
, 0);
5256 nw64_mac(RXMAC_CRC_ER_CNT
, 0);
5257 nw64_mac(RXMAC_CD_VIO_CNT
, 0);
5258 nw64_mac(LINK_FAULT_CNT
, 0);
5261 static void niu_init_rx_bmac(struct niu
*np
)
5263 struct niu_parent
*parent
= np
->parent
;
5264 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[np
->port
];
5265 int first_rdc_table
= tp
->first_table_num
;
5269 nw64_mac(BMAC_ADD_FILT0
, 0);
5270 nw64_mac(BMAC_ADD_FILT1
, 0);
5271 nw64_mac(BMAC_ADD_FILT2
, 0);
5272 nw64_mac(BMAC_ADD_FILT12_MASK
, 0);
5273 nw64_mac(BMAC_ADD_FILT00_MASK
, 0);
5274 for (i
= 0; i
< MAC_NUM_HASH
; i
++)
5275 nw64_mac(BMAC_HASH_TBL(i
), 0);
5276 niu_set_primary_mac_rdc_table(np
, first_rdc_table
, 1);
5277 niu_set_multicast_mac_rdc_table(np
, first_rdc_table
, 1);
5278 nw64_mac(BRXMAC_STATUS_MASK
, ~(u64
)0);
5280 val
= nr64_mac(BRXMAC_CONFIG
);
5281 val
&= ~(BRXMAC_CONFIG_ENABLE
|
5282 BRXMAC_CONFIG_STRIP_PAD
|
5283 BRXMAC_CONFIG_STRIP_FCS
|
5284 BRXMAC_CONFIG_PROMISC
|
5285 BRXMAC_CONFIG_PROMISC_GRP
|
5286 BRXMAC_CONFIG_ADDR_FILT_EN
|
5287 BRXMAC_CONFIG_DISCARD_DIS
);
5288 val
|= (BRXMAC_CONFIG_HASH_FILT_EN
);
5289 nw64_mac(BRXMAC_CONFIG
, val
);
5291 val
= nr64_mac(BMAC_ADDR_CMPEN
);
5292 val
|= BMAC_ADDR_CMPEN_EN0
;
5293 nw64_mac(BMAC_ADDR_CMPEN
, val
);
5296 static void niu_init_rx_mac(struct niu
*np
)
5298 niu_set_primary_mac(np
, np
->dev
->dev_addr
);
5300 if (np
->flags
& NIU_FLAGS_XMAC
)
5301 niu_init_rx_xmac(np
);
5303 niu_init_rx_bmac(np
);
5306 static void niu_enable_tx_xmac(struct niu
*np
, int on
)
5308 u64 val
= nr64_mac(XMAC_CONFIG
);
5311 val
|= XMAC_CONFIG_TX_ENABLE
;
5313 val
&= ~XMAC_CONFIG_TX_ENABLE
;
5314 nw64_mac(XMAC_CONFIG
, val
);
5317 static void niu_enable_tx_bmac(struct niu
*np
, int on
)
5319 u64 val
= nr64_mac(BTXMAC_CONFIG
);
5322 val
|= BTXMAC_CONFIG_ENABLE
;
5324 val
&= ~BTXMAC_CONFIG_ENABLE
;
5325 nw64_mac(BTXMAC_CONFIG
, val
);
5328 static void niu_enable_tx_mac(struct niu
*np
, int on
)
5330 if (np
->flags
& NIU_FLAGS_XMAC
)
5331 niu_enable_tx_xmac(np
, on
);
5333 niu_enable_tx_bmac(np
, on
);
5336 static void niu_enable_rx_xmac(struct niu
*np
, int on
)
5338 u64 val
= nr64_mac(XMAC_CONFIG
);
5340 val
&= ~(XMAC_CONFIG_HASH_FILTER_EN
|
5341 XMAC_CONFIG_PROMISCUOUS
);
5343 if (np
->flags
& NIU_FLAGS_MCAST
)
5344 val
|= XMAC_CONFIG_HASH_FILTER_EN
;
5345 if (np
->flags
& NIU_FLAGS_PROMISC
)
5346 val
|= XMAC_CONFIG_PROMISCUOUS
;
5349 val
|= XMAC_CONFIG_RX_MAC_ENABLE
;
5351 val
&= ~XMAC_CONFIG_RX_MAC_ENABLE
;
5352 nw64_mac(XMAC_CONFIG
, val
);
5355 static void niu_enable_rx_bmac(struct niu
*np
, int on
)
5357 u64 val
= nr64_mac(BRXMAC_CONFIG
);
5359 val
&= ~(BRXMAC_CONFIG_HASH_FILT_EN
|
5360 BRXMAC_CONFIG_PROMISC
);
5362 if (np
->flags
& NIU_FLAGS_MCAST
)
5363 val
|= BRXMAC_CONFIG_HASH_FILT_EN
;
5364 if (np
->flags
& NIU_FLAGS_PROMISC
)
5365 val
|= BRXMAC_CONFIG_PROMISC
;
5368 val
|= BRXMAC_CONFIG_ENABLE
;
5370 val
&= ~BRXMAC_CONFIG_ENABLE
;
5371 nw64_mac(BRXMAC_CONFIG
, val
);
5374 static void niu_enable_rx_mac(struct niu
*np
, int on
)
5376 if (np
->flags
& NIU_FLAGS_XMAC
)
5377 niu_enable_rx_xmac(np
, on
);
5379 niu_enable_rx_bmac(np
, on
);
5382 static int niu_init_mac(struct niu
*np
)
5387 err
= niu_init_pcs(np
);
5391 err
= niu_reset_tx_mac(np
);
5394 niu_init_tx_mac(np
);
5395 err
= niu_reset_rx_mac(np
);
5398 niu_init_rx_mac(np
);
5400 /* This looks hookey but the RX MAC reset we just did will
5401 * undo some of the state we setup in niu_init_tx_mac() so we
5402 * have to call it again. In particular, the RX MAC reset will
5403 * set the XMAC_MAX register back to it's default value.
5405 niu_init_tx_mac(np
);
5406 niu_enable_tx_mac(np
, 1);
5408 niu_enable_rx_mac(np
, 1);
5413 static void niu_stop_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
5415 (void) niu_tx_channel_stop(np
, rp
->tx_channel
);
5418 static void niu_stop_tx_channels(struct niu
*np
)
5422 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
5423 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
5425 niu_stop_one_tx_channel(np
, rp
);
5429 static void niu_reset_one_tx_channel(struct niu
*np
, struct tx_ring_info
*rp
)
5431 (void) niu_tx_channel_reset(np
, rp
->tx_channel
);
5434 static void niu_reset_tx_channels(struct niu
*np
)
5438 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
5439 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
5441 niu_reset_one_tx_channel(np
, rp
);
5445 static void niu_stop_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
5447 (void) niu_enable_rx_channel(np
, rp
->rx_channel
, 0);
5450 static void niu_stop_rx_channels(struct niu
*np
)
5454 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5455 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5457 niu_stop_one_rx_channel(np
, rp
);
5461 static void niu_reset_one_rx_channel(struct niu
*np
, struct rx_ring_info
*rp
)
5463 int channel
= rp
->rx_channel
;
5465 (void) niu_rx_channel_reset(np
, channel
);
5466 nw64(RX_DMA_ENT_MSK(channel
), RX_DMA_ENT_MSK_ALL
);
5467 nw64(RX_DMA_CTL_STAT(channel
), 0);
5468 (void) niu_enable_rx_channel(np
, channel
, 0);
5471 static void niu_reset_rx_channels(struct niu
*np
)
5475 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5476 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5478 niu_reset_one_rx_channel(np
, rp
);
static void niu_disable_ipp(struct niu *np)
{
	u64 rd, wr, val;
	int limit;

	rd = nr64_ipp(IPP_DFIFO_RD_PTR);
	wr = nr64_ipp(IPP_DFIFO_WR_PTR);

	limit = 100;
	while (--limit >= 0 && (rd != wr)) {
		rd = nr64_ipp(IPP_DFIFO_RD_PTR);
		wr = nr64_ipp(IPP_DFIFO_WR_PTR);
	}
	if (limit < 0 &&
	    (rd != 0 && wr != 1)) {
		dev_err(np->device, PFX "%s: IPP would not quiesce, "
			"rd_ptr[%llx] wr_ptr[%llx]\n",
			np->dev->name,
			(unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
			(unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
	}

	val = nr64_ipp(IPP_CFIG);
	val &= ~(IPP_CFIG_IPP_ENABLE |
		 IPP_CFIG_DFIFO_ECC_EN |
		 IPP_CFIG_DROP_BAD_CRC |
		 IPP_CFIG_CKSUM_EN);
	nw64_ipp(IPP_CFIG, val);

	(void) niu_ipp_reset(np);
}
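
/* Full hardware initialization in dependency order: TXC, TX channels,
 * RX channels, classifier, ZCP, IPP and finally the MAC.  On failure
 * the out_uninit_* labels unwind the already-initialized blocks in
 * reverse order.
 */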
static int niu_init_hw(struct niu *np)
{
	int i, err;

	niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
	niu_txc_enable_port(np, 1);
	niu_txc_port_dma_enable(np, 1);
	niu_txc_set_imask(np, 0);

	niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		err = niu_init_one_tx_channel(np, rp);
		if (err)
			return err;
	}

	niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
	err = niu_init_rx_channels(np);
	if (err)
		goto out_uninit_tx_channels;

	niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
	err = niu_init_classifier_hw(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
	err = niu_init_zcp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
	err = niu_init_ipp(np);
	if (err)
		goto out_uninit_rx_channels;

	niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
	err = niu_init_mac(np);
	if (err)
		goto out_uninit_ipp;

	return 0;

out_uninit_ipp:
	niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
	niu_disable_ipp(np);

out_uninit_rx_channels:
	niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);
	niu_reset_rx_channels(np);

out_uninit_tx_channels:
	niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);
	niu_reset_tx_channels(np);

	return err;
}
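
/* Tear-down counterpart of niu_init_hw(): mask interrupts, disable the
 * RX MAC and IPP, then stop and reset all DMA channels.  Callers hold
 * np->lock around this.
 */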
static void niu_stop_hw(struct niu *np)
{
	niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
	niu_enable_interrupts(np, 0);

	niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
	niu_enable_rx_mac(np, 0);

	niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
	niu_disable_ipp(np);

	niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
	niu_stop_tx_channels(np);

	niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
	niu_stop_rx_channels(np);

	niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
	niu_reset_tx_channels(np);

	niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
	niu_reset_rx_channels(np);
}
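
/* Interrupt plumbing: one IRQ and one NAPI context per logical device
 * group (LDG).  The helpers below request/free those IRQs and switch
 * the per-LDG NAPI instances on and off as a unit.
 */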
static int niu_request_irq(struct niu *np)
{
	int i, j, err;

	err = 0;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		err = request_irq(lp->irq, niu_interrupt,
				  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
				  np->dev->name, lp);
		if (err)
			goto out_free_irqs;
	}

	return 0;

out_free_irqs:
	for (j = 0; j < i; j++) {
		struct niu_ldg *lp = &np->ldg[j];

		free_irq(lp->irq, lp);
	}
	return err;
}

static void niu_free_irq(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		free_irq(lp->irq, lp);
	}
}

static void niu_enable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_enable(&np->ldg[i].napi);
}

static void niu_disable_napi(struct niu *np)
{
	int i;

	for (i = 0; i < np->num_ldg; i++)
		napi_disable(&np->ldg[i].napi);
}
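
/* Open-time bring-up: allocate the DMA channels, hook up the LDG
 * interrupts, initialize the hardware under np->lock, and only then
 * start the TX queues and the one-second housekeeping timer.
 */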
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}

static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}

static void niu_sync_bmac_stats(struct niu *np)
{
	struct niu_bmac_stats *mp = &np->mac_stats.bmac;

	mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
	mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);

	mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
	mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
	mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
}

static void niu_sync_mac_stats(struct niu *np)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_sync_xmac_stats(np);
	else
		niu_sync_bmac_stats(np);
}
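
/* Fold the per-ring software counters into the single net_device_stats
 * structure that is reported to the networking core.
 */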
static void niu_get_rx_stats(struct niu *np)
{
	unsigned long pkts, dropped, errors, bytes;
	int i;

	pkts = dropped = errors = bytes = 0;
	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &np->rx_rings[i];

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}
	np->net_stats.rx_packets = pkts;
	np->net_stats.rx_bytes = bytes;
	np->net_stats.rx_dropped = dropped;
	np->net_stats.rx_errors = errors;
}

static void niu_get_tx_stats(struct niu *np)
{
	unsigned long pkts, errors, bytes;
	int i;

	pkts = errors = bytes = 0;
	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &np->tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}
	np->net_stats.tx_packets = pkts;
	np->net_stats.tx_bytes = bytes;
	np->net_stats.tx_errors = errors;
}

static struct net_device_stats *niu_get_stats(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_get_rx_stats(np);
	niu_get_tx_stats(np);

	return &np->net_stats;
}
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(XMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash_bmac(struct niu *np, u16 *hash)
{
	int i;

	for (i = 0; i < 16; i++)
		nw64_mac(BMAC_HASH_TBL(i), hash[i]);
}

static void niu_load_hash(struct niu *np, u16 *hash)
{
	if (np->flags & NIU_FLAGS_XMAC)
		niu_load_hash_xmac(np, hash);
	else
		niu_load_hash_bmac(np, hash);
}
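
/* Receive filter setup.  Unicast secondary addresses go into the
 * alternate MAC address slots; multicast filtering uses a 256-bit hash
 * spread across sixteen 16-bit registers.  The top byte of the
 * little-endian CRC of each address picks one bit: the upper nibble
 * selects the register (hash[crc >> 4]) and the lower nibble the bit
 * within it, counted from the MSB.  For example a CRC top byte of 0xab
 * sets bit (15 - 0xb) = 4 in hash[0xa].
 */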
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct dev_addr_list *addr;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
		np->flags |= NIU_FLAGS_MCAST;

	alt_cnt = dev->uc_count;
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		for (addr = dev->uc_list; addr; addr = addr->next) {
			err = niu_set_alt_mac(np, index,
					      addr->da_addr);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "adding alt mac %d\n",
				       dev->name, err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "enabling alt mac %d\n",
				       dev->name, err, index);

			index++;
		}
	} else {
		int alt_start;

		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				printk(KERN_WARNING PFX "%s: Error %d "
				       "disabling alt mac %d\n",
				       dev->name, err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (dev->mc_count > 0) {
		for (addr = dev->mc_list; addr; addr = addr->next) {
			u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);

			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
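
/* Station address change.  The new address is only pushed to the
 * hardware while the interface is running, and the RX MAC is disabled
 * around the primary-MAC update so frames are not matched against a
 * partially written address.
 */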
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}

static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies;	/* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}

static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
5987 static void niu_reset_buffers(struct niu
*np
)
5992 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
5993 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
5995 for (j
= 0, k
= 0; j
< MAX_RBR_RING_SIZE
; j
++) {
5998 page
= rp
->rxhash
[j
];
6001 (struct page
*) page
->mapping
;
6002 u64 base
= page
->index
;
6003 base
= base
>> RBR_DESCR_ADDR_SHIFT
;
6004 rp
->rbr
[k
++] = cpu_to_le32(base
);
6008 for (; k
< MAX_RBR_RING_SIZE
; k
++) {
6009 err
= niu_rbr_add_page(np
, rp
, GFP_ATOMIC
, k
);
6014 rp
->rbr_index
= rp
->rbr_table_size
- 1;
6016 rp
->rbr_pending
= 0;
6017 rp
->rbr_refill_pending
= 0;
6021 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6022 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6024 for (j
= 0; j
< MAX_TX_RING_SIZE
; j
++) {
6025 if (rp
->tx_buffs
[j
].skb
)
6026 (void) release_tx_packet(np
, rp
, j
);
6029 rp
->pending
= MAX_TX_RING_SIZE
;
6037 static void niu_reset_task(struct work_struct
*work
)
6039 struct niu
*np
= container_of(work
, struct niu
, reset_task
);
6040 unsigned long flags
;
6043 spin_lock_irqsave(&np
->lock
, flags
);
6044 if (!netif_running(np
->dev
)) {
6045 spin_unlock_irqrestore(&np
->lock
, flags
);
6049 spin_unlock_irqrestore(&np
->lock
, flags
);
6051 del_timer_sync(&np
->timer
);
6055 spin_lock_irqsave(&np
->lock
, flags
);
6059 spin_unlock_irqrestore(&np
->lock
, flags
);
6061 niu_reset_buffers(np
);
6063 spin_lock_irqsave(&np
->lock
, flags
);
6065 err
= niu_init_hw(np
);
6067 np
->timer
.expires
= jiffies
+ HZ
;
6068 add_timer(&np
->timer
);
6069 niu_netif_start(np
);
6072 spin_unlock_irqrestore(&np
->lock
, flags
);
6075 static void niu_tx_timeout(struct net_device
*dev
)
6077 struct niu
*np
= netdev_priv(dev
);
6079 dev_err(np
->device
, PFX
"%s: Transmit timed out, resetting\n",
6082 schedule_work(&np
->reset_task
);
6085 static void niu_set_txd(struct tx_ring_info
*rp
, int index
,
6086 u64 mapping
, u64 len
, u64 mark
,
6089 __le64
*desc
= &rp
->descr
[index
];
6091 *desc
= cpu_to_le64(mark
|
6092 (n_frags
<< TX_DESC_NUM_PTR_SHIFT
) |
6093 (len
<< TX_DESC_TR_LEN_SHIFT
) |
6094 (mapping
& TX_DESC_SAD
));
6097 static u64
niu_compute_tx_flags(struct sk_buff
*skb
, struct ethhdr
*ehdr
,
6098 u64 pad_bytes
, u64 len
)
6100 u16 eth_proto
, eth_proto_inner
;
6101 u64 csum_bits
, l3off
, ihl
, ret
;
6105 eth_proto
= be16_to_cpu(ehdr
->h_proto
);
6106 eth_proto_inner
= eth_proto
;
6107 if (eth_proto
== ETH_P_8021Q
) {
6108 struct vlan_ethhdr
*vp
= (struct vlan_ethhdr
*) ehdr
;
6109 __be16 val
= vp
->h_vlan_encapsulated_proto
;
6111 eth_proto_inner
= be16_to_cpu(val
);
6115 switch (skb
->protocol
) {
6116 case __constant_htons(ETH_P_IP
):
6117 ip_proto
= ip_hdr(skb
)->protocol
;
6118 ihl
= ip_hdr(skb
)->ihl
;
6120 case __constant_htons(ETH_P_IPV6
):
6121 ip_proto
= ipv6_hdr(skb
)->nexthdr
;
6130 csum_bits
= TXHDR_CSUM_NONE
;
6131 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
6134 csum_bits
= (ip_proto
== IPPROTO_TCP
?
6136 (ip_proto
== IPPROTO_UDP
?
6137 TXHDR_CSUM_UDP
: TXHDR_CSUM_SCTP
));
6139 start
= skb_transport_offset(skb
) -
6140 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6141 stuff
= start
+ skb
->csum_offset
;
6143 csum_bits
|= (start
/ 2) << TXHDR_L4START_SHIFT
;
6144 csum_bits
|= (stuff
/ 2) << TXHDR_L4STUFF_SHIFT
;
6147 l3off
= skb_network_offset(skb
) -
6148 (pad_bytes
+ sizeof(struct tx_pkt_hdr
));
6150 ret
= (((pad_bytes
/ 2) << TXHDR_PAD_SHIFT
) |
6151 (len
<< TXHDR_LEN_SHIFT
) |
6152 ((l3off
/ 2) << TXHDR_L3START_SHIFT
) |
6153 (ihl
<< TXHDR_IHL_SHIFT
) |
6154 ((eth_proto_inner
< 1536) ? TXHDR_LLC
: 0) |
6155 ((eth_proto
== ETH_P_8021Q
) ? TXHDR_VLAN
: 0) |
6156 (ipv6
? TXHDR_IP_VER
: 0) |
6162 static int niu_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
6164 struct niu
*np
= netdev_priv(dev
);
6165 unsigned long align
, headroom
;
6166 struct netdev_queue
*txq
;
6167 struct tx_ring_info
*rp
;
6168 struct tx_pkt_hdr
*tp
;
6169 unsigned int len
, nfg
;
6170 struct ethhdr
*ehdr
;
6174 i
= skb_get_queue_mapping(skb
);
6175 rp
= &np
->tx_rings
[i
];
6176 txq
= netdev_get_tx_queue(dev
, i
);
6178 if (niu_tx_avail(rp
) <= (skb_shinfo(skb
)->nr_frags
+ 1)) {
6179 netif_tx_stop_queue(txq
);
6180 dev_err(np
->device
, PFX
"%s: BUG! Tx ring full when "
6181 "queue awake!\n", dev
->name
);
6183 return NETDEV_TX_BUSY
;
6186 if (skb
->len
< ETH_ZLEN
) {
6187 unsigned int pad_bytes
= ETH_ZLEN
- skb
->len
;
6189 if (skb_pad(skb
, pad_bytes
))
6191 skb_put(skb
, pad_bytes
);
6194 len
= sizeof(struct tx_pkt_hdr
) + 15;
6195 if (skb_headroom(skb
) < len
) {
6196 struct sk_buff
*skb_new
;
6198 skb_new
= skb_realloc_headroom(skb
, len
);
6208 align
= ((unsigned long) skb
->data
& (16 - 1));
6209 headroom
= align
+ sizeof(struct tx_pkt_hdr
);
6211 ehdr
= (struct ethhdr
*) skb
->data
;
6212 tp
= (struct tx_pkt_hdr
*) skb_push(skb
, headroom
);
6214 len
= skb
->len
- sizeof(struct tx_pkt_hdr
);
6215 tp
->flags
= cpu_to_le64(niu_compute_tx_flags(skb
, ehdr
, align
, len
));
6218 len
= skb_headlen(skb
);
6219 mapping
= np
->ops
->map_single(np
->device
, skb
->data
,
6220 len
, DMA_TO_DEVICE
);
6224 rp
->tx_buffs
[prod
].skb
= skb
;
6225 rp
->tx_buffs
[prod
].mapping
= mapping
;
6228 if (++rp
->mark_counter
== rp
->mark_freq
) {
6229 rp
->mark_counter
= 0;
6230 mrk
|= TX_DESC_MARK
;
6235 nfg
= skb_shinfo(skb
)->nr_frags
;
6237 tlen
-= MAX_TX_DESC_LEN
;
6242 unsigned int this_len
= len
;
6244 if (this_len
> MAX_TX_DESC_LEN
)
6245 this_len
= MAX_TX_DESC_LEN
;
6247 niu_set_txd(rp
, prod
, mapping
, this_len
, mrk
, nfg
);
6250 prod
= NEXT_TX(rp
, prod
);
6251 mapping
+= this_len
;
6255 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6256 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6259 mapping
= np
->ops
->map_page(np
->device
, frag
->page
,
6260 frag
->page_offset
, len
,
6263 rp
->tx_buffs
[prod
].skb
= NULL
;
6264 rp
->tx_buffs
[prod
].mapping
= mapping
;
6266 niu_set_txd(rp
, prod
, mapping
, len
, 0, 0);
6268 prod
= NEXT_TX(rp
, prod
);
6271 if (prod
< rp
->prod
)
6272 rp
->wrap_bit
^= TX_RING_KICK_WRAP
;
6275 nw64(TX_RING_KICK(rp
->tx_channel
), rp
->wrap_bit
| (prod
<< 3));
6277 if (unlikely(niu_tx_avail(rp
) <= (MAX_SKB_FRAGS
+ 1))) {
6278 netif_tx_stop_queue(txq
);
6279 if (niu_tx_avail(rp
) > NIU_TX_WAKEUP_THRESH(rp
))
6280 netif_tx_wake_queue(txq
);
6283 dev
->trans_start
= jiffies
;
6286 return NETDEV_TX_OK
;
6294 static int niu_change_mtu(struct net_device
*dev
, int new_mtu
)
6296 struct niu
*np
= netdev_priv(dev
);
6297 int err
, orig_jumbo
, new_jumbo
;
6299 if (new_mtu
< 68 || new_mtu
> NIU_MAX_MTU
)
6302 orig_jumbo
= (dev
->mtu
> ETH_DATA_LEN
);
6303 new_jumbo
= (new_mtu
> ETH_DATA_LEN
);
6307 if (!netif_running(dev
) ||
6308 (orig_jumbo
== new_jumbo
))
6311 niu_full_shutdown(np
, dev
);
6313 niu_free_channels(np
);
6315 niu_enable_napi(np
);
6317 err
= niu_alloc_channels(np
);
6321 spin_lock_irq(&np
->lock
);
6323 err
= niu_init_hw(np
);
6325 init_timer(&np
->timer
);
6326 np
->timer
.expires
= jiffies
+ HZ
;
6327 np
->timer
.data
= (unsigned long) np
;
6328 np
->timer
.function
= niu_timer
;
6330 err
= niu_enable_interrupts(np
, 1);
6335 spin_unlock_irq(&np
->lock
);
6338 netif_tx_start_all_queues(dev
);
6339 if (np
->link_config
.loopback_mode
!= LOOPBACK_DISABLED
)
6340 netif_carrier_on(dev
);
6342 add_timer(&np
->timer
);
6348 static void niu_get_drvinfo(struct net_device
*dev
,
6349 struct ethtool_drvinfo
*info
)
6351 struct niu
*np
= netdev_priv(dev
);
6352 struct niu_vpd
*vpd
= &np
->vpd
;
6354 strcpy(info
->driver
, DRV_MODULE_NAME
);
6355 strcpy(info
->version
, DRV_MODULE_VERSION
);
6356 sprintf(info
->fw_version
, "%d.%d",
6357 vpd
->fcode_major
, vpd
->fcode_minor
);
6358 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
)
6359 strcpy(info
->bus_info
, pci_name(np
->pdev
));
6362 static int niu_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6364 struct niu
*np
= netdev_priv(dev
);
6365 struct niu_link_config
*lp
;
6367 lp
= &np
->link_config
;
6369 memset(cmd
, 0, sizeof(*cmd
));
6370 cmd
->phy_address
= np
->phy_addr
;
6371 cmd
->supported
= lp
->supported
;
6372 cmd
->advertising
= lp
->advertising
;
6373 cmd
->autoneg
= lp
->autoneg
;
6374 cmd
->speed
= lp
->active_speed
;
6375 cmd
->duplex
= lp
->active_duplex
;
6380 static int niu_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
6385 static u32
niu_get_msglevel(struct net_device
*dev
)
6387 struct niu
*np
= netdev_priv(dev
);
6388 return np
->msg_enable
;
6391 static void niu_set_msglevel(struct net_device
*dev
, u32 value
)
6393 struct niu
*np
= netdev_priv(dev
);
6394 np
->msg_enable
= value
;
6397 static int niu_get_eeprom_len(struct net_device
*dev
)
6399 struct niu
*np
= netdev_priv(dev
);
6401 return np
->eeprom_len
;
6404 static int niu_get_eeprom(struct net_device
*dev
,
6405 struct ethtool_eeprom
*eeprom
, u8
*data
)
6407 struct niu
*np
= netdev_priv(dev
);
6408 u32 offset
, len
, val
;
6410 offset
= eeprom
->offset
;
6413 if (offset
+ len
< offset
)
6415 if (offset
>= np
->eeprom_len
)
6417 if (offset
+ len
> np
->eeprom_len
)
6418 len
= eeprom
->len
= np
->eeprom_len
- offset
;
6421 u32 b_offset
, b_count
;
6423 b_offset
= offset
& 3;
6424 b_count
= 4 - b_offset
;
6428 val
= nr64(ESPC_NCR((offset
- b_offset
) / 4));
6429 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
6435 val
= nr64(ESPC_NCR(offset
/ 4));
6436 memcpy(data
, &val
, 4);
6442 val
= nr64(ESPC_NCR(offset
/ 4));
6443 memcpy(data
, &val
, len
);
6448 static int niu_ethflow_to_class(int flow_type
, u64
*class)
6450 switch (flow_type
) {
6452 *class = CLASS_CODE_TCP_IPV4
;
6455 *class = CLASS_CODE_UDP_IPV4
;
6457 case AH_ESP_V4_FLOW
:
6458 *class = CLASS_CODE_AH_ESP_IPV4
;
6461 *class = CLASS_CODE_SCTP_IPV4
;
6464 *class = CLASS_CODE_TCP_IPV6
;
6467 *class = CLASS_CODE_UDP_IPV6
;
6469 case AH_ESP_V6_FLOW
:
6470 *class = CLASS_CODE_AH_ESP_IPV6
;
6473 *class = CLASS_CODE_SCTP_IPV6
;
6482 static u64
niu_flowkey_to_ethflow(u64 flow_key
)
6486 if (flow_key
& FLOW_KEY_PORT
)
6487 ethflow
|= RXH_DEV_PORT
;
6488 if (flow_key
& FLOW_KEY_L2DA
)
6489 ethflow
|= RXH_L2DA
;
6490 if (flow_key
& FLOW_KEY_VLAN
)
6491 ethflow
|= RXH_VLAN
;
6492 if (flow_key
& FLOW_KEY_IPSA
)
6493 ethflow
|= RXH_IP_SRC
;
6494 if (flow_key
& FLOW_KEY_IPDA
)
6495 ethflow
|= RXH_IP_DST
;
6496 if (flow_key
& FLOW_KEY_PROTO
)
6497 ethflow
|= RXH_L3_PROTO
;
6498 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
))
6499 ethflow
|= RXH_L4_B_0_1
;
6500 if (flow_key
& (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
))
6501 ethflow
|= RXH_L4_B_2_3
;
6507 static int niu_ethflow_to_flowkey(u64 ethflow
, u64
*flow_key
)
6511 if (ethflow
& RXH_DEV_PORT
)
6512 key
|= FLOW_KEY_PORT
;
6513 if (ethflow
& RXH_L2DA
)
6514 key
|= FLOW_KEY_L2DA
;
6515 if (ethflow
& RXH_VLAN
)
6516 key
|= FLOW_KEY_VLAN
;
6517 if (ethflow
& RXH_IP_SRC
)
6518 key
|= FLOW_KEY_IPSA
;
6519 if (ethflow
& RXH_IP_DST
)
6520 key
|= FLOW_KEY_IPDA
;
6521 if (ethflow
& RXH_L3_PROTO
)
6522 key
|= FLOW_KEY_PROTO
;
6523 if (ethflow
& RXH_L4_B_0_1
)
6524 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_0_SHIFT
);
6525 if (ethflow
& RXH_L4_B_2_3
)
6526 key
|= (FLOW_KEY_L4_BYTE12
<< FLOW_KEY_L4_1_SHIFT
);
6534 static int niu_get_hash_opts(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
6536 struct niu
*np
= netdev_priv(dev
);
6541 if (!niu_ethflow_to_class(cmd
->flow_type
, &class))
6544 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
6546 cmd
->data
= RXH_DISCARD
;
6549 cmd
->data
= niu_flowkey_to_ethflow(np
->parent
->flow_key
[class -
6550 CLASS_CODE_USER_PROG1
]);
6554 static int niu_set_hash_opts(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
6556 struct niu
*np
= netdev_priv(dev
);
6559 unsigned long flags
;
6561 if (!niu_ethflow_to_class(cmd
->flow_type
, &class))
6564 if (class < CLASS_CODE_USER_PROG1
||
6565 class > CLASS_CODE_SCTP_IPV6
)
6568 if (cmd
->data
& RXH_DISCARD
) {
6569 niu_lock_parent(np
, flags
);
6570 flow_key
= np
->parent
->tcam_key
[class -
6571 CLASS_CODE_USER_PROG1
];
6572 flow_key
|= TCAM_KEY_DISC
;
6573 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
6574 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
6575 niu_unlock_parent(np
, flags
);
6578 /* Discard was set before, but is not set now */
6579 if (np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] &
6581 niu_lock_parent(np
, flags
);
6582 flow_key
= np
->parent
->tcam_key
[class -
6583 CLASS_CODE_USER_PROG1
];
6584 flow_key
&= ~TCAM_KEY_DISC
;
6585 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1
),
6587 np
->parent
->tcam_key
[class - CLASS_CODE_USER_PROG1
] =
6589 niu_unlock_parent(np
, flags
);
6593 if (!niu_ethflow_to_flowkey(cmd
->data
, &flow_key
))
6596 niu_lock_parent(np
, flags
);
6597 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1
), flow_key
);
6598 np
->parent
->flow_key
[class - CLASS_CODE_USER_PROG1
] = flow_key
;
6599 niu_unlock_parent(np
, flags
);
6604 static const struct {
6605 const char string
[ETH_GSTRING_LEN
];
6606 } niu_xmac_stat_keys
[] = {
6609 { "tx_fifo_errors" },
6610 { "tx_overflow_errors" },
6611 { "tx_max_pkt_size_errors" },
6612 { "tx_underflow_errors" },
6613 { "rx_local_faults" },
6614 { "rx_remote_faults" },
6615 { "rx_link_faults" },
6616 { "rx_align_errors" },
6628 { "rx_code_violations" },
6629 { "rx_len_errors" },
6630 { "rx_crc_errors" },
6631 { "rx_underflows" },
6633 { "pause_off_state" },
6634 { "pause_on_state" },
6635 { "pause_received" },
6638 #define NUM_XMAC_STAT_KEYS ARRAY_SIZE(niu_xmac_stat_keys)
6640 static const struct {
6641 const char string
[ETH_GSTRING_LEN
];
6642 } niu_bmac_stat_keys
[] = {
6643 { "tx_underflow_errors" },
6644 { "tx_max_pkt_size_errors" },
6649 { "rx_align_errors" },
6650 { "rx_crc_errors" },
6651 { "rx_len_errors" },
6652 { "pause_off_state" },
6653 { "pause_on_state" },
6654 { "pause_received" },
6657 #define NUM_BMAC_STAT_KEYS ARRAY_SIZE(niu_bmac_stat_keys)
6659 static const struct {
6660 const char string
[ETH_GSTRING_LEN
];
6661 } niu_rxchan_stat_keys
[] = {
6669 #define NUM_RXCHAN_STAT_KEYS ARRAY_SIZE(niu_rxchan_stat_keys)
6671 static const struct {
6672 const char string
[ETH_GSTRING_LEN
];
6673 } niu_txchan_stat_keys
[] = {
6680 #define NUM_TXCHAN_STAT_KEYS ARRAY_SIZE(niu_txchan_stat_keys)
6682 static void niu_get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
6684 struct niu
*np
= netdev_priv(dev
);
6687 if (stringset
!= ETH_SS_STATS
)
6690 if (np
->flags
& NIU_FLAGS_XMAC
) {
6691 memcpy(data
, niu_xmac_stat_keys
,
6692 sizeof(niu_xmac_stat_keys
));
6693 data
+= sizeof(niu_xmac_stat_keys
);
6695 memcpy(data
, niu_bmac_stat_keys
,
6696 sizeof(niu_bmac_stat_keys
));
6697 data
+= sizeof(niu_bmac_stat_keys
);
6699 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6700 memcpy(data
, niu_rxchan_stat_keys
,
6701 sizeof(niu_rxchan_stat_keys
));
6702 data
+= sizeof(niu_rxchan_stat_keys
);
6704 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6705 memcpy(data
, niu_txchan_stat_keys
,
6706 sizeof(niu_txchan_stat_keys
));
6707 data
+= sizeof(niu_txchan_stat_keys
);
6711 static int niu_get_stats_count(struct net_device
*dev
)
6713 struct niu
*np
= netdev_priv(dev
);
6715 return ((np
->flags
& NIU_FLAGS_XMAC
?
6716 NUM_XMAC_STAT_KEYS
:
6717 NUM_BMAC_STAT_KEYS
) +
6718 (np
->num_rx_rings
* NUM_RXCHAN_STAT_KEYS
) +
6719 (np
->num_tx_rings
* NUM_TXCHAN_STAT_KEYS
));
6722 static void niu_get_ethtool_stats(struct net_device
*dev
,
6723 struct ethtool_stats
*stats
, u64
*data
)
6725 struct niu
*np
= netdev_priv(dev
);
6728 niu_sync_mac_stats(np
);
6729 if (np
->flags
& NIU_FLAGS_XMAC
) {
6730 memcpy(data
, &np
->mac_stats
.xmac
,
6731 sizeof(struct niu_xmac_stats
));
6732 data
+= (sizeof(struct niu_xmac_stats
) / sizeof(u64
));
6734 memcpy(data
, &np
->mac_stats
.bmac
,
6735 sizeof(struct niu_bmac_stats
));
6736 data
+= (sizeof(struct niu_bmac_stats
) / sizeof(u64
));
6738 for (i
= 0; i
< np
->num_rx_rings
; i
++) {
6739 struct rx_ring_info
*rp
= &np
->rx_rings
[i
];
6741 data
[0] = rp
->rx_channel
;
6742 data
[1] = rp
->rx_packets
;
6743 data
[2] = rp
->rx_bytes
;
6744 data
[3] = rp
->rx_dropped
;
6745 data
[4] = rp
->rx_errors
;
6748 for (i
= 0; i
< np
->num_tx_rings
; i
++) {
6749 struct tx_ring_info
*rp
= &np
->tx_rings
[i
];
6751 data
[0] = rp
->tx_channel
;
6752 data
[1] = rp
->tx_packets
;
6753 data
[2] = rp
->tx_bytes
;
6754 data
[3] = rp
->tx_errors
;
6759 static u64
niu_led_state_save(struct niu
*np
)
6761 if (np
->flags
& NIU_FLAGS_XMAC
)
6762 return nr64_mac(XMAC_CONFIG
);
6764 return nr64_mac(BMAC_XIF_CONFIG
);
6767 static void niu_led_state_restore(struct niu
*np
, u64 val
)
6769 if (np
->flags
& NIU_FLAGS_XMAC
)
6770 nw64_mac(XMAC_CONFIG
, val
);
6772 nw64_mac(BMAC_XIF_CONFIG
, val
);
6775 static void niu_force_led(struct niu
*np
, int on
)
6779 if (np
->flags
& NIU_FLAGS_XMAC
) {
6781 bit
= XMAC_CONFIG_FORCE_LED_ON
;
6783 reg
= BMAC_XIF_CONFIG
;
6784 bit
= BMAC_XIF_CONFIG_LINK_LED
;
6787 val
= nr64_mac(reg
);
6795 static int niu_phys_id(struct net_device
*dev
, u32 data
)
6797 struct niu
*np
= netdev_priv(dev
);
6801 if (!netif_running(dev
))
6807 orig_led_state
= niu_led_state_save(np
);
6808 for (i
= 0; i
< (data
* 2); i
++) {
6809 int on
= ((i
% 2) == 0);
6811 niu_force_led(np
, on
);
6813 if (msleep_interruptible(500))
6816 niu_led_state_restore(np
, orig_led_state
);
6821 static const struct ethtool_ops niu_ethtool_ops
= {
6822 .get_drvinfo
= niu_get_drvinfo
,
6823 .get_link
= ethtool_op_get_link
,
6824 .get_msglevel
= niu_get_msglevel
,
6825 .set_msglevel
= niu_set_msglevel
,
6826 .get_eeprom_len
= niu_get_eeprom_len
,
6827 .get_eeprom
= niu_get_eeprom
,
6828 .get_settings
= niu_get_settings
,
6829 .set_settings
= niu_set_settings
,
6830 .get_strings
= niu_get_strings
,
6831 .get_stats_count
= niu_get_stats_count
,
6832 .get_ethtool_stats
= niu_get_ethtool_stats
,
6833 .phys_id
= niu_phys_id
,
6834 .get_rxhash
= niu_get_hash_opts
,
6835 .set_rxhash
= niu_set_hash_opts
,
6838 static int niu_ldg_assign_ldn(struct niu
*np
, struct niu_parent
*parent
,
6841 if (ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
)
6843 if (ldn
< 0 || ldn
> LDN_MAX
)
6846 parent
->ldg_map
[ldn
] = ldg
;
6848 if (np
->parent
->plat_type
== PLAT_TYPE_NIU
) {
6849 /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
6850 * the firmware, and we're not supposed to change them.
6851 * Validate the mapping, because if it's wrong we probably
6852 * won't get any interrupts and that's painful to debug.
6854 if (nr64(LDG_NUM(ldn
)) != ldg
) {
6855 dev_err(np
->device
, PFX
"Port %u, mis-matched "
6857 "for ldn %d, should be %d is %llu\n",
6859 (unsigned long long) nr64(LDG_NUM(ldn
)));
6863 nw64(LDG_NUM(ldn
), ldg
);
6868 static int niu_set_ldg_timer_res(struct niu
*np
, int res
)
6870 if (res
< 0 || res
> LDG_TIMER_RES_VAL
)
6874 nw64(LDG_TIMER_RES
, res
);
6879 static int niu_set_ldg_sid(struct niu
*np
, int ldg
, int func
, int vector
)
6881 if ((ldg
< NIU_LDG_MIN
|| ldg
> NIU_LDG_MAX
) ||
6882 (func
< 0 || func
> 3) ||
6883 (vector
< 0 || vector
> 0x1f))
6886 nw64(SID(ldg
), (func
<< SID_FUNC_SHIFT
) | vector
);
6891 static int __devinit
niu_pci_eeprom_read(struct niu
*np
, u32 addr
)
6893 u64 frame
, frame_base
= (ESPC_PIO_STAT_READ_START
|
6894 (addr
<< ESPC_PIO_STAT_ADDR_SHIFT
));
6897 if (addr
> (ESPC_PIO_STAT_ADDR
>> ESPC_PIO_STAT_ADDR_SHIFT
))
6901 nw64(ESPC_PIO_STAT
, frame
);
6905 frame
= nr64(ESPC_PIO_STAT
);
6906 if (frame
& ESPC_PIO_STAT_READ_END
)
6909 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
6910 dev_err(np
->device
, PFX
"EEPROM read timeout frame[%llx]\n",
6911 (unsigned long long) frame
);
6916 nw64(ESPC_PIO_STAT
, frame
);
6920 frame
= nr64(ESPC_PIO_STAT
);
6921 if (frame
& ESPC_PIO_STAT_READ_END
)
6924 if (!(frame
& ESPC_PIO_STAT_READ_END
)) {
6925 dev_err(np
->device
, PFX
"EEPROM read timeout frame[%llx]\n",
6926 (unsigned long long) frame
);
6930 frame
= nr64(ESPC_PIO_STAT
);
6931 return (frame
& ESPC_PIO_STAT_DATA
) >> ESPC_PIO_STAT_DATA_SHIFT
;
6934 static int __devinit
niu_pci_eeprom_read16(struct niu
*np
, u32 off
)
6936 int err
= niu_pci_eeprom_read(np
, off
);
6942 err
= niu_pci_eeprom_read(np
, off
+ 1);
6945 val
|= (err
& 0xff);
6950 static int __devinit
niu_pci_eeprom_read16_swp(struct niu
*np
, u32 off
)
6952 int err
= niu_pci_eeprom_read(np
, off
);
6959 err
= niu_pci_eeprom_read(np
, off
+ 1);
6963 val
|= (err
& 0xff) << 8;
6968 static int __devinit
niu_pci_vpd_get_propname(struct niu
*np
,
6975 for (i
= 0; i
< namebuf_len
; i
++) {
6976 int err
= niu_pci_eeprom_read(np
, off
+ i
);
6983 if (i
>= namebuf_len
)
6989 static void __devinit
niu_vpd_parse_version(struct niu
*np
)
6991 struct niu_vpd
*vpd
= &np
->vpd
;
6992 int len
= strlen(vpd
->version
) + 1;
6993 const char *s
= vpd
->version
;
6996 for (i
= 0; i
< len
- 5; i
++) {
6997 if (!strncmp(s
+ i
, "FCode ", 5))
7004 sscanf(s
, "%d.%d", &vpd
->fcode_major
, &vpd
->fcode_minor
);
7006 niudbg(PROBE
, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
7007 vpd
->fcode_major
, vpd
->fcode_minor
);
7008 if (vpd
->fcode_major
> NIU_VPD_MIN_MAJOR
||
7009 (vpd
->fcode_major
== NIU_VPD_MIN_MAJOR
&&
7010 vpd
->fcode_minor
>= NIU_VPD_MIN_MINOR
))
7011 np
->flags
|= NIU_FLAGS_VPD_VALID
;
7014 /* ESPC_PIO_EN_ENABLE must be set */
7015 static int __devinit
niu_pci_vpd_scan_props(struct niu
*np
,
7018 unsigned int found_mask
= 0;
7019 #define FOUND_MASK_MODEL 0x00000001
7020 #define FOUND_MASK_BMODEL 0x00000002
7021 #define FOUND_MASK_VERS 0x00000004
7022 #define FOUND_MASK_MAC 0x00000008
7023 #define FOUND_MASK_NMAC 0x00000010
7024 #define FOUND_MASK_PHY 0x00000020
7025 #define FOUND_MASK_ALL 0x0000003f
7027 niudbg(PROBE
, "VPD_SCAN: start[%x] end[%x]\n",
7029 while (start
< end
) {
7030 int len
, err
, instance
, type
, prop_len
;
7035 if (found_mask
== FOUND_MASK_ALL
) {
7036 niu_vpd_parse_version(np
);
7040 err
= niu_pci_eeprom_read(np
, start
+ 2);
7046 instance
= niu_pci_eeprom_read(np
, start
);
7047 type
= niu_pci_eeprom_read(np
, start
+ 3);
7048 prop_len
= niu_pci_eeprom_read(np
, start
+ 4);
7049 err
= niu_pci_vpd_get_propname(np
, start
+ 5, namebuf
, 64);
7055 if (!strcmp(namebuf
, "model")) {
7056 prop_buf
= np
->vpd
.model
;
7057 max_len
= NIU_VPD_MODEL_MAX
;
7058 found_mask
|= FOUND_MASK_MODEL
;
7059 } else if (!strcmp(namebuf
, "board-model")) {
7060 prop_buf
= np
->vpd
.board_model
;
7061 max_len
= NIU_VPD_BD_MODEL_MAX
;
7062 found_mask
|= FOUND_MASK_BMODEL
;
7063 } else if (!strcmp(namebuf
, "version")) {
7064 prop_buf
= np
->vpd
.version
;
7065 max_len
= NIU_VPD_VERSION_MAX
;
7066 found_mask
|= FOUND_MASK_VERS
;
7067 } else if (!strcmp(namebuf
, "local-mac-address")) {
7068 prop_buf
= np
->vpd
.local_mac
;
7070 found_mask
|= FOUND_MASK_MAC
;
7071 } else if (!strcmp(namebuf
, "num-mac-addresses")) {
7072 prop_buf
= &np
->vpd
.mac_num
;
7074 found_mask
|= FOUND_MASK_NMAC
;
7075 } else if (!strcmp(namebuf
, "phy-type")) {
7076 prop_buf
= np
->vpd
.phy_type
;
7077 max_len
= NIU_VPD_PHY_TYPE_MAX
;
7078 found_mask
|= FOUND_MASK_PHY
;
7081 if (max_len
&& prop_len
> max_len
) {
7082 dev_err(np
->device
, PFX
"Property '%s' length (%d) is "
7083 "too long.\n", namebuf
, prop_len
);
7088 u32 off
= start
+ 5 + err
;
7091 niudbg(PROBE
, "VPD_SCAN: Reading in property [%s] "
7092 "len[%d]\n", namebuf
, prop_len
);
7093 for (i
= 0; i
< prop_len
; i
++)
7094 *prop_buf
++ = niu_pci_eeprom_read(np
, off
+ i
);
7103 /* ESPC_PIO_EN_ENABLE must be set */
7104 static void __devinit
niu_pci_vpd_fetch(struct niu
*np
, u32 start
)
7109 err
= niu_pci_eeprom_read16_swp(np
, start
+ 1);
7115 while (start
+ offset
< ESPC_EEPROM_SIZE
) {
7116 u32 here
= start
+ offset
;
7119 err
= niu_pci_eeprom_read(np
, here
);
7123 err
= niu_pci_eeprom_read16_swp(np
, here
+ 1);
7127 here
= start
+ offset
+ 3;
7128 end
= start
+ offset
+ err
;
7132 err
= niu_pci_vpd_scan_props(np
, here
, end
);
7133 if (err
< 0 || err
== 1)
7138 /* ESPC_PIO_EN_ENABLE must be set */
7139 static u32 __devinit
niu_pci_vpd_offset(struct niu
*np
)
7141 u32 start
= 0, end
= ESPC_EEPROM_SIZE
, ret
;
7144 while (start
< end
) {
7147 /* ROM header signature? */
7148 err
= niu_pci_eeprom_read16(np
, start
+ 0);
7152 /* Apply offset to PCI data structure. */
7153 err
= niu_pci_eeprom_read16(np
, start
+ 23);
7158 /* Check for "PCIR" signature. */
7159 err
= niu_pci_eeprom_read16(np
, start
+ 0);
7162 err
= niu_pci_eeprom_read16(np
, start
+ 2);
7166 /* Check for OBP image type. */
7167 err
= niu_pci_eeprom_read(np
, start
+ 20);
7171 err
= niu_pci_eeprom_read(np
, ret
+ 2);
7175 start
= ret
+ (err
* 512);
7179 err
= niu_pci_eeprom_read16_swp(np
, start
+ 8);
7184 err
= niu_pci_eeprom_read(np
, ret
+ 0);
7194 static int __devinit
niu_phy_type_prop_decode(struct niu
*np
,
7195 const char *phy_prop
)
7197 if (!strcmp(phy_prop
, "mif")) {
7198 /* 1G copper, MII */
7199 np
->flags
&= ~(NIU_FLAGS_FIBER
|
7201 np
->mac_xcvr
= MAC_XCVR_MII
;
7202 } else if (!strcmp(phy_prop
, "xgf")) {
7203 /* 10G fiber, XPCS */
7204 np
->flags
|= (NIU_FLAGS_10G
|
7206 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7207 } else if (!strcmp(phy_prop
, "pcs")) {
7209 np
->flags
&= ~NIU_FLAGS_10G
;
7210 np
->flags
|= NIU_FLAGS_FIBER
;
7211 np
->mac_xcvr
= MAC_XCVR_PCS
;
7212 } else if (!strcmp(phy_prop
, "xgc")) {
7213 /* 10G copper, XPCS */
7214 np
->flags
|= NIU_FLAGS_10G
;
7215 np
->flags
&= ~NIU_FLAGS_FIBER
;
7216 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7223 static int niu_pci_vpd_get_nports(struct niu
*np
)
7227 if ((!strcmp(np
->vpd
.model
, NIU_QGC_LP_MDL_STR
)) ||
7228 (!strcmp(np
->vpd
.model
, NIU_QGC_PEM_MDL_STR
)) ||
7229 (!strcmp(np
->vpd
.model
, NIU_MARAMBA_MDL_STR
)) ||
7230 (!strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) ||
7231 (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
))) {
7233 } else if ((!strcmp(np
->vpd
.model
, NIU_2XGF_LP_MDL_STR
)) ||
7234 (!strcmp(np
->vpd
.model
, NIU_2XGF_PEM_MDL_STR
)) ||
7235 (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) ||
7236 (!strcmp(np
->vpd
.model
, NIU_2XGF_MRVL_MDL_STR
))) {
7243 static void __devinit
niu_pci_vpd_validate(struct niu
*np
)
7245 struct net_device
*dev
= np
->dev
;
7246 struct niu_vpd
*vpd
= &np
->vpd
;
7249 if (!is_valid_ether_addr(&vpd
->local_mac
[0])) {
7250 dev_err(np
->device
, PFX
"VPD MAC invalid, "
7251 "falling back to SPROM.\n");
7253 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
7257 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
7258 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
7259 np
->flags
|= NIU_FLAGS_10G
;
7260 np
->flags
&= ~NIU_FLAGS_FIBER
;
7261 np
->flags
|= NIU_FLAGS_XCVR_SERDES
;
7262 np
->mac_xcvr
= MAC_XCVR_PCS
;
7264 np
->flags
|= NIU_FLAGS_FIBER
;
7265 np
->flags
&= ~NIU_FLAGS_10G
;
7267 if (np
->flags
& NIU_FLAGS_10G
)
7268 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7269 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
7270 np
->flags
|= (NIU_FLAGS_10G
| NIU_FLAGS_FIBER
|
7271 NIU_FLAGS_HOTPLUG_PHY
);
7272 } else if (niu_phy_type_prop_decode(np
, np
->vpd
.phy_type
)) {
7273 dev_err(np
->device
, PFX
"Illegal phy string [%s].\n",
7275 dev_err(np
->device
, PFX
"Falling back to SPROM.\n");
7276 np
->flags
&= ~NIU_FLAGS_VPD_VALID
;
7280 memcpy(dev
->perm_addr
, vpd
->local_mac
, ETH_ALEN
);
7282 val8
= dev
->perm_addr
[5];
7283 dev
->perm_addr
[5] += np
->port
;
7284 if (dev
->perm_addr
[5] < val8
)
7285 dev
->perm_addr
[4]++;
7287 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
7290 static int __devinit
niu_pci_probe_sprom(struct niu
*np
)
7292 struct net_device
*dev
= np
->dev
;
7297 val
= (nr64(ESPC_VER_IMGSZ
) & ESPC_VER_IMGSZ_IMGSZ
);
7298 val
>>= ESPC_VER_IMGSZ_IMGSZ_SHIFT
;
7301 np
->eeprom_len
= len
;
7303 niudbg(PROBE
, "SPROM: Image size %llu\n", (unsigned long long) val
);
7306 for (i
= 0; i
< len
; i
++) {
7307 val
= nr64(ESPC_NCR(i
));
7308 sum
+= (val
>> 0) & 0xff;
7309 sum
+= (val
>> 8) & 0xff;
7310 sum
+= (val
>> 16) & 0xff;
7311 sum
+= (val
>> 24) & 0xff;
7313 niudbg(PROBE
, "SPROM: Checksum %x\n", (int)(sum
& 0xff));
7314 if ((sum
& 0xff) != 0xab) {
7315 dev_err(np
->device
, PFX
"Bad SPROM checksum "
7316 "(%x, should be 0xab)\n", (int) (sum
& 0xff));
7320 val
= nr64(ESPC_PHY_TYPE
);
7323 val8
= (val
& ESPC_PHY_TYPE_PORT0
) >>
7324 ESPC_PHY_TYPE_PORT0_SHIFT
;
7327 val8
= (val
& ESPC_PHY_TYPE_PORT1
) >>
7328 ESPC_PHY_TYPE_PORT1_SHIFT
;
7331 val8
= (val
& ESPC_PHY_TYPE_PORT2
) >>
7332 ESPC_PHY_TYPE_PORT2_SHIFT
;
7335 val8
= (val
& ESPC_PHY_TYPE_PORT3
) >>
7336 ESPC_PHY_TYPE_PORT3_SHIFT
;
7339 dev_err(np
->device
, PFX
"Bogus port number %u\n",
7343 niudbg(PROBE
, "SPROM: PHY type %x\n", val8
);
7346 case ESPC_PHY_TYPE_1G_COPPER
:
7347 /* 1G copper, MII */
7348 np
->flags
&= ~(NIU_FLAGS_FIBER
|
7350 np
->mac_xcvr
= MAC_XCVR_MII
;
7353 case ESPC_PHY_TYPE_1G_FIBER
:
7355 np
->flags
&= ~NIU_FLAGS_10G
;
7356 np
->flags
|= NIU_FLAGS_FIBER
;
7357 np
->mac_xcvr
= MAC_XCVR_PCS
;
7360 case ESPC_PHY_TYPE_10G_COPPER
:
7361 /* 10G copper, XPCS */
7362 np
->flags
|= NIU_FLAGS_10G
;
7363 np
->flags
&= ~NIU_FLAGS_FIBER
;
7364 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7367 case ESPC_PHY_TYPE_10G_FIBER
:
7368 /* 10G fiber, XPCS */
7369 np
->flags
|= (NIU_FLAGS_10G
|
7371 np
->mac_xcvr
= MAC_XCVR_XPCS
;
7375 dev_err(np
->device
, PFX
"Bogus SPROM phy type %u\n", val8
);
7379 val
= nr64(ESPC_MAC_ADDR0
);
7380 niudbg(PROBE
, "SPROM: MAC_ADDR0[%08llx]\n",
7381 (unsigned long long) val
);
7382 dev
->perm_addr
[0] = (val
>> 0) & 0xff;
7383 dev
->perm_addr
[1] = (val
>> 8) & 0xff;
7384 dev
->perm_addr
[2] = (val
>> 16) & 0xff;
7385 dev
->perm_addr
[3] = (val
>> 24) & 0xff;
7387 val
= nr64(ESPC_MAC_ADDR1
);
7388 niudbg(PROBE
, "SPROM: MAC_ADDR1[%08llx]\n",
7389 (unsigned long long) val
);
7390 dev
->perm_addr
[4] = (val
>> 0) & 0xff;
7391 dev
->perm_addr
[5] = (val
>> 8) & 0xff;
7393 if (!is_valid_ether_addr(&dev
->perm_addr
[0])) {
7394 dev_err(np
->device
, PFX
"SPROM MAC address invalid\n");
7395 dev_err(np
->device
, PFX
"[ \n");
7396 for (i
= 0; i
< 6; i
++)
7397 printk("%02x ", dev
->perm_addr
[i
]);
7402 val8
= dev
->perm_addr
[5];
7403 dev
->perm_addr
[5] += np
->port
;
7404 if (dev
->perm_addr
[5] < val8
)
7405 dev
->perm_addr
[4]++;
7407 memcpy(dev
->dev_addr
, dev
->perm_addr
, dev
->addr_len
);
7409 val
= nr64(ESPC_MOD_STR_LEN
);
7410 niudbg(PROBE
, "SPROM: MOD_STR_LEN[%llu]\n",
7411 (unsigned long long) val
);
7415 for (i
= 0; i
< val
; i
+= 4) {
7416 u64 tmp
= nr64(ESPC_NCR(5 + (i
/ 4)));
7418 np
->vpd
.model
[i
+ 3] = (tmp
>> 0) & 0xff;
7419 np
->vpd
.model
[i
+ 2] = (tmp
>> 8) & 0xff;
7420 np
->vpd
.model
[i
+ 1] = (tmp
>> 16) & 0xff;
7421 np
->vpd
.model
[i
+ 0] = (tmp
>> 24) & 0xff;
7423 np
->vpd
.model
[val
] = '\0';
7425 val
= nr64(ESPC_BD_MOD_STR_LEN
);
7426 niudbg(PROBE
, "SPROM: BD_MOD_STR_LEN[%llu]\n",
7427 (unsigned long long) val
);
7431 for (i
= 0; i
< val
; i
+= 4) {
7432 u64 tmp
= nr64(ESPC_NCR(14 + (i
/ 4)));
7434 np
->vpd
.board_model
[i
+ 3] = (tmp
>> 0) & 0xff;
7435 np
->vpd
.board_model
[i
+ 2] = (tmp
>> 8) & 0xff;
7436 np
->vpd
.board_model
[i
+ 1] = (tmp
>> 16) & 0xff;
7437 np
->vpd
.board_model
[i
+ 0] = (tmp
>> 24) & 0xff;
7439 np
->vpd
.board_model
[val
] = '\0';
7442 nr64(ESPC_NUM_PORTS_MACS
) & ESPC_NUM_PORTS_MACS_VAL
;
7443 niudbg(PROBE
, "SPROM: NUM_PORTS_MACS[%d]\n",
7449 static int __devinit
niu_get_and_validate_port(struct niu
*np
)
7451 struct niu_parent
*parent
= np
->parent
;
7454 np
->flags
|= NIU_FLAGS_XMAC
;
7456 if (!parent
->num_ports
) {
7457 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
7458 parent
->num_ports
= 2;
7460 parent
->num_ports
= niu_pci_vpd_get_nports(np
);
7461 if (!parent
->num_ports
) {
7462 /* Fall back to SPROM as last resort.
7463 * This will fail on most cards.
7465 parent
->num_ports
= nr64(ESPC_NUM_PORTS_MACS
) &
7466 ESPC_NUM_PORTS_MACS_VAL
;
7468 /* All of the current probing methods fail on
7469 * Maramba on-board parts.
7471 if (!parent
->num_ports
)
7472 parent
->num_ports
= 4;
7477 niudbg(PROBE
, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
7478 np
->port
, parent
->num_ports
);
7479 if (np
->port
>= parent
->num_ports
)
7485 static int __devinit
phy_record(struct niu_parent
*parent
,
7486 struct phy_probe_info
*p
,
7487 int dev_id_1
, int dev_id_2
, u8 phy_port
,
7490 u32 id
= (dev_id_1
<< 16) | dev_id_2
;
7493 if (dev_id_1
< 0 || dev_id_2
< 0)
7495 if (type
== PHY_TYPE_PMA_PMD
|| type
== PHY_TYPE_PCS
) {
7496 if (((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8704
) &&
7497 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_MRVL88X2011
) &&
7498 ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM8706
))
7501 if ((id
& NIU_PHY_ID_MASK
) != NIU_PHY_ID_BCM5464R
)
7505 pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
7507 (type
== PHY_TYPE_PMA_PMD
?
7509 (type
== PHY_TYPE_PCS
?
7513 if (p
->cur
[type
] >= NIU_MAX_PORTS
) {
7514 printk(KERN_ERR PFX
"Too many PHY ports.\n");
7518 p
->phy_id
[type
][idx
] = id
;
7519 p
->phy_port
[type
][idx
] = phy_port
;
7520 p
->cur
[type
] = idx
+ 1;
7524 static int __devinit
port_has_10g(struct phy_probe_info
*p
, int port
)
7528 for (i
= 0; i
< p
->cur
[PHY_TYPE_PMA_PMD
]; i
++) {
7529 if (p
->phy_port
[PHY_TYPE_PMA_PMD
][i
] == port
)
7532 for (i
= 0; i
< p
->cur
[PHY_TYPE_PCS
]; i
++) {
7533 if (p
->phy_port
[PHY_TYPE_PCS
][i
] == port
)
7540 static int __devinit
count_10g_ports(struct phy_probe_info
*p
, int *lowest
)
7546 for (port
= 8; port
< 32; port
++) {
7547 if (port_has_10g(p
, port
)) {
7557 static int __devinit
count_1g_ports(struct phy_probe_info
*p
, int *lowest
)
7560 if (p
->cur
[PHY_TYPE_MII
])
7561 *lowest
= p
->phy_port
[PHY_TYPE_MII
][0];
7563 return p
->cur
[PHY_TYPE_MII
];
7566 static void __devinit
niu_n2_divide_channels(struct niu_parent
*parent
)
7568 int num_ports
= parent
->num_ports
;
7571 for (i
= 0; i
< num_ports
; i
++) {
7572 parent
->rxchan_per_port
[i
] = (16 / num_ports
);
7573 parent
->txchan_per_port
[i
] = (16 / num_ports
);
7575 pr_info(PFX
"niu%d: Port %u [%u RX chans] "
7578 parent
->rxchan_per_port
[i
],
7579 parent
->txchan_per_port
[i
]);
7583 static void __devinit
niu_divide_channels(struct niu_parent
*parent
,
7584 int num_10g
, int num_1g
)
7586 int num_ports
= parent
->num_ports
;
7587 int rx_chans_per_10g
, rx_chans_per_1g
;
7588 int tx_chans_per_10g
, tx_chans_per_1g
;
7589 int i
, tot_rx
, tot_tx
;
7591 if (!num_10g
|| !num_1g
) {
7592 rx_chans_per_10g
= rx_chans_per_1g
=
7593 (NIU_NUM_RXCHAN
/ num_ports
);
7594 tx_chans_per_10g
= tx_chans_per_1g
=
7595 (NIU_NUM_TXCHAN
/ num_ports
);
7597 rx_chans_per_1g
= NIU_NUM_RXCHAN
/ 8;
7598 rx_chans_per_10g
= (NIU_NUM_RXCHAN
-
7599 (rx_chans_per_1g
* num_1g
)) /
7602 tx_chans_per_1g
= NIU_NUM_TXCHAN
/ 6;
7603 tx_chans_per_10g
= (NIU_NUM_TXCHAN
-
7604 (tx_chans_per_1g
* num_1g
)) /
7608 tot_rx
= tot_tx
= 0;
7609 for (i
= 0; i
< num_ports
; i
++) {
7610 int type
= phy_decode(parent
->port_phy
, i
);
7612 if (type
== PORT_TYPE_10G
) {
7613 parent
->rxchan_per_port
[i
] = rx_chans_per_10g
;
7614 parent
->txchan_per_port
[i
] = tx_chans_per_10g
;
7616 parent
->rxchan_per_port
[i
] = rx_chans_per_1g
;
7617 parent
->txchan_per_port
[i
] = tx_chans_per_1g
;
7619 pr_info(PFX
"niu%d: Port %u [%u RX chans] "
7622 parent
->rxchan_per_port
[i
],
7623 parent
->txchan_per_port
[i
]);
7624 tot_rx
+= parent
->rxchan_per_port
[i
];
7625 tot_tx
+= parent
->txchan_per_port
[i
];
7628 if (tot_rx
> NIU_NUM_RXCHAN
) {
7629 printk(KERN_ERR PFX
"niu%d: Too many RX channels (%d), "
7630 "resetting to one per port.\n",
7631 parent
->index
, tot_rx
);
7632 for (i
= 0; i
< num_ports
; i
++)
7633 parent
->rxchan_per_port
[i
] = 1;
7635 if (tot_tx
> NIU_NUM_TXCHAN
) {
7636 printk(KERN_ERR PFX
"niu%d: Too many TX channels (%d), "
7637 "resetting to one per port.\n",
7638 parent
->index
, tot_tx
);
7639 for (i
= 0; i
< num_ports
; i
++)
7640 parent
->txchan_per_port
[i
] = 1;
7642 if (tot_rx
< NIU_NUM_RXCHAN
|| tot_tx
< NIU_NUM_TXCHAN
) {
7643 printk(KERN_WARNING PFX
"niu%d: Driver bug, wasted channels, "
7645 parent
->index
, tot_rx
, tot_tx
);
7649 static void __devinit
niu_divide_rdc_groups(struct niu_parent
*parent
,
7650 int num_10g
, int num_1g
)
7652 int i
, num_ports
= parent
->num_ports
;
7653 int rdc_group
, rdc_groups_per_port
;
7654 int rdc_channel_base
;
7657 rdc_groups_per_port
= NIU_NUM_RDC_TABLES
/ num_ports
;
7659 rdc_channel_base
= 0;
7661 for (i
= 0; i
< num_ports
; i
++) {
7662 struct niu_rdc_tables
*tp
= &parent
->rdc_group_cfg
[i
];
7663 int grp
, num_channels
= parent
->rxchan_per_port
[i
];
7664 int this_channel_offset
;
7666 tp
->first_table_num
= rdc_group
;
7667 tp
->num_tables
= rdc_groups_per_port
;
7668 this_channel_offset
= 0;
7669 for (grp
= 0; grp
< tp
->num_tables
; grp
++) {
7670 struct rdc_table
*rt
= &tp
->tables
[grp
];
7673 pr_info(PFX
"niu%d: Port %d RDC tbl(%d) [ ",
7674 parent
->index
, i
, tp
->first_table_num
+ grp
);
7675 for (slot
= 0; slot
< NIU_RDC_TABLE_SLOTS
; slot
++) {
7676 rt
->rxdma_channel
[slot
] =
7677 rdc_channel_base
+ this_channel_offset
;
7679 printk("%d ", rt
->rxdma_channel
[slot
]);
7681 if (++this_channel_offset
== num_channels
)
7682 this_channel_offset
= 0;
7687 parent
->rdc_default
[i
] = rdc_channel_base
;
7689 rdc_channel_base
+= num_channels
;
7690 rdc_group
+= rdc_groups_per_port
;
7694 static int __devinit
fill_phy_probe_info(struct niu
*np
,
7695 struct niu_parent
*parent
,
7696 struct phy_probe_info
*info
)
7698 unsigned long flags
;
7701 memset(info
, 0, sizeof(*info
));
7703 /* Port 0 to 7 are reserved for onboard Serdes, probe the rest. */
7704 niu_lock_parent(np
, flags
);
7706 for (port
= 8; port
< 32; port
++) {
7707 int dev_id_1
, dev_id_2
;
7709 dev_id_1
= mdio_read(np
, port
,
7710 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID1
);
7711 dev_id_2
= mdio_read(np
, port
,
7712 NIU_PMA_PMD_DEV_ADDR
, MII_PHYSID2
);
7713 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7717 dev_id_1
= mdio_read(np
, port
,
7718 NIU_PCS_DEV_ADDR
, MII_PHYSID1
);
7719 dev_id_2
= mdio_read(np
, port
,
7720 NIU_PCS_DEV_ADDR
, MII_PHYSID2
);
7721 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7725 dev_id_1
= mii_read(np
, port
, MII_PHYSID1
);
7726 dev_id_2
= mii_read(np
, port
, MII_PHYSID2
);
7727 err
= phy_record(parent
, info
, dev_id_1
, dev_id_2
, port
,
7732 niu_unlock_parent(np
, flags
);
7737 static int __devinit
walk_phys(struct niu
*np
, struct niu_parent
*parent
)
7739 struct phy_probe_info
*info
= &parent
->phy_probe_info
;
7740 int lowest_10g
, lowest_1g
;
7741 int num_10g
, num_1g
;
7745 if (!strcmp(np
->vpd
.model
, NIU_ALONSO_MDL_STR
) ||
7746 !strcmp(np
->vpd
.model
, NIU_KIMI_MDL_STR
)) {
7749 parent
->plat_type
= PLAT_TYPE_ATCA_CP3220
;
7750 parent
->num_ports
= 4;
7751 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7752 phy_encode(PORT_TYPE_1G
, 1) |
7753 phy_encode(PORT_TYPE_1G
, 2) |
7754 phy_encode(PORT_TYPE_1G
, 3));
7755 } else if (!strcmp(np
->vpd
.model
, NIU_FOXXY_MDL_STR
)) {
7758 parent
->num_ports
= 2;
7759 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7760 phy_encode(PORT_TYPE_10G
, 1));
7762 err
= fill_phy_probe_info(np
, parent
, info
);
7766 num_10g
= count_10g_ports(info
, &lowest_10g
);
7767 num_1g
= count_1g_ports(info
, &lowest_1g
);
7769 switch ((num_10g
<< 4) | num_1g
) {
7771 if (lowest_1g
== 10)
7772 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7773 else if (lowest_1g
== 26)
7774 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7776 goto unknown_vg_1g_port
;
7780 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7781 phy_encode(PORT_TYPE_10G
, 1) |
7782 phy_encode(PORT_TYPE_1G
, 2) |
7783 phy_encode(PORT_TYPE_1G
, 3));
7787 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7788 phy_encode(PORT_TYPE_10G
, 1));
7792 val
= phy_encode(PORT_TYPE_10G
, np
->port
);
7796 if (lowest_1g
== 10)
7797 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7798 else if (lowest_1g
== 26)
7799 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7801 goto unknown_vg_1g_port
;
7805 if ((lowest_10g
& 0x7) == 0)
7806 val
= (phy_encode(PORT_TYPE_10G
, 0) |
7807 phy_encode(PORT_TYPE_1G
, 1) |
7808 phy_encode(PORT_TYPE_1G
, 2) |
7809 phy_encode(PORT_TYPE_1G
, 3));
7811 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7812 phy_encode(PORT_TYPE_10G
, 1) |
7813 phy_encode(PORT_TYPE_1G
, 2) |
7814 phy_encode(PORT_TYPE_1G
, 3));
7818 if (lowest_1g
== 10)
7819 parent
->plat_type
= PLAT_TYPE_VF_P0
;
7820 else if (lowest_1g
== 26)
7821 parent
->plat_type
= PLAT_TYPE_VF_P1
;
7823 goto unknown_vg_1g_port
;
7825 val
= (phy_encode(PORT_TYPE_1G
, 0) |
7826 phy_encode(PORT_TYPE_1G
, 1) |
7827 phy_encode(PORT_TYPE_1G
, 2) |
7828 phy_encode(PORT_TYPE_1G
, 3));
7832 printk(KERN_ERR PFX
"Unsupported port config "
7839 parent
->port_phy
= val
;
7841 if (parent
->plat_type
== PLAT_TYPE_NIU
)
7842 niu_n2_divide_channels(parent
);
7844 niu_divide_channels(parent
, num_10g
, num_1g
);
7846 niu_divide_rdc_groups(parent
, num_10g
, num_1g
);
7851 printk(KERN_ERR PFX
"Cannot identify platform type, 1gport=%d\n",
7856 static int __devinit
niu_probe_ports(struct niu
*np
)
7858 struct niu_parent
*parent
= np
->parent
;
7861 niudbg(PROBE
, "niu_probe_ports(): port_phy[%08x]\n",
7864 if (parent
->port_phy
== PORT_PHY_UNKNOWN
) {
7865 err
= walk_phys(np
, parent
);
7869 niu_set_ldg_timer_res(np
, 2);
7870 for (i
= 0; i
<= LDN_MAX
; i
++)
7871 niu_ldn_irq_enable(np
, i
, 0);
7874 if (parent
->port_phy
== PORT_PHY_INVALID
)
7880 static int __devinit
niu_classifier_swstate_init(struct niu
*np
)
7882 struct niu_classifier
*cp
= &np
->clas
;
7884 niudbg(PROBE
, "niu_classifier_swstate_init: num_tcam(%d)\n",
7885 np
->parent
->tcam_num_entries
);
7887 cp
->tcam_index
= (u16
) np
->port
;
7888 cp
->h1_init
= 0xffffffff;
7889 cp
->h2_init
= 0xffff;
7891 return fflp_early_init(np
);
7894 static void __devinit
niu_link_config_init(struct niu
*np
)
7896 struct niu_link_config
*lp
= &np
->link_config
;
7898 lp
->advertising
= (ADVERTISED_10baseT_Half
|
7899 ADVERTISED_10baseT_Full
|
7900 ADVERTISED_100baseT_Half
|
7901 ADVERTISED_100baseT_Full
|
7902 ADVERTISED_1000baseT_Half
|
7903 ADVERTISED_1000baseT_Full
|
7904 ADVERTISED_10000baseT_Full
|
7905 ADVERTISED_Autoneg
);
7906 lp
->speed
= lp
->active_speed
= SPEED_INVALID
;
7907 lp
->duplex
= lp
->active_duplex
= DUPLEX_INVALID
;
7909 lp
->loopback_mode
= LOOPBACK_MAC
;
7910 lp
->active_speed
= SPEED_10000
;
7911 lp
->active_duplex
= DUPLEX_FULL
;
7913 lp
->loopback_mode
= LOOPBACK_DISABLED
;
7917 static int __devinit
niu_init_mac_ipp_pcs_base(struct niu
*np
)
7921 np
->mac_regs
= np
->regs
+ XMAC_PORT0_OFF
;
7922 np
->ipp_off
= 0x00000;
7923 np
->pcs_off
= 0x04000;
7924 np
->xpcs_off
= 0x02000;
7928 np
->mac_regs
= np
->regs
+ XMAC_PORT1_OFF
;
7929 np
->ipp_off
= 0x08000;
7930 np
->pcs_off
= 0x0a000;
7931 np
->xpcs_off
= 0x08000;
7935 np
->mac_regs
= np
->regs
+ BMAC_PORT2_OFF
;
7936 np
->ipp_off
= 0x04000;
7937 np
->pcs_off
= 0x0e000;
7938 np
->xpcs_off
= ~0UL;
7942 np
->mac_regs
= np
->regs
+ BMAC_PORT3_OFF
;
7943 np
->ipp_off
= 0x0c000;
7944 np
->pcs_off
= 0x12000;
7945 np
->xpcs_off
= ~0UL;
7949 dev_err(np
->device
, PFX
"Port %u is invalid, cannot "
7950 "compute MAC block offset.\n", np
->port
);
7957 static void __devinit
niu_try_msix(struct niu
*np
, u8
*ldg_num_map
)
7959 struct msix_entry msi_vec
[NIU_NUM_LDG
];
7960 struct niu_parent
*parent
= np
->parent
;
7961 struct pci_dev
*pdev
= np
->pdev
;
7962 int i
, num_irqs
, err
;
7965 first_ldg
= (NIU_NUM_LDG
/ parent
->num_ports
) * np
->port
;
7966 for (i
= 0; i
< (NIU_NUM_LDG
/ parent
->num_ports
); i
++)
7967 ldg_num_map
[i
] = first_ldg
+ i
;
7969 num_irqs
= (parent
->rxchan_per_port
[np
->port
] +
7970 parent
->txchan_per_port
[np
->port
] +
7971 (np
->port
== 0 ? 3 : 1));
7972 BUG_ON(num_irqs
> (NIU_NUM_LDG
/ parent
->num_ports
));
7975 for (i
= 0; i
< num_irqs
; i
++) {
7976 msi_vec
[i
].vector
= 0;
7977 msi_vec
[i
].entry
= i
;
7980 err
= pci_enable_msix(pdev
, msi_vec
, num_irqs
);
7982 np
->flags
&= ~NIU_FLAGS_MSIX
;
7990 np
->flags
|= NIU_FLAGS_MSIX
;
7991 for (i
= 0; i
< num_irqs
; i
++)
7992 np
->ldg
[i
].irq
= msi_vec
[i
].vector
;
7993 np
->num_ldg
= num_irqs
;
7996 static int __devinit
niu_n2_irq_init(struct niu
*np
, u8
*ldg_num_map
)
7998 #ifdef CONFIG_SPARC64
7999 struct of_device
*op
= np
->op
;
8000 const u32
*int_prop
;
8003 int_prop
= of_get_property(op
->node
, "interrupts", NULL
);
8007 for (i
= 0; i
< op
->num_irqs
; i
++) {
8008 ldg_num_map
[i
] = int_prop
[i
];
8009 np
->ldg
[i
].irq
= op
->irqs
[i
];
8012 np
->num_ldg
= op
->num_irqs
;
8020 static int __devinit
niu_ldg_init(struct niu
*np
)
8022 struct niu_parent
*parent
= np
->parent
;
8023 u8 ldg_num_map
[NIU_NUM_LDG
];
8024 int first_chan
, num_chan
;
8025 int i
, err
, ldg_rotor
;
8029 np
->ldg
[0].irq
= np
->dev
->irq
;
8030 if (parent
->plat_type
== PLAT_TYPE_NIU
) {
8031 err
= niu_n2_irq_init(np
, ldg_num_map
);
8035 niu_try_msix(np
, ldg_num_map
);
8038 for (i
= 0; i
< np
->num_ldg
; i
++) {
8039 struct niu_ldg
*lp
= &np
->ldg
[i
];
8041 netif_napi_add(np
->dev
, &lp
->napi
, niu_poll
, 64);
8044 lp
->ldg_num
= ldg_num_map
[i
];
8045 lp
->timer
= 2; /* XXX */
8047 /* On N2 NIU the firmware has setup the SID mappings so they go
8048 * to the correct values that will route the LDG to the proper
8049 * interrupt in the NCU interrupt table.
8051 if (np
->parent
->plat_type
!= PLAT_TYPE_NIU
) {
8052 err
= niu_set_ldg_sid(np
, lp
->ldg_num
, port
, i
);
8058 /* We adopt the LDG assignment ordering used by the N2 NIU
8059 * 'interrupt' properties because that simplifies a lot of
8060 * things. This ordering is:
8063 * MIF (if port zero)
8064 * SYSERR (if port zero)
8071 err
= niu_ldg_assign_ldn(np
, parent
, ldg_num_map
[ldg_rotor
],
8077 if (ldg_rotor
== np
->num_ldg
)
8081 err
= niu_ldg_assign_ldn(np
, parent
,
8082 ldg_num_map
[ldg_rotor
],
8088 if (ldg_rotor
== np
->num_ldg
)
8091 err
= niu_ldg_assign_ldn(np
, parent
,
8092 ldg_num_map
[ldg_rotor
],
8098 if (ldg_rotor
== np
->num_ldg
)
8104 for (i
= 0; i
< port
; i
++)
8105 first_chan
+= parent
->rxchan_per_port
[port
];
8106 num_chan
= parent
->rxchan_per_port
[port
];
8108 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
8109 err
= niu_ldg_assign_ldn(np
, parent
,
8110 ldg_num_map
[ldg_rotor
],
8115 if (ldg_rotor
== np
->num_ldg
)
8120 for (i
= 0; i
< port
; i
++)
8121 first_chan
+= parent
->txchan_per_port
[port
];
8122 num_chan
= parent
->txchan_per_port
[port
];
8123 for (i
= first_chan
; i
< (first_chan
+ num_chan
); i
++) {
8124 err
= niu_ldg_assign_ldn(np
, parent
,
8125 ldg_num_map
[ldg_rotor
],
8130 if (ldg_rotor
== np
->num_ldg
)
8137 static void __devexit
niu_ldg_free(struct niu
*np
)
8139 if (np
->flags
& NIU_FLAGS_MSIX
)
8140 pci_disable_msix(np
->pdev
);
static int __devinit niu_get_of_props(struct niu *np)
{
#ifdef CONFIG_SPARC64
	struct net_device *dev = np->dev;
	struct device_node *dp;
	const char *phy_type;
	const u8 *mac_addr;
	const char *model;
	int prop_len;

	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", &prop_len);
	if (!phy_type) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"phy-type property\n",
			dp->full_name);
		return -EINVAL;
	}

	if (!strcmp(phy_type, "none"))
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
			dp->full_name, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		dev_err(np->device, PFX "%s: OF node lacks "
			"local-mac-address property\n",
			dp->full_name);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
			"is wrong.\n",
			dp->full_name, prop_len);
	}
	memcpy(dev->perm_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		int i;

		dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
			dp->full_name);
		dev_err(np->device, PFX "%s: [ \n",
			dp->full_name);
		for (i = 0; i < 6; i++)
			printk("%02x ", dev->perm_addr[i]);
		printk("]\n");
		return -EINVAL;
	}

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	model = of_get_property(dp, "model", &prop_len);

	if (model)
		strcpy(np->vpd.model, model);

	return 0;
#else
	return -EINVAL;
#endif
}

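/* Gather everything that is fixed for this chip instance: port number,
 * MAC address and PHY layout, either from OF properties or, failing
 * that, from the VPD/SPROM reachable through the ESPC PIO window.
 */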
static int __devinit niu_get_invariants(struct niu *np)
{
	int err, have_props;
	u32 offset;

	err = niu_get_of_props(np);
	if (err == -ENODEV)
		return err;

	have_props = !err;

	err = niu_init_mac_ipp_pcs_base(np);
	if (err)
		return err;

	if (have_props) {
		err = niu_get_and_validate_port(np);
		if (err)
			return err;
	} else {
		if (np->parent->plat_type == PLAT_TYPE_NIU)
			return -EINVAL;

		nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
		offset = niu_pci_vpd_offset(np);
		niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
		       offset);
		if (offset)
			niu_pci_vpd_fetch(np, offset);
		nw64(ESPC_PIO_EN, 0);

		if (np->flags & NIU_FLAGS_VPD_VALID) {
			niu_pci_vpd_validate(np);
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
		}

		if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
			err = niu_get_and_validate_port(np);
			if (err)
				return err;
			err = niu_pci_probe_sprom(np);
			if (err)
				return err;
		}
	}

	err = niu_probe_ports(np);
	if (err)
		return err;

	niu_ldg_init(np);

	niu_classifier_swstate_init(np);
	niu_link_config_init(np);

	err = niu_determine_phy_disposition(np);
	if (!err)
		err = niu_init_link(np);

	return err;
}

static LIST_HEAD(niu_parent_list);
static DEFINE_MUTEX(niu_parent_lock);
static int niu_parent_index;

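/* Read-only sysfs attributes exported on the shared "niu" parent
 * platform device: port_phy, plat_type, rxchan_per_port,
 * txchan_per_port and num_ports (see niu_parent_attributes[] below).
 */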
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	u32 port_phy = p->port_phy;
	char *orig_buf = buf;
	int i;

	if (port_phy == PORT_PHY_UNKNOWN ||
	    port_phy == PORT_PHY_INVALID)
		return 0;

	for (i = 0; i < p->num_ports; i++) {
		const char *type_str;
		int type;

		type = phy_decode(port_phy, i);
		if (type == PORT_TYPE_10G)
			type_str = "10G";
		else
			type_str = "1G";
		buf += sprintf(buf,
			       (i == 0) ? "%s" : " %s",
			       type_str);
	}
	buf += sprintf(buf, "\n");
	return buf - orig_buf;
}

static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	const char *type_str;

	switch (p->plat_type) {
	case PLAT_TYPE_ATLAS:
		type_str = "atlas";
		break;
	case PLAT_TYPE_NIU:
		type_str = "niu";
		break;
	case PLAT_TYPE_VF_P0:
		type_str = "vf_p0";
		break;
	case PLAT_TYPE_VF_P1:
		type_str = "vf_p1";
		break;
	default:
		type_str = "unknown";
		break;
	}

	return sprintf(buf, "%s\n", type_str);
}

static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf,
				    int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;
	char *orig_buf = buf;
	u8 *arr;
	int i;

	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);

	for (i = 0; i < p->num_ports; i++) {
		buf += sprintf(buf,
			       (i == 0) ? "%d" : " %d",
			       arr[i]);
	}
	buf += sprintf(buf, "\n");

	return buf - orig_buf;
}

static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* Pass 0 so the TX channel array is reported, not the RX one. */
	return __show_chan_per_port(dev, attr, buf, 0);
}

static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = plat_dev->dev.platform_data;

	return sprintf(buf, "%d\n", p->num_ports);
}

static struct device_attribute niu_parent_attributes[] = {
	__ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
	__ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
	__ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
	__ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
	__ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
	{}
};

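/* A niu_parent is shared by all ports of one physical NIU chip.  It is
 * created when the first port probes, found again by niu_parent_id for
 * the remaining ports, and released when the last reference goes away
 * in niu_put_parent().
 */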
static struct niu_parent * __devinit niu_new_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct platform_device *plat_dev;
	struct niu_parent *p;
	int i;

	niudbg(PROBE, "niu_new_parent: Creating new parent.\n");

	plat_dev = platform_device_register_simple("niu", niu_parent_index,
						   NULL, 0);
	if (IS_ERR(plat_dev))
		return NULL;

	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
		int err = device_create_file(&plat_dev->dev,
					     &niu_parent_attributes[i]);
		if (err)
			goto fail_unregister;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto fail_unregister;

	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;

	for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
		int index = i - CLASS_CODE_USER_PROG1;

		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
				      FLOW_KEY_IPDA |
				      FLOW_KEY_PROTO |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_0_SHIFT) |
				      (FLOW_KEY_L4_BYTE12 <<
				       FLOW_KEY_L4_1_SHIFT));
	}

	for (i = 0; i < LDN_MAX + 1; i++)
		p->ldg_map[i] = LDG_INVALID;

	return p;

fail_unregister:
	platform_device_unregister(plat_dev);
	return NULL;
}

static struct niu_parent * __devinit niu_get_parent(struct niu *np,
						    union niu_parent_id *id,
						    u8 ptype)
{
	struct niu_parent *p, *tmp;
	int port = np->port;

	niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
	       ptype, port);

	mutex_lock(&niu_parent_lock);
	p = NULL;
	list_for_each_entry(tmp, &niu_parent_list, list) {
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
			p = tmp;
			break;
		}
	}
	if (!p)
		p = niu_new_parent(np, id, ptype);

	if (p) {
		char port_name[6];
		int err;

		sprintf(port_name, "port%d", port);
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		if (!err) {
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
		}
	}
	mutex_unlock(&niu_parent_lock);

	return p;
}

static void niu_put_parent(struct niu *np)
{
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	char port_name[6];

	BUG_ON(!p || p->ports[port] != np);

	niudbg(PROBE, "niu_put_parent: port[%u]\n", port);

	sprintf(port_name, "port%d", port);

	mutex_lock(&niu_parent_lock);

	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}

	mutex_unlock(&niu_parent_lock);
}

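/* DMA helpers reached through np->ops, so the rest of the driver does
 * not care whether it drives a PCI Atlas card (generic DMA API below)
 * or the on-chip N2 NIU, which works on raw physical addresses (see
 * niu_phys_ops further down, sparc64 only).
 */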
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
				    u64 *handle, gfp_t flag)
{
	dma_addr_t dh;
	void *ret;

	ret = dma_alloc_coherent(dev, size, &dh, flag);
	if (ret)
		*handle = dh;
	return ret;
}

static void niu_pci_free_coherent(struct device *dev, size_t size,
				  void *cpu_addr, u64 handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}

static u64 niu_pci_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction)
{
	return dma_map_page(dev, page, offset, size, direction);
}

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
			       size_t size, enum dma_data_direction direction)
{
	dma_unmap_page(dev, dma_address, size, direction);
}

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
			      size_t size,
			      enum dma_data_direction direction)
{
	return dma_map_single(dev, cpu_addr, size, direction);
}

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
				 size_t size,
				 enum dma_data_direction direction)
{
	dma_unmap_single(dev, dma_address, size, direction);
}

static const struct niu_ops niu_pci_ops = {
	.alloc_coherent	= niu_pci_alloc_coherent,
	.free_coherent	= niu_pci_free_coherent,
	.map_page	= niu_pci_map_page,
	.unmap_page	= niu_pci_unmap_page,
	.map_single	= niu_pci_map_single,
	.unmap_single	= niu_pci_unmap_single,
};

static void __devinit niu_driver_version(void)
{
	static int niu_version_printed;

	if (niu_version_printed++ == 0)
		pr_info("%s", version);
}

static struct net_device * __devinit niu_alloc_and_init(
	struct device *gen_dev, struct pci_dev *pdev,
	struct of_device *op, const struct niu_ops *ops,
	u8 port)
{
	struct net_device *dev;
	struct niu *np;

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev) {
		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
		return NULL;
	}

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
}

static void __devinit niu_assign_netdev_ops(struct net_device *dev)
{
	dev->open = niu_open;
	dev->stop = niu_close;
	dev->get_stats = niu_get_stats;
	dev->set_multicast_list = niu_set_rx_mode;
	dev->set_mac_address = niu_set_mac_addr;
	dev->do_ioctl = niu_ioctl;
	dev->tx_timeout = niu_tx_timeout;
	dev->hard_start_xmit = niu_start_xmit;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
	dev->change_mtu = niu_change_mtu;
}

static void __devinit niu_device_announce(struct niu *np)
{
	struct net_device *dev = np->dev;
	DECLARE_MAC_BUF(mac);

	pr_info("%s: NIU Ethernet %s\n",
		dev->name, print_mac(mac, dev->dev_addr));

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
}

static int __devinit niu_pci_init_one(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	unsigned long niureg_base, niureg_len;
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	int err, pos;
	u64 dma_mask;
	u16 val16;

	niu_driver_version();

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
			"base addresses, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_out_disable_pdev;
	}

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos <= 0) {
		dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
			"aborting.\n");
		goto err_out_free_res;
	}

	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_ATLAS);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
	val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
	val16 |= (PCI_EXP_DEVCTL_CERE |
		  PCI_EXP_DEVCTL_NFERE |
		  PCI_EXP_DEVCTL_FERE |
		  PCI_EXP_DEVCTL_URRE |
		  PCI_EXP_DEVCTL_RELAX_EN);
	pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);

	dma_mask = DMA_44BIT_MASK;
	err = pci_set_dma_mask(pdev, dma_mask);
	if (!err) {
		dev->features |= NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, dma_mask);
		if (err) {
			dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
				"DMA for consistent allocations, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, PFX "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_release_parent;
		}
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	niureg_base = pci_resource_start(pdev, 0);
	niureg_len = pci_resource_len(pdev, 0);

	np->regs = ioremap_nocache(niureg_base, niureg_len);
	if (!np->regs) {
		dev_err(&pdev->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	dev->irq = pdev->irq;

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}

static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

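/* Power management hooks: suspend quiesces the interface, masks
 * interrupts, stops the hardware and saves PCI state; resume restores
 * state and rebuilds the hardware with niu_init_hw() before restarting
 * the timer.
 */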
static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	niu_netif_stop(np);

	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_interrupts(np, 0);
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	niu_stop_hw(np);
	spin_unlock_irqrestore(&np->lock, flags);

	pci_save_state(pdev);

	return 0;
}

static int niu_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;
	int err;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return err;
}

static struct pci_driver niu_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= niu_pci_tbl,
	.probe		= niu_pci_init_one,
	.remove		= __devexit_p(niu_pci_remove_one),
	.suspend	= niu_suspend,
	.resume		= niu_resume,
};

#ifdef CONFIG_SPARC64
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
				     u64 *dma_addr, gfp_t flag)
{
	unsigned long order = get_order(size);
	unsigned long page = __get_free_pages(flag, order);

	if (page == 0UL)
		return NULL;
	memset((char *)page, 0, PAGE_SIZE << order);
	*dma_addr = __pa(page);

	return (void *) page;
}

static void niu_phys_free_coherent(struct device *dev, size_t size,
				   void *cpu_addr, u64 handle)
{
	unsigned long order = get_order(size);

	free_pages((unsigned long) cpu_addr, order);
}

static u64 niu_phys_map_page(struct device *dev, struct page *page,
			     unsigned long offset, size_t size,
			     enum dma_data_direction direction)
{
	return page_to_phys(page) + offset;
}

static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
				size_t size, enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
			       size_t size,
			       enum dma_data_direction direction)
{
	return __pa(cpu_addr);
}

static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* Nothing to do.  */
}

static const struct niu_ops niu_phys_ops = {
	.alloc_coherent	= niu_phys_alloc_coherent,
	.free_coherent	= niu_phys_free_coherent,
	.map_page	= niu_phys_map_page,
	.unmap_page	= niu_phys_unmap_page,
	.map_single	= niu_phys_map_single,
	.unmap_single	= niu_phys_unmap_single,
};

static unsigned long res_size(struct resource *r)
{
	return r->end - r->start + 1UL;
}

static int __devinit niu_of_probe(struct of_device *op,
				  const struct of_device_id *match)
{
	union niu_parent_id parent_id;
	struct net_device *dev;
	struct niu *np;
	const u32 *reg;
	int err;

	niu_driver_version();

	reg = of_get_property(op->node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
			op->node->full_name);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 &niu_phys_ops, reg[0] & 0x1);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	np = netdev_priv(dev);

	memset(&parent_id, 0, sizeof(parent_id));
	parent_id.of = of_get_parent(op->node);

	np->parent = niu_get_parent(np, &parent_id,
				    PLAT_TYPE_NIU);
	if (!np->parent) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);

	np->regs = of_ioremap(&op->resource[1], 0,
			      res_size(&op->resource[1]),
			      "niu regs");
	if (!np->regs) {
		dev_err(&op->dev, PFX "Cannot map device registers, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_release_parent;
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    res_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    res_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	niu_assign_netdev_ops(dev);

	err = niu_get_invariants(np);
	if (err) {
		if (err != -ENODEV)
			dev_err(&op->dev, PFX "Problem fetching invariants "
				"of chip, aborting.\n");
		goto err_out_iounmap;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, PFX "Cannot register net device, "
			"aborting.\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&op->dev, dev);

	niu_device_announce(np);

	return 0;

err_out_iounmap:
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   res_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   res_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   res_size(&op->resource[1]));
		np->regs = NULL;
	}

err_out_release_parent:
	niu_put_parent(np);

err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int __devexit niu_of_remove(struct of_device *op)
{
	struct net_device *dev = dev_get_drvdata(&op->dev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   res_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   res_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   res_size(&op->resource[1]));
			np->regs = NULL;
		}

		niu_ldg_free(np);

		niu_put_parent(np);

		free_netdev(dev);
		dev_set_drvdata(&op->dev, NULL);
	}
	return 0;
}

static struct of_device_id niu_match[] = {
	{
		.name = "network",
		.compatible = "SUNW,niusl",
	},
	{},
};
MODULE_DEVICE_TABLE(of, niu_match);

static struct of_platform_driver niu_of_driver = {
	.name		= "niu",
	.match_table	= niu_match,
	.probe		= niu_of_probe,
	.remove		= __devexit_p(niu_of_remove),
};
#endif /* CONFIG_SPARC64 */

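/* Module entry points: register the of_platform driver on sparc64 and
 * the PCI driver everywhere, unwinding the OF registration if PCI
 * registration fails.
 */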
static int __init niu_init(void)
{
	int err = 0;

	BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);

	niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);

#ifdef CONFIG_SPARC64
	err = of_register_driver(&niu_of_driver, &of_bus_type);
#endif

	if (!err) {
		err = pci_register_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
		if (err)
			of_unregister_driver(&niu_of_driver);
#endif
	}

	return err;
}

static void __exit niu_exit(void)
{
	pci_unregister_driver(&niu_pci_driver);
#ifdef CONFIG_SPARC64
	of_unregister_driver(&niu_of_driver);
#endif
}

module_init(niu_init);
module_exit(niu_exit);