/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
 *
 * Distribute under GPL.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>

#include "b44.h"
#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.95"
#define DRV_MODULE_RELDATE	"Aug 3, 2004"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)
#define B44_DMA_MASK			0x3fffffff

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)
#define TX_PKT_BUF_SZ		(B44_MAX_MTU + ETH_HLEN + 8)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
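/* Note on the TX accounting macros above: the TX ring has
 * B44_TX_RING_SIZE (a power of two) hardware descriptors, but only
 * tx_pending of them may be outstanding at once.  TX_BUFFS_AVAIL()
 * reports how many more skbs may be queued given the current
 * producer/consumer indices, and NEXT_TX() advances an index with a
 * cheap mask instead of a modulo.  For example, with tx_prod == 510,
 * tx_cons == 5 and tx_pending == 511, TX_BUFFS_AVAIL() yields
 * 5 + 511 - 510 = 6 free slots, and NEXT_TX(511) wraps to 0.
 */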
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
static struct pci_device_id b44_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static void b44_init_hw(struct b44 *);
static int b44_poll(struct net_device *dev, int *budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void b44_poll_controller(struct net_device *dev);
#endif

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(&pdev->dev, dma_base,
					 offset & dma_desc_align_mask,
					 dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
				      offset & dma_desc_align_mask,
				      dma_desc_sync_size, dir);
}
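/* The two helpers above sync only the cache line that holds a single
 * descriptor: the offset is rounded down with dma_desc_align_mask and
 * dma_desc_sync_size bytes (at least one cache line, at least one
 * descriptor) are synced.  Both values are computed once in b44_init().
 */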
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	writel(val, bp->regs + reg);
}
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name, bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SB_PCI_DMA		0x40000000	/* Client Mode PCI memory access space (1 GB) */
#define BCM4400_PCI_CORE_ADDR	0x18002000	/* Address of PCI core on BCM4400 cards */
static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround. */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
static int ssb_core_unit(struct b44 *bp)
{
	u32 val = br32(bp, B44_SBADMATCH0);
	u32 base, type;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;
	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;
	case 2:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	}

	return 0;
}
static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
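/* For reference, the CAM entry written above packs a 6-byte MAC address
 * into two registers: bytes 2-5 go into CAM_DATA_LO (byte 2 in the top
 * octet) and bytes 0-1 plus the VALID bit go into CAM_DATA_HI, so e.g.
 * 00:10:18:aa:bb:cc is written as LO=0x18aabbcc, HI=VALID|0x0010.
 */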
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
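/* The MDIO_DATA word built in b44_readphy()/b44_writephy() mirrors a
 * standard clause-22 MII management frame: start bits, a read/write
 * opcode, the PHY address, the register address, the turnaround bits
 * and (for writes) the 16-bit data.  Completion is signalled by the
 * EMAC_INT_MII bit in the EMAC interrupt status register.
 */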
/* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = b44_readphy(bp, location, &val);

	if (rc)
		return 0xffffffff;
	return val;
}
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	b44_writephy(bp, location, val);
}
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
				      B44_FLAG_RX_PAUSE);

	if (local & ADVERTISE_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_ASYM) {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
			else if (remote & LPA_PAUSE_ASYM)
				pause_enab |= B44_FLAG_RX_PAUSE;
		} else {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
		}
	} else if (local & ADVERTISE_PAUSE_ASYM) {
		if ((remote & LPA_PAUSE_CAP) &&
		    (remote & LPA_PAUSE_ASYM))
			pause_enab |= B44_FLAG_TX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
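/* The if/else ladder above implements the usual 802.3x pause resolution
 * from the autoneg advertisement bits: symmetric pause when both ends
 * advertise PAUSE_CAP, RX-only pause when we are asymmetric-capable and
 * the partner only advertises asymmetric pause, and TX-only pause in
 * the mirrored case.
 */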
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
	val = &bp->hw_stats.rx_good_octets;
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
		/* Sigh... */
		pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(RX_PKT_BUF_SZ, GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = pci_map_single(bp->pdev, skb->data,
					 RX_PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
		if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
			pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
	}

	skb->dev = bp->dev;
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
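/* RX buffer layout, for reference: skb_reserve() above sets skb->data
 * bp->rx_offset bytes into the buffer; the hardware is handed that
 * offset address and deposits its rx_header in the reserved space in
 * front of it, so the packet payload lands at skb->data and the
 * descriptor length is RX_PKT_BUF_SZ - bp->rx_offset.
 */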
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
					  src_idx * sizeof(src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
					     dest_idx * sizeof(dest_desc),
					     DMA_BIDIRECTIONAL);

	pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
				       RX_PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
}
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = cpu_to_le16(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = cpu_to_le16(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;

			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			      (B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
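/* Note on the receive path above: packets larger than RX_COPY_THRESHOLD
 * are handed to the stack in the original DMA buffer (and a fresh buffer
 * is allocated for the ring slot), while small packets are copied into a
 * freshly allocated skb so the large ring buffer can be recycled in place.
 */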
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev_priv(netdev);
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	spin_unlock_irq(&bp->lock);

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}

	return (done ? 0 : 1);
}
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* ??? What the fuck is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyways?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct sk_buff *bounce_skb;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (mapping + len > B44_DMA_MASK) {
		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);

		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
					     GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			return NETDEV_TX_BUSY;

		mapping = pci_map_single(bp->pdev, bounce_skb->data,
					 len, PCI_DMA_TODEVICE);
		if (mapping + len > B44_DMA_MASK) {
			pci_unmap_single(bp->pdev, mapping,
					 len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(bounce_skb);
			return NETDEV_TX_BUSY;
		}

		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
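/* The B44_DMA_MASK check above exists because the 4400 core can only
 * address the first 1GB of physical memory.  When pci_map_single()
 * hands back a mapping beyond that, the packet is copied into a GFP_DMA
 * bounce skb that is guaranteed to sit low enough, at the cost of one
 * extra copy per affected packet.
 */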
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES,
					   PCI_DMA_TODEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK) {
		dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
				 DMA_TABLE_BYTES,
				 DMA_BIDIRECTIONAL);
		kfree(bp->rx_ring);
	} else
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
	bp->rx_ring = NULL;
	bp->flags &= ~B44_FLAG_RX_RING_HACK;

	if (bp->flags & B44_FLAG_TX_RING_HACK) {
		dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
				 DMA_TABLE_BYTES,
				 DMA_TO_DEVICE);
		kfree(bp->tx_ring);
	} else
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->tx_ring, bp->tx_ring_dma);
	bp->tx_ring = NULL;
	bp->flags &= ~B44_FLAG_TX_RING_HACK;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	memset(bp->rx_buffers, 0, size);

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;
	memset(bp->tx_buffers, 0, size);

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
			goto out_err;

		memset(rx_ring, 0, size);
		rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (rx_ring_dma + size > B44_DMA_MASK) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
			goto out_err;

		memset(tx_ring, 0, size);
		tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (tx_ring_dma + size > B44_DMA_MASK) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
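/* The *_RING_HACK flags set above record that a descriptor ring lives in
 * ordinary (streaming-mapped) kernel memory instead of a coherent DMA
 * allocation.  That is why the fast paths call the b44_sync_dma_desc_*
 * helpers before and after the hardware touches those descriptors.
 */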
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	bw32(bp, B44_DMARX_PTR, bp->rx_pending);
	bp->rx_prod = bp->rx_pending;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
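/* Note: the receive DMA above is programmed with the ring base address
 * plus bp->dma_offset (the SB_PCI_DMA window), the same translation
 * applied to every buffer address stored in a descriptor elsewhere in
 * this driver.
 */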
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		return err;

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (err)
		goto err_out_free;

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp);
	bp->flags |= B44_FLAG_INIT_COMPLETE;

	netif_carrier_off(dev);
	b44_check_phy(bp);

	spin_unlock_irq(&bp->lock);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	return 0;

err_out_free:
	b44_free_consistent(bp);
	return err;
}
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev, NULL);
	enable_irq(dev->irq);
}
#endif
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	bp->flags &= ~B44_FLAG_INIT_COMPLETE;
	netif_carrier_off(bp->dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	b44_free_consistent(bp);

	return 0;
}
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;

	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;

	return nstat;
}
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
	return i + 1;
}
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;
	int i = 0;
	unsigned char zero[6] = {0, 0, 0, 0, 0, 0};

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++) {
			__b44_cam_write(bp, zero, i);
		}
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}
static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct pci_dev *pci_dev = bp->pdev;

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(pci_dev));
}
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r = -EINVAL;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
		return -EAGAIN;
	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	return 0;
}
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
		return -EAGAIN;

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising & ADVERTISE_10HALF)
			bp->flags |= B44_FLAG_ADV_10HALF;
		if (cmd->advertising & ADVERTISE_10FULL)
			bp->flags |= B44_FLAG_ADV_10FULL;
		if (cmd->advertising & ADVERTISE_100HALF)
			bp->flags |= B44_FLAG_ADV_100HALF;
		if (cmd->advertising & ADVERTISE_100FULL)
			bp->flags |= B44_FLAG_ADV_100FULL;
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}
static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);

	return err;
}
/* Read 128-bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);

	return 0;
}
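/* The EEPROM is read 16 bits at a time, so each pair of bytes lands in
 * host order; that is why b44_get_invariants() below picks the MAC
 * address out of eeprom[] in swapped pairs (79, 78, 81, 80, ...).
 */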
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->phy_addr = eeprom[90] & 0x1f;

	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = SB_PCI_DMA;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	 */
out:
	return err;
}
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* No interesting netdevice features in this card... */

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	if (b44_debug >= 0 && b44_debug < 16)
		bp->msg_enable = (1 << b44_debug) - 1;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = b44_poll_controller;
#endif
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;

err_out_iounmap:
	iounmap(bp->regs);

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
b44_remove_one(struct pci_dev
*pdev
)
2012 struct net_device
*dev
= pci_get_drvdata(pdev
);
2015 struct b44
*bp
= netdev_priv(dev
);
2017 unregister_netdev(dev
);
2020 pci_release_regions(pdev
);
2021 pci_disable_device(pdev
);
2022 pci_set_drvdata(pdev
, NULL
);
static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	pci_disable_device(pdev);
	return 0;
}
static int b44_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct b44 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	return 0;
}
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
}
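/* Example of the computation above: with a 32-byte cache line,
 * dma_desc_align_mask becomes ~31 = 0xffffffe0, so the sync helpers
 * round a descriptor offset down to its cache line, and
 * dma_desc_sync_size covers at least one full descriptor.
 */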
static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);