/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the gem chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mutex.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#define cas_page_map(x)		kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)	kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS		num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define USE_NAPI
#define cas_skb_release(x)	netif_receive_skb(x)
#else
#define cas_skb_release(x)	netif_rx(x)
#endif

/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT	/* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE	cas_prog_null /* alternate firmware */

#include "cassini.h"

#define USE_TX_COMPWB		/* use completion writeback registers */
#define USE_CSMA_CD_PROTO	/* standard CSMA/CD */
#define USE_RX_BLANK		/* hw interrupt mitigation */
#undef USE_ENTROPY_DEV		/* don't test for entropy device */

/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */
#undef USE_PCI_INTB
#undef USE_PCI_INTC
#undef USE_PCI_INTD
#undef USE_QOS

#undef USE_VPD_DEBUG		/* debug vpd information if defined */

/* rx processing options */
#define USE_PAGE_ORDER		/* specify to allocate large rx pages */
#define RX_DONT_BATCH	0	/* if 1, don't batch flows */
#define RX_COPY_ALWAYS	0	/* if 0, use frags */
#define RX_COPY_MIN	64	/* copy a little to make upper layers happy */
#undef RX_COUNT_BUFFERS		/* define to calculate RX buffer stats */

#define DRV_MODULE_NAME		"cassini"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5"
#define DRV_MODULE_RELDATE	"4 Jan 2008"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)

/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY	1000
#define STOP_TRIES	5000

/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)
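
/* Illustrative arithmetic (not in the original comments): with a 2K rx
 * page, CAS_MAX_MTU evaluates to (0x800 << 1) - 0x50 = 0xfb0 (4016)
 * bytes, so the 9000-byte jumbo ceiling only comes into play once
 * larger rx pages are configured (see USE_PAGE_ORDER above).
 */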

#if 1
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#else
#define CAS_RESET_MTU	1
#define CAS_RESET_ALL	2
#define CAS_RESET_SPARE	3
#endif

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
module_param(cassini_debug, int, 0);
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
module_param(link_mode, int, 0);
MODULE_PARM_DESC(link_mode, "default link mode");

/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
module_param(linkdown_timeout, int, 0);
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");

/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;


static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};

static struct pci_device_id cas_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);

static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}

/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)

static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
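
/* Usage sketch (illustrative, not from the original file): callers
 * bracket multi-ring state changes with the save/restore pair, e.g.
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state shared with the interrupt path ...
 *	cas_unlock_all_restore(cp, flags);
 */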

static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}

static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}

static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}

static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}

static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return (cmd & MIF_FRAME_DATA_MASK);
	}
	return 0xFFFF; /* -1 */
}

static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)	((x)->used += (y))
#define RX_USED_SET(x, y)	((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_spare_list, &list);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			if (netif_msg_rx_err(cp))
				printk(KERN_ERR "%s: no spare buffers "
				       "available.\n", cp->dev->name);
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}


static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
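
/* A reading of the mask write above (an inference, not an original
 * comment): bits set in REG_MIF_MASK appear to be masked off, so with
 * polling enabled only BMSR_LSTATUS and BMSR_ANEGCOMPLETE changes in
 * the polled register raise a MIF interrupt, while 0xFFFF masks all
 * bit changes when polling is disabled.
 */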

/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		printk(KERN_INFO "%s: PCS link down.\n",
		       cp->dev->name);
	} else {
		if (changed) {
			printk(KERN_INFO "%s: link configuration changed\n",
			       cp->dev->name);
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state.
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);
	udelay(100);
	while (limit--) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return (limit <= 0);
}

static void cas_saturn_firmware_load(struct cas *cp)
{
	cas_saturn_patch_t *patch = cas_saturn_patch;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
	while (patch->addr) {
		cas_phy_write(cp, DP83065_MII_REGD, patch->val);
		patch++;
	}

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}


/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (limit-- > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not "
			       "clear [%08x].\n", cp->dev->name,
			       readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: PCS RemoteFault\n",
			       cp->dev->name);
	}

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp)) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		}

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			cp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}

static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val = CAS_BASE(RX_PAUSE_THRESH_OFF,
		       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);
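
	/* Worked example (illustrative, not from the original comments):
	 * with an 8K page and a 1500-byte mtu, size = 1564 <= 0x800, so
	 * i = 0x1, mtu_stride becomes 2K, and MTU_COUNT = 8K >> 11 = 4
	 * mtu-sized buffers per page.
	 */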

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}

static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}
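
/* How the two helpers above fit together (an inference, not an original
 * comment): page_count() == 1 means the driver holds the only reference,
 * so the page can be handed straight back to the chip; a higher count
 * means an skb still carries the page as a fragment, so a spare is
 * swapped in and the old page is parked on rx_inuse_list until its
 * reference drops.
 */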
1381 | ||
1382 | static void cas_clean_rxds(struct cas *cp) | |
1383 | { | |
1384 | /* only clean ring 0 as ring 1 is used for spare buffers */ | |
1385 | struct cas_rx_desc *rxd = cp->init_rxds[0]; | |
1386 | int i, size; | |
1387 | ||
1388 | /* release all rx flows */ | |
1389 | for (i = 0; i < N_RX_FLOWS; i++) { | |
1390 | struct sk_buff *skb; | |
1391 | while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { | |
1392 | cas_skb_release(skb); | |
1393 | } | |
1394 | } | |
1395 | ||
1396 | /* initialize descriptors */ | |
1397 | size = RX_DESC_RINGN_SIZE(0); | |
1398 | for (i = 0; i < size; i++) { | |
1399 | cas_page_t *page = cas_page_swap(cp, 0, i); | |
1400 | rxd[i].buffer = cpu_to_le64(page->dma_addr); | |
6aa20a22 | 1401 | rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | |
1f26dac3 DM |
1402 | CAS_BASE(RX_INDEX_RING, 0)); |
1403 | } | |
1404 | ||
6aa20a22 | 1405 | cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; |
1f26dac3 DM |
1406 | cp->rx_last[0] = 0; |
1407 | cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); | |
1408 | } | |
1409 | ||
1410 | static void cas_clean_rxcs(struct cas *cp) | |
1411 | { | |
1412 | int i, j; | |
1413 | ||
1414 | /* take ownership of rx comp descriptors */ | |
1415 | memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); | |
1416 | memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); | |
1417 | for (i = 0; i < N_RX_COMP_RINGS; i++) { | |
1418 | struct cas_rx_comp *rxc = cp->init_rxcs[i]; | |
1419 | for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { | |
1420 | cas_rxc_init(rxc + j); | |
1421 | } | |
1422 | } | |
1423 | } | |
1424 | ||
1425 | #if 0 | |
1426 | /* When we get a RX fifo overflow, the RX unit is probably hung | |
1427 | * so we do the following. | |
1428 | * | |
1429 | * If any part of the reset goes wrong, we return 1 and that causes the | |
1430 | * whole chip to be reset. | |
1431 | */ | |
1432 | static int cas_rxmac_reset(struct cas *cp) | |
1433 | { | |
1434 | struct net_device *dev = cp->dev; | |
1435 | int limit; | |
1436 | u32 val; | |
1437 | ||
1438 | /* First, reset MAC RX. */ | |
1439 | writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | |
1440 | for (limit = 0; limit < STOP_TRIES; limit++) { | |
1441 | if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) | |
1442 | break; | |
1443 | udelay(10); | |
1444 | } | |
1445 | if (limit == STOP_TRIES) { | |
1446 | printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " | |
1447 | "chip.\n", dev->name); | |
1448 | return 1; | |
1449 | } | |
1450 | ||
1451 | /* Second, disable RX DMA. */ | |
1452 | writel(0, cp->regs + REG_RX_CFG); | |
1453 | for (limit = 0; limit < STOP_TRIES; limit++) { | |
1454 | if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) | |
1455 | break; | |
1456 | udelay(10); | |
1457 | } | |
1458 | if (limit == STOP_TRIES) { | |
1459 | printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " | |
1460 | "chip.\n", dev->name); | |
1461 | return 1; | |
1462 | } | |
1463 | ||
1464 | mdelay(5); | |
1465 | ||
1466 | /* Execute RX reset command. */ | |
1467 | writel(SW_RESET_RX, cp->regs + REG_SW_RESET); | |
1468 | for (limit = 0; limit < STOP_TRIES; limit++) { | |
1469 | if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) | |
1470 | break; | |
1471 | udelay(10); | |
1472 | } | |
1473 | if (limit == STOP_TRIES) { | |
1474 | printk(KERN_ERR "%s: RX reset command will not execute, " | |
1475 | "resetting whole chip.\n", dev->name); | |
1476 | return 1; | |
1477 | } | |
1478 | ||
1479 | /* reset driver rx state */ | |
1480 | cas_clean_rxds(cp); | |
1481 | cas_clean_rxcs(cp); | |
1482 | ||
1483 | /* Now, reprogram the rest of RX unit. */ | |
1484 | cas_init_rx_dma(cp); | |
1485 | ||
1486 | /* re-enable */ | |
1487 | val = readl(cp->regs + REG_RX_CFG); | |
1488 | writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); | |
1489 | writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); | |
1490 | val = readl(cp->regs + REG_MAC_RX_CFG); | |
1491 | writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | |
1492 | return 0; | |
1493 | } | |
1494 | #endif | |
1495 | ||
1496 | static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, | |
1497 | u32 status) | |
1498 | { | |
1499 | u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); | |
1500 | ||
1501 | if (!stat) | |
1502 | return 0; | |
1503 | ||
1504 | if (netif_msg_intr(cp)) | |
1505 | printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n", | |
1506 | cp->dev->name, stat); | |
1507 | ||
1508 | /* these are all rollovers */ | |
1509 | spin_lock(&cp->stat_lock[0]); | |
6aa20a22 | 1510 | if (stat & MAC_RX_ALIGN_ERR) |
1f26dac3 DM |
1511 | cp->net_stats[0].rx_frame_errors += 0x10000; |
1512 | ||
1513 | if (stat & MAC_RX_CRC_ERR) | |
1514 | cp->net_stats[0].rx_crc_errors += 0x10000; | |
1515 | ||
1516 | if (stat & MAC_RX_LEN_ERR) | |
1517 | cp->net_stats[0].rx_length_errors += 0x10000; | |
1518 | ||
1519 | if (stat & MAC_RX_OVERFLOW) { | |
1520 | cp->net_stats[0].rx_over_errors++; | |
1521 | cp->net_stats[0].rx_fifo_errors++; | |
1522 | } | |
1523 | ||
1524 | /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR | |
1525 | * events. | |
1526 | */ | |
1527 | spin_unlock(&cp->stat_lock[0]); | |
1528 | return 0; | |
1529 | } | |
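/* editor's note: the += 0x10000 adjustments above assume the MAC's
 * error counters are 16 bits wide and that this interrupt fires on a
 * counter wrap; the live low-order bits are picked up whenever the
 * counters themselves are read (an inference from this code, not from
 * chip documentation).
 */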
1530 | ||
1531 | static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, | |
1532 | u32 status) | |
1533 | { | |
1534 | u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); | |
1535 | ||
1536 | if (!stat) | |
1537 | return 0; | |
1538 | ||
1539 | if (netif_msg_intr(cp)) | |
1540 | printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n", | |
1541 | cp->dev->name, stat); | |
1542 | ||
1543 | /* This interrupt is just for pause frame and pause | |
1544 | * tracking. It is useful for diagnostics and debug | |
1545 | * but probably by default we will mask these events. | |
1546 | */ | |
1547 | if (stat & MAC_CTRL_PAUSE_STATE) | |
1548 | cp->pause_entered++; | |
1549 | ||
1550 | if (stat & MAC_CTRL_PAUSE_RECEIVED) | |
1551 | cp->pause_last_time_recvd = (stat >> 16); | |
1552 | ||
1553 | return 0; | |
1554 | } | |
1555 | ||
6aa20a22 | 1556 | |
1f26dac3 DM |
1557 | /* Must be invoked under cp->lock. */ |
1558 | static inline int cas_mdio_link_not_up(struct cas *cp) | |
1559 | { | |
1560 | u16 val; | |
6aa20a22 | 1561 | |
1f26dac3 DM |
1562 | switch (cp->lstate) { |
1563 | case link_force_ret: | |
1564 | if (netif_msg_link(cp)) | |
1565 | printk(KERN_INFO "%s: Autoneg failed again, keeping" | |
1566 | " forced mode\n", cp->dev->name); | |
1567 | cas_phy_write(cp, MII_BMCR, cp->link_fcntl); | |
1568 | cp->timer_ticks = 5; | |
1569 | cp->lstate = link_force_ok; | |
1570 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | |
1571 | break; | |
6aa20a22 | 1572 | |
1f26dac3 DM |
1573 | case link_aneg: |
1574 | val = cas_phy_read(cp, MII_BMCR); | |
1575 | ||
1576 | /* Try forced modes. we try things in the following order: | |
1577 | * 1000 full -> 100 full/half -> 10 half | |
1578 | */ | |
1579 | val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); | |
1580 | val |= BMCR_FULLDPLX; | |
6aa20a22 | 1581 | val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? |
1f26dac3 DM |
1582 | CAS_BMCR_SPEED1000 : BMCR_SPEED100; |
1583 | cas_phy_write(cp, MII_BMCR, val); | |
1584 | cp->timer_ticks = 5; | |
1585 | cp->lstate = link_force_try; | |
1586 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | |
1587 | break; | |
1588 | ||
1589 | case link_force_try: | |
1590 | /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ | |
1591 | val = cas_phy_read(cp, MII_BMCR); | |
1592 | cp->timer_ticks = 5; | |
1593 | if (val & CAS_BMCR_SPEED1000) { /* gigabit */ | |
1594 | val &= ~CAS_BMCR_SPEED1000; | |
1595 | val |= (BMCR_SPEED100 | BMCR_FULLDPLX); | |
1596 | cas_phy_write(cp, MII_BMCR, val); | |
1597 | break; | |
1598 | } | |
1599 | ||
1600 | if (val & BMCR_SPEED100) { | |
1601 | if (val & BMCR_FULLDPLX) /* fd failed */ | |
1602 | val &= ~BMCR_FULLDPLX; | |
1603 | else { /* 100Mbps failed */ | |
1604 | val &= ~BMCR_SPEED100; | |
1605 | } | |
1606 | cas_phy_write(cp, MII_BMCR, val); | |
1607 | break; | |
1608 | } | |
1609 | default: | |
1610 | break; | |
1611 | } | |
1612 | return 0; | |
1613 | } | |
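/* editor's note: taken together, the cases above walk the fallback
 * ladder autoneg -> 1000FD (or 100FD without CAS_FLAG_1000MB_CAP)
 * -> 100FD -> 100HD -> 10HD, with timer_ticks = 5 re-arming the link
 * timer between steps.
 */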
1614 | ||
1615 | ||
1616 | /* must be invoked with cp->lock held */ | |
1617 | static int cas_mii_link_check(struct cas *cp, const u16 bmsr) | |
1618 | { | |
1619 | int restart; | |
1620 | ||
1621 | if (bmsr & BMSR_LSTATUS) { | |
1622 | /* Ok, here we got a link. If we had it due to a forced | |
6aa20a22 | 1623 | * fallback, and we were configured for autoneg, we |
1f26dac3 DM |
1624 | * retry a short autoneg pass. If you know your hub is |
1625 | * broken, use ethtool ;) | |
1626 | */ | |
6aa20a22 | 1627 | if ((cp->lstate == link_force_try) && |
1f26dac3 DM |
1628 | (cp->link_cntl & BMCR_ANENABLE)) { |
1629 | cp->lstate = link_force_ret; | |
1630 | cp->link_transition = LINK_TRANSITION_LINK_CONFIG; | |
1631 | cas_mif_poll(cp, 0); | |
1632 | cp->link_fcntl = cas_phy_read(cp, MII_BMCR); | |
1633 | cp->timer_ticks = 5; | |
1634 | if (cp->opened && netif_msg_link(cp)) | |
1635 | printk(KERN_INFO "%s: Got link after fallback, retrying" | |
1636 | " autoneg once...\n", cp->dev->name); | |
1637 | cas_phy_write(cp, MII_BMCR, | |
1638 | cp->link_fcntl | BMCR_ANENABLE | | |
1639 | BMCR_ANRESTART); | |
1640 | cas_mif_poll(cp, 1); | |
1641 | ||
1642 | } else if (cp->lstate != link_up) { | |
1643 | cp->lstate = link_up; | |
1644 | cp->link_transition = LINK_TRANSITION_LINK_UP; | |
1645 | ||
1646 | if (cp->opened) { | |
1647 | cas_set_link_modes(cp); | |
1648 | netif_carrier_on(cp->dev); | |
1649 | } | |
1650 | } | |
1651 | return 0; | |
1652 | } | |
1653 | ||
1654 | /* link not up. if the link was previously up, we restart the | |
1655 | * whole process | |
1656 | */ | |
1657 | restart = 0; | |
1658 | if (cp->lstate == link_up) { | |
1659 | cp->lstate = link_down; | |
1660 | cp->link_transition = LINK_TRANSITION_LINK_DOWN; | |
1661 | ||
1662 | netif_carrier_off(cp->dev); | |
1663 | if (cp->opened && netif_msg_link(cp)) | |
1664 | printk(KERN_INFO "%s: Link down\n", | |
1665 | cp->dev->name); | |
1666 | restart = 1; | |
6aa20a22 | 1667 | |
1f26dac3 DM |
1668 | } else if (++cp->timer_ticks > 10) |
1669 | cas_mdio_link_not_up(cp); | |
6aa20a22 | 1670 | |
1f26dac3 DM |
1671 | return restart; |
1672 | } | |
1673 | ||
1674 | static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, | |
1675 | u32 status) | |
1676 | { | |
1677 | u32 stat = readl(cp->regs + REG_MIF_STATUS); | |
1678 | u16 bmsr; | |
1679 | ||
1680 | /* check for a link change */ | |
1681 | if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) | |
1682 | return 0; | |
1683 | ||
1684 | bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); | |
1685 | return cas_mii_link_check(cp, bmsr); | |
1686 | } | |
1687 | ||
1688 | static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, | |
1689 | u32 status) | |
1690 | { | |
1691 | u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); | |
1692 | ||
1693 | if (!stat) | |
1694 | return 0; | |
1695 | ||
1696 | printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat, | |
1697 | readl(cp->regs + REG_BIM_DIAG)); | |
1698 | ||
1699 | /* cassini+ has this reserved */ | |
1700 | if ((stat & PCI_ERR_BADACK) && | |
1701 | ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) | |
1702 | printk("<No ACK64# during ABS64 cycle> "); | |
1703 | ||
1704 | if (stat & PCI_ERR_DTRTO) | |
1705 | printk("<Delayed transaction timeout> "); | |
1706 | if (stat & PCI_ERR_OTHER) | |
1707 | printk("<other> "); | |
1708 | if (stat & PCI_ERR_BIM_DMA_WRITE) | |
1709 | printk("<BIM DMA 0 write req> "); | |
1710 | if (stat & PCI_ERR_BIM_DMA_READ) | |
1711 | printk("<BIM DMA 0 read req> "); | |
1712 | printk("\n"); | |
1713 | ||
1714 | if (stat & PCI_ERR_OTHER) { | |
1715 | u16 cfg; | |
1716 | ||
1717 | /* Interrogate PCI config space for the | |
1718 | * true cause. | |
1719 | */ | |
1720 | pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); | |
1721 | printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", | |
1722 | dev->name, cfg); | |
1723 | if (cfg & PCI_STATUS_PARITY) | |
1724 | printk(KERN_ERR "%s: PCI parity error detected.\n", | |
1725 | dev->name); | |
1726 | if (cfg & PCI_STATUS_SIG_TARGET_ABORT) | |
1727 | printk(KERN_ERR "%s: PCI target abort.\n", | |
1728 | dev->name); | |
1729 | if (cfg & PCI_STATUS_REC_TARGET_ABORT) | |
1730 | printk(KERN_ERR "%s: PCI master acks target abort.\n", | |
1731 | dev->name); | |
1732 | if (cfg & PCI_STATUS_REC_MASTER_ABORT) | |
1733 | printk(KERN_ERR "%s: PCI master abort.\n", dev->name); | |
1734 | if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) | |
1735 | printk(KERN_ERR "%s: PCI system error SERR#.\n", | |
1736 | dev->name); | |
1737 | if (cfg & PCI_STATUS_DETECTED_PARITY) | |
1738 | printk(KERN_ERR "%s: PCI parity error.\n", | |
1739 | dev->name); | |
1740 | ||
1741 | /* Write the error bits back to clear them. */ | |
1742 | cfg &= (PCI_STATUS_PARITY | | |
1743 | PCI_STATUS_SIG_TARGET_ABORT | | |
1744 | PCI_STATUS_REC_TARGET_ABORT | | |
1745 | PCI_STATUS_REC_MASTER_ABORT | | |
1746 | PCI_STATUS_SIG_SYSTEM_ERROR | | |
1747 | PCI_STATUS_DETECTED_PARITY); | |
1748 | pci_write_config_word(cp->pdev, PCI_STATUS, cfg); | |
1749 | } | |
1750 | ||
1751 | /* For all PCI errors, we should reset the chip. */ | |
1752 | return 1; | |
1753 | } | |
1754 | ||
1755 | /* All non-normal interrupt conditions get serviced here. | |
1756 | * Returns non-zero if we should just exit the interrupt | |
1757 | * handler right now (i.e. if we reset the card, which invalidates | |
1758 | * all of the other original irq status bits). | |
1759 | */ | |
1760 | static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, | |
1761 | u32 status) | |
1762 | { | |
1763 | if (status & INTR_RX_TAG_ERROR) { | |
1764 | /* corrupt RX tag framing */ | |
1765 | if (netif_msg_rx_err(cp)) | |
1766 | printk(KERN_DEBUG "%s: corrupt rx tag framing\n", | |
1767 | cp->dev->name); | |
1768 | spin_lock(&cp->stat_lock[0]); | |
1769 | cp->net_stats[0].rx_errors++; | |
1770 | spin_unlock(&cp->stat_lock[0]); | |
1771 | goto do_reset; | |
1772 | } | |
1773 | ||
1774 | if (status & INTR_RX_LEN_MISMATCH) { | |
1775 | /* length mismatch. */ | |
1776 | if (netif_msg_rx_err(cp)) | |
1777 | printk(KERN_DEBUG "%s: length mismatch for rx frame\n", | |
1778 | cp->dev->name); | |
1779 | spin_lock(&cp->stat_lock[0]); | |
1780 | cp->net_stats[0].rx_errors++; | |
1781 | spin_unlock(&cp->stat_lock[0]); | |
1782 | goto do_reset; | |
1783 | } | |
1784 | ||
1785 | if (status & INTR_PCS_STATUS) { | |
1786 | if (cas_pcs_interrupt(dev, cp, status)) | |
1787 | goto do_reset; | |
1788 | } | |
1789 | ||
1790 | if (status & INTR_TX_MAC_STATUS) { | |
1791 | if (cas_txmac_interrupt(dev, cp, status)) | |
1792 | goto do_reset; | |
1793 | } | |
1794 | ||
1795 | if (status & INTR_RX_MAC_STATUS) { | |
1796 | if (cas_rxmac_interrupt(dev, cp, status)) | |
1797 | goto do_reset; | |
1798 | } | |
1799 | ||
1800 | if (status & INTR_MAC_CTRL_STATUS) { | |
1801 | if (cas_mac_interrupt(dev, cp, status)) | |
1802 | goto do_reset; | |
1803 | } | |
1804 | ||
1805 | if (status & INTR_MIF_STATUS) { | |
1806 | if (cas_mif_interrupt(dev, cp, status)) | |
1807 | goto do_reset; | |
1808 | } | |
1809 | ||
1810 | if (status & INTR_PCI_ERROR_STATUS) { | |
1811 | if (cas_pci_interrupt(dev, cp, status)) | |
1812 | goto do_reset; | |
1813 | } | |
1814 | return 0; | |
1815 | ||
1816 | do_reset: | |
1817 | #if 1 | |
1818 | atomic_inc(&cp->reset_task_pending); | |
1819 | atomic_inc(&cp->reset_task_pending_all); | |
1820 | printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n", | |
1821 | dev->name, status); | |
1822 | schedule_work(&cp->reset_task); | |
1823 | #else | |
1824 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | |
1825 | printk(KERN_ERR "reset called in cas_abnormal_irq\n"); | |
1826 | schedule_work(&cp->reset_task); | |
1827 | #endif | |
1828 | return 1; | |
1829 | } | |
1830 | ||
1831 | /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when | |
1832 | * determining whether to do a netif_stop/wakeup | |
1833 | */ | |
1834 | #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1) | |
1835 | #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) | |
1836 | static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, | |
1837 | const int len) | |
1838 | { | |
1839 | unsigned long off = addr + len; | |
1840 | ||
1841 | if (CAS_TABORT(cp) == 1) | |
1842 | return 0; | |
1843 | if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) | |
1844 | return 0; | |
1845 | return TX_TARGET_ABORT_LEN; | |
1846 | } | |
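/* editor's note -- worked example (the TX_TARGET_ABORT_LEN value is
 * assumed, not visible in this file): with 4K pages, a buffer ending at
 * addr + len == 0x1ff8 gives CAS_ROUND_PAGE(off) - off == 8; if that is
 * within the target-abort window the function returns
 * TX_TARGET_ABORT_LEN and the caller bounces the tail of the buffer
 * through a tiny buffer instead of letting DMA run up to the page
 * boundary. Chips without CAS_FLAG_TARGET_ABORT always get 0.
 */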
1847 | ||
1848 | static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) | |
1849 | { | |
1850 | struct cas_tx_desc *txds; | |
1851 | struct sk_buff **skbs; | |
1852 | struct net_device *dev = cp->dev; | |
1853 | int entry, count; | |
1854 | ||
1855 | spin_lock(&cp->tx_lock[ring]); | |
1856 | txds = cp->init_txds[ring]; | |
1857 | skbs = cp->tx_skbs[ring]; | |
1858 | entry = cp->tx_old[ring]; | |
1859 | ||
1860 | count = TX_BUFF_COUNT(ring, entry, limit); | |
1861 | while (entry != limit) { | |
1862 | struct sk_buff *skb = skbs[entry]; | |
1863 | dma_addr_t daddr; | |
1864 | u32 dlen; | |
1865 | int frag; | |
1866 | ||
1867 | if (!skb) { | |
1868 | /* this should never occur */ | |
1869 | entry = TX_DESC_NEXT(ring, entry); | |
1870 | continue; | |
1871 | } | |
1872 | ||
1873 | /* however, we might get only a partial skb release. */ | |
1874 | count -= skb_shinfo(skb)->nr_frags + | |
1875 | cp->tx_tiny_use[ring][entry].nbufs + 1; | |
1876 | if (count < 0) | |
1877 | break; | |
1878 | ||
1879 | if (netif_msg_tx_done(cp)) | |
1880 | printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", | |
1881 | cp->dev->name, ring, entry); | |
1882 | ||
1883 | skbs[entry] = NULL; | |
1884 | cp->tx_tiny_use[ring][entry].nbufs = 0; | |
6aa20a22 | 1885 | |
1f26dac3 DM |
1886 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { |
1887 | struct cas_tx_desc *txd = txds + entry; | |
1888 | ||
1889 | daddr = le64_to_cpu(txd->buffer); | |
1890 | dlen = CAS_VAL(TX_DESC_BUFLEN, | |
1891 | le64_to_cpu(txd->control)); | |
1892 | pci_unmap_page(cp->pdev, daddr, dlen, | |
1893 | PCI_DMA_TODEVICE); | |
1894 | entry = TX_DESC_NEXT(ring, entry); | |
1895 | ||
1896 | /* tiny buffer may follow */ | |
1897 | if (cp->tx_tiny_use[ring][entry].used) { | |
1898 | cp->tx_tiny_use[ring][entry].used = 0; | |
1899 | entry = TX_DESC_NEXT(ring, entry); | |
6aa20a22 | 1900 | } |
1f26dac3 DM |
1901 | } |
1902 | ||
1903 | spin_lock(&cp->stat_lock[ring]); | |
1904 | cp->net_stats[ring].tx_packets++; | |
1905 | cp->net_stats[ring].tx_bytes += skb->len; | |
1906 | spin_unlock(&cp->stat_lock[ring]); | |
1907 | dev_kfree_skb_irq(skb); | |
1908 | } | |
1909 | cp->tx_old[ring] = entry; | |
1910 | ||
1911 | /* this is wrong for multiple tx rings. the net device needs | |
1912 | * multiple queues for this to do the right thing. we wait | |
1913 | * for 2*packets to be available when using tiny buffers | |
1914 | */ | |
1915 | if (netif_queue_stopped(dev) && | |
1916 | (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) | |
1917 | netif_wake_queue(dev); | |
1918 | spin_unlock(&cp->tx_lock[ring]); | |
1919 | } | |
1920 | ||
1921 | static void cas_tx(struct net_device *dev, struct cas *cp, | |
1922 | u32 status) | |
1923 | { | |
1924 | int limit, ring; | |
1925 | #ifdef USE_TX_COMPWB | |
1926 | u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); | |
1927 | #endif | |
1928 | if (netif_msg_intr(cp)) | |
64af4c13 AM |
1929 | printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n", |
1930 | cp->dev->name, status, (unsigned long long)compwb); | |
1f26dac3 DM |
1931 | /* process all the rings */ |
1932 | for (ring = 0; ring < N_TX_RINGS; ring++) { | |
1933 | #ifdef USE_TX_COMPWB | |
1934 | /* use the completion writeback registers */ | |
1935 | limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | | |
1936 | CAS_VAL(TX_COMPWB_LSB, compwb); | |
1937 | compwb = TX_COMPWB_NEXT(compwb); | |
1938 | #else | |
1939 | limit = readl(cp->regs + REG_TX_COMPN(ring)); | |
1940 | #endif | |
6aa20a22 | 1941 | if (cp->tx_old[ring] != limit) |
1f26dac3 DM |
1942 | cas_tx_ringN(cp, ring, limit); |
1943 | } | |
1944 | } | |
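/* editor's note: with USE_TX_COMPWB all four rings' completion indices
 * arrive packed in one 64-bit writeback word; each ring's index is
 * rebuilt as (MSB << 8) | LSB and TX_COMPWB_NEXT() advances to the next
 * ring's field. Note the debug printk above references compwb even when
 * USE_TX_COMPWB is not defined, so that configuration would not compile
 * as written.
 */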
1945 | ||
1946 | ||
6aa20a22 JG |
1947 | static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, |
1948 | int entry, const u64 *words, | |
1f26dac3 DM |
1949 | struct sk_buff **skbref) |
1950 | { | |
1951 | int dlen, hlen, len, i, alloclen; | |
1952 | int off, swivel = RX_SWIVEL_OFF_VAL; | |
1953 | struct cas_page *page; | |
1954 | struct sk_buff *skb; | |
1955 | void *addr, *crcaddr; | |
e5e02540 | 1956 | __sum16 csum; |
6aa20a22 | 1957 | char *p; |
1f26dac3 DM |
1958 | |
1959 | hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); | |
1960 | dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); | |
1961 | len = hlen + dlen; | |
1962 | ||
6aa20a22 | 1963 | if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) |
1f26dac3 | 1964 | alloclen = len; |
6aa20a22 | 1965 | else |
1f26dac3 DM |
1966 | alloclen = max(hlen, RX_COPY_MIN); |
1967 | ||
1968 | skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); | |
6aa20a22 | 1969 | if (skb == NULL) |
1f26dac3 DM |
1970 | return -1; |
1971 | ||
1972 | *skbref = skb; | |
1f26dac3 DM |
1973 | skb_reserve(skb, swivel); |
1974 | ||
1975 | p = skb->data; | |
1976 | addr = crcaddr = NULL; | |
1977 | if (hlen) { /* always copy header pages */ | |
1978 | i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); | |
1979 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | |
6aa20a22 | 1980 | off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + |
1f26dac3 DM |
1981 | swivel; |
1982 | ||
1983 | i = hlen; | |
1984 | if (!dlen) /* attach FCS */ | |
1985 | i += cp->crc_size; | |
1986 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | |
1987 | PCI_DMA_FROMDEVICE); | |
1988 | addr = cas_page_map(page->buffer); | |
1989 | memcpy(p, addr + off, i); | |
1990 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | |
1991 | PCI_DMA_FROMDEVICE); | |
1992 | cas_page_unmap(addr); | |
1993 | RX_USED_ADD(page, 0x100); | |
1994 | p += hlen; | |
1995 | swivel = 0; | |
6aa20a22 | 1996 | } |
1f26dac3 DM |
1997 | |
1998 | ||
1999 | if (alloclen < (hlen + dlen)) { | |
2000 | skb_frag_t *frag = skb_shinfo(skb)->frags; | |
2001 | ||
2002 | /* normal or jumbo packets. we use frags */ | |
2003 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | |
2004 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | |
2005 | off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; | |
2006 | ||
2007 | hlen = min(cp->page_size - off, dlen); | |
2008 | if (hlen < 0) { | |
2009 | if (netif_msg_rx_err(cp)) { | |
2010 | printk(KERN_DEBUG "%s: rx page overflow: " | |
2011 | "%d\n", cp->dev->name, hlen); | |
2012 | } | |
2013 | dev_kfree_skb_irq(skb); | |
2014 | return -1; | |
2015 | } | |
2016 | i = hlen; | |
2017 | if (i == dlen) /* attach FCS */ | |
2018 | i += cp->crc_size; | |
2019 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | |
2020 | PCI_DMA_FROMDEVICE); | |
2021 | ||
2022 | /* make sure we always copy a header */ | |
2023 | swivel = 0; | |
2024 | if (p == (char *) skb->data) { /* not split */ | |
2025 | addr = cas_page_map(page->buffer); | |
2026 | memcpy(p, addr + off, RX_COPY_MIN); | |
2027 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | |
2028 | PCI_DMA_FROMDEVICE); | |
2029 | cas_page_unmap(addr); | |
2030 | off += RX_COPY_MIN; | |
2031 | swivel = RX_COPY_MIN; | |
2032 | RX_USED_ADD(page, cp->mtu_stride); | |
2033 | } else { | |
2034 | RX_USED_ADD(page, hlen); | |
2035 | } | |
2036 | skb_put(skb, alloclen); | |
2037 | ||
2038 | skb_shinfo(skb)->nr_frags++; | |
2039 | skb->data_len += hlen - swivel; | |
d011a231 | 2040 | skb->truesize += hlen - swivel; |
1f26dac3 DM |
2041 | skb->len += hlen - swivel; |
2042 | ||
2043 | get_page(page->buffer); | |
2044 | frag->page = page->buffer; | |
2045 | frag->page_offset = off; | |
2046 | frag->size = hlen - swivel; | |
6aa20a22 | 2047 | |
1f26dac3 DM |
2048 | /* any more data? */ |
2049 | if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { | |
2050 | hlen = dlen; | |
2051 | off = 0; | |
2052 | ||
2053 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | |
2054 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | |
6aa20a22 JG |
2055 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, |
2056 | hlen + cp->crc_size, | |
1f26dac3 DM |
2057 | PCI_DMA_FROMDEVICE); |
2058 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, | |
2059 | hlen + cp->crc_size, | |
2060 | PCI_DMA_FROMDEVICE); | |
2061 | ||
2062 | skb_shinfo(skb)->nr_frags++; | |
2063 | skb->data_len += hlen; | |
6aa20a22 | 2064 | skb->len += hlen; |
1f26dac3 DM |
2065 | frag++; |
2066 | ||
2067 | get_page(page->buffer); | |
2068 | frag->page = page->buffer; | |
2069 | frag->page_offset = 0; | |
2070 | frag->size = hlen; | |
2071 | RX_USED_ADD(page, hlen + cp->crc_size); | |
2072 | } | |
2073 | ||
2074 | if (cp->crc_size) { | |
2075 | addr = cas_page_map(page->buffer); | |
2076 | crcaddr = addr + off + hlen; | |
2077 | } | |
2078 | ||
2079 | } else { | |
2080 | /* copying packet */ | |
2081 | if (!dlen) | |
2082 | goto end_copy_pkt; | |
2083 | ||
2084 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | |
2085 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | |
2086 | off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; | |
2087 | hlen = min(cp->page_size - off, dlen); | |
2088 | if (hlen < 0) { | |
2089 | if (netif_msg_rx_err(cp)) { | |
2090 | printk(KERN_DEBUG "%s: rx page overflow: " | |
2091 | "%d\n", cp->dev->name, hlen); | |
2092 | } | |
2093 | dev_kfree_skb_irq(skb); | |
2094 | return -1; | |
2095 | } | |
2096 | i = hlen; | |
2097 | if (i == dlen) /* attach FCS */ | |
2098 | i += cp->crc_size; | |
2099 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, | |
2100 | PCI_DMA_FROMDEVICE); | |
2101 | addr = cas_page_map(page->buffer); | |
2102 | memcpy(p, addr + off, i); | |
2103 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, | |
2104 | PCI_DMA_FROMDEVICE); | |
2105 | cas_page_unmap(addr); | |
2106 | if (p == (char *) skb->data) /* not split */ | |
2107 | RX_USED_ADD(page, cp->mtu_stride); | |
2108 | else | |
2109 | RX_USED_ADD(page, i); | |
6aa20a22 | 2110 | |
1f26dac3 DM |
2111 | /* any more data? */ |
2112 | if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { | |
2113 | p += hlen; | |
2114 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | |
2115 | page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; | |
6aa20a22 JG |
2116 | pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, |
2117 | dlen + cp->crc_size, | |
1f26dac3 DM |
2118 | PCI_DMA_FROMDEVICE); |
2119 | addr = cas_page_map(page->buffer); | |
2120 | memcpy(p, addr, dlen + cp->crc_size); | |
2121 | pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, | |
2122 | dlen + cp->crc_size, | |
2123 | PCI_DMA_FROMDEVICE); | |
2124 | cas_page_unmap(addr); | |
6aa20a22 | 2125 | RX_USED_ADD(page, dlen + cp->crc_size); |
1f26dac3 DM |
2126 | } |
2127 | end_copy_pkt: | |
2128 | if (cp->crc_size) { | |
2129 | addr = NULL; | |
2130 | crcaddr = skb->data + alloclen; | |
2131 | } | |
2132 | skb_put(skb, alloclen); | |
2133 | } | |
2134 | ||
e5e02540 | 2135 | csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); |
1f26dac3 DM |
2136 | if (cp->crc_size) { |
2137 | /* checksum includes FCS. strip it out. */ | |
e5e02540 AV |
2138 | csum = csum_fold(csum_partial(crcaddr, cp->crc_size, |
2139 | csum_unfold(csum))); | |
1f26dac3 DM |
2140 | if (addr) |
2141 | cas_page_unmap(addr); | |
2142 | } | |
e5e02540 | 2143 | skb->csum = csum_unfold(~csum); |
84fa7933 | 2144 | skb->ip_summed = CHECKSUM_COMPLETE; |
1f26dac3 DM |
2145 | skb->protocol = eth_type_trans(skb, cp->dev); |
2146 | return len; | |
2147 | } | |
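/* editor's note: the function above implements the three RX layouts
 * described before cas_rx_ringN() below -- small packets copied whole
 * into the skb (alloclen == len), larger packets attached as page
 * fragments after a RX_COPY_MIN header copy, and split packets gaining
 * a second fragment when RX_COMP1_SPLIT_PKT is set.
 */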
2148 | ||
2149 | ||
2150 | /* we can handle up to 64 rx flows at a time. we do the same thing | |
6aa20a22 | 2151 | * as nonreassm except that we batch up the buffers. |
1f26dac3 DM |
2152 | * NOTE: we currently just treat each flow as a bunch of packets that |
2153 | * we pass up. a better way would be to coalesce the packets | |
2154 | * into a jumbo packet. to do that, we need to do the following: | |
2155 | * 1) the first packet will have a clean split between header and | |
2156 | * data. save both. | |
2157 | * 2) each time the next flow packet comes in, extend the | |
2158 | * data length and merge the checksums. | |
2159 | * 3) on flow release, fix up the header. | |
2160 | * 4) make sure the higher layer doesn't care. | |
6aa20a22 | 2161 | * because packets get coalesced, we shouldn't run into fragment count |
1f26dac3 DM |
2162 | * issues. |
2163 | */ | |
2164 | static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, | |
2165 | struct sk_buff *skb) | |
2166 | { | |
2167 | int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); | |
2168 | struct sk_buff_head *flow = &cp->rx_flows[flowid]; | |
6aa20a22 JG |
2169 | |
2170 | /* this is protected at a higher layer, so no need to | |
1f26dac3 DM |
2171 | * do any additional locking here. stick the buffer |
2172 | * at the end. | |
2173 | */ | |
2174 | __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); | |
2175 | if (words[0] & RX_COMP1_RELEASE_FLOW) { | |
2176 | while ((skb = __skb_dequeue(flow))) { | |
2177 | cas_skb_release(skb); | |
2178 | } | |
2179 | } | |
2180 | } | |
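/* editor's note: as implemented, a "flow" is simply a per-flowid queue:
 * skbs are appended in arrival order and pushed up the stack together
 * when the hardware flags RX_COMP1_RELEASE_FLOW. The jumbo-coalescing
 * scheme sketched in the comment above is a TODO, not current behavior.
 */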
2181 | ||
2182 | /* put rx descriptor back on ring. if a buffer is in use by a higher | |
2183 | * layer, this will need to put in a replacement. | |
2184 | */ | |
2185 | static void cas_post_page(struct cas *cp, const int ring, const int index) | |
2186 | { | |
2187 | cas_page_t *new; | |
2188 | int entry; | |
2189 | ||
2190 | entry = cp->rx_old[ring]; | |
2191 | ||
2192 | new = cas_page_swap(cp, ring, index); | |
2193 | cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); | |
2194 | cp->init_rxds[ring][entry].index = | |
6aa20a22 | 2195 | cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | |
1f26dac3 DM |
2196 | CAS_BASE(RX_INDEX_RING, ring)); |
2197 | ||
2198 | entry = RX_DESC_ENTRY(ring, entry + 1); | |
2199 | cp->rx_old[ring] = entry; | |
6aa20a22 | 2200 | |
1f26dac3 DM |
2201 | if (entry % 4) |
2202 | return; | |
2203 | ||
2204 | if (ring == 0) | |
2205 | writel(entry, cp->regs + REG_RX_KICK); | |
2206 | else if ((N_RX_DESC_RINGS > 1) && | |
6aa20a22 | 2207 | (cp->cas_flags & CAS_FLAG_REG_PLUS)) |
1f26dac3 DM |
2208 | writel(entry, cp->regs + REG_PLUS_RX_KICK1); |
2209 | } | |
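/* editor's note: the early return on (entry % 4) means the kick
 * register is only written once a full group of four descriptors has
 * been replenished; presumably the hardware consumes descriptors in
 * 4-entry chunks (an assumption based on this test and the matching
 * cluster logic in cas_post_rxds_ringN()).
 */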
2210 | ||
2211 | ||
2212 | /* bulk-replenish rx descriptors; only used when buffers have run out */ | |
2213 | static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) | |
2214 | { | |
2215 | unsigned int entry, last, count, released; | |
2216 | int cluster; | |
2217 | cas_page_t **page = cp->rx_pages[ring]; | |
2218 | ||
2219 | entry = cp->rx_old[ring]; | |
2220 | ||
2221 | if (netif_msg_intr(cp)) | |
2222 | printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", | |
2223 | cp->dev->name, ring, entry); | |
2224 | ||
2225 | cluster = -1; | |
6aa20a22 | 2226 | count = entry & 0x3; |
1f26dac3 DM |
2227 | last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4); |
2228 | released = 0; | |
2229 | while (entry != last) { | |
2230 | /* make a new buffer if it's still in use */ | |
9de4dfb4 | 2231 | if (page_count(page[entry]->buffer) > 1) { |
1f26dac3 DM |
2232 | cas_page_t *new = cas_page_dequeue(cp); |
2233 | if (!new) { | |
6aa20a22 | 2234 | /* let the timer know that we need to |
1f26dac3 DM |
2235 | * do this again |
2236 | */ | |
2237 | cp->cas_flags |= CAS_FLAG_RXD_POST(ring); | |
2238 | if (!timer_pending(&cp->link_timer)) | |
6aa20a22 | 2239 | mod_timer(&cp->link_timer, jiffies + |
1f26dac3 DM |
2240 | CAS_LINK_FAST_TIMEOUT); |
2241 | cp->rx_old[ring] = entry; | |
2242 | cp->rx_last[ring] = num ? num - released : 0; | |
2243 | return -ENOMEM; | |
2244 | } | |
2245 | spin_lock(&cp->rx_inuse_lock); | |
2246 | list_add(&page[entry]->list, &cp->rx_inuse_list); | |
2247 | spin_unlock(&cp->rx_inuse_lock); | |
6aa20a22 | 2248 | cp->init_rxds[ring][entry].buffer = |
1f26dac3 DM |
2249 | cpu_to_le64(new->dma_addr); |
2250 | page[entry] = new; | |
6aa20a22 | 2251 | |
1f26dac3 DM |
2252 | } |
2253 | ||
2254 | if (++count == 4) { | |
2255 | cluster = entry; | |
2256 | count = 0; | |
2257 | } | |
2258 | released++; | |
2259 | entry = RX_DESC_ENTRY(ring, entry + 1); | |
2260 | } | |
2261 | cp->rx_old[ring] = entry; | |
2262 | ||
6aa20a22 | 2263 | if (cluster < 0) |
1f26dac3 DM |
2264 | return 0; |
2265 | ||
2266 | if (ring == 0) | |
2267 | writel(cluster, cp->regs + REG_RX_KICK); | |
2268 | else if ((N_RX_DESC_RINGS > 1) && | |
6aa20a22 | 2269 | (cp->cas_flags & CAS_FLAG_REG_PLUS)) |
1f26dac3 DM |
2270 | writel(cluster, cp->regs + REG_PLUS_RX_KICK1); |
2271 | return 0; | |
2272 | } | |
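/* editor's note: count tracks the position within a group of four
 * descriptors (seeded from entry & 0x3) and cluster records the last
 * index of each completed group; only that value is written to the kick
 * register, so the hardware is always kicked on whole groups of freshly
 * posted descriptors, mirroring the rule in cas_post_page().
 */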
2273 | ||
2274 | ||
2275 | /* process a completion ring. packets are set up in three basic ways: | |
2276 | * small packets: should be copied header + data in single buffer. | |
2277 | * large packets: header and data in a single buffer. | |
6aa20a22 | 2278 | * split packets: header in a separate buffer from data. |
1f26dac3 | 2279 | * data may be in multiple pages. data may be > 256 |
6aa20a22 | 2280 | * bytes but in a single page. |
1f26dac3 DM |
2281 | * |
2282 | * NOTE: RX page posting is done in this routine as well. while there's | |
2283 | * the capability of using multiple RX completion rings, it isn't | |
2284 | * really worthwhile due to the fact that the page posting will | |
6aa20a22 | 2285 | * force serialization on the single descriptor ring. |
1f26dac3 DM |
2286 | */ |
2287 | static int cas_rx_ringN(struct cas *cp, int ring, int budget) | |
2288 | { | |
2289 | struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; | |
2290 | int entry, drops; | |
2291 | int npackets = 0; | |
2292 | ||
2293 | if (netif_msg_intr(cp)) | |
2294 | printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", | |
2295 | cp->dev->name, ring, | |
6aa20a22 | 2296 | readl(cp->regs + REG_RX_COMP_HEAD), |
1f26dac3 DM |
2297 | cp->rx_new[ring]); |
2298 | ||
2299 | entry = cp->rx_new[ring]; | |
2300 | drops = 0; | |
2301 | while (1) { | |
2302 | struct cas_rx_comp *rxc = rxcs + entry; | |
2303 | struct sk_buff *skb; | |
2304 | int type, len; | |
2305 | u64 words[4]; | |
2306 | int i, dring; | |
2307 | ||
2308 | words[0] = le64_to_cpu(rxc->word1); | |
2309 | words[1] = le64_to_cpu(rxc->word2); | |
2310 | words[2] = le64_to_cpu(rxc->word3); | |
2311 | words[3] = le64_to_cpu(rxc->word4); | |
2312 | ||
2313 | /* don't touch if still owned by hw */ | |
2314 | type = CAS_VAL(RX_COMP1_TYPE, words[0]); | |
2315 | if (type == 0) | |
2316 | break; | |
2317 | ||
2318 | /* hw hasn't cleared the zero bit yet */ | |
2319 | if (words[3] & RX_COMP4_ZERO) { | |
2320 | break; | |
2321 | } | |
2322 | ||
2323 | /* get info on the packet */ | |
2324 | if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { | |
2325 | spin_lock(&cp->stat_lock[ring]); | |
2326 | cp->net_stats[ring].rx_errors++; | |
2327 | if (words[3] & RX_COMP4_LEN_MISMATCH) | |
2328 | cp->net_stats[ring].rx_length_errors++; | |
2329 | if (words[3] & RX_COMP4_BAD) | |
2330 | cp->net_stats[ring].rx_crc_errors++; | |
2331 | spin_unlock(&cp->stat_lock[ring]); | |
2332 | ||
2333 | /* We'll just return it to Cassini. */ | |
2334 | drop_it: | |
2335 | spin_lock(&cp->stat_lock[ring]); | |
2336 | ++cp->net_stats[ring].rx_dropped; | |
2337 | spin_unlock(&cp->stat_lock[ring]); | |
2338 | goto next; | |
2339 | } | |
2340 | ||
2341 | len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); | |
2342 | if (len < 0) { | |
2343 | ++drops; | |
2344 | goto drop_it; | |
2345 | } | |
2346 | ||
2347 | /* see if it's a flow re-assembly or not. the driver | |
2348 | * itself handles release back up. | |
2349 | */ | |
2350 | if (RX_DONT_BATCH || (type == 0x2)) { | |
2351 | /* non-reassm: these always get released */ | |
6aa20a22 | 2352 | cas_skb_release(skb); |
1f26dac3 DM |
2353 | } else { |
2354 | cas_rx_flow_pkt(cp, words, skb); | |
2355 | } | |
2356 | ||
2357 | spin_lock(&cp->stat_lock[ring]); | |
2358 | cp->net_stats[ring].rx_packets++; | |
2359 | cp->net_stats[ring].rx_bytes += len; | |
2360 | spin_unlock(&cp->stat_lock[ring]); | |
2361 | cp->dev->last_rx = jiffies; | |
2362 | ||
2363 | next: | |
2364 | npackets++; | |
2365 | ||
2366 | /* should it be released? */ | |
2367 | if (words[0] & RX_COMP1_RELEASE_HDR) { | |
2368 | i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); | |
2369 | dring = CAS_VAL(RX_INDEX_RING, i); | |
2370 | i = CAS_VAL(RX_INDEX_NUM, i); | |
2371 | cas_post_page(cp, dring, i); | |
2372 | } | |
6aa20a22 | 2373 | |
1f26dac3 DM |
2374 | if (words[0] & RX_COMP1_RELEASE_DATA) { |
2375 | i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); | |
2376 | dring = CAS_VAL(RX_INDEX_RING, i); | |
2377 | i = CAS_VAL(RX_INDEX_NUM, i); | |
2378 | cas_post_page(cp, dring, i); | |
2379 | } | |
2380 | ||
2381 | if (words[0] & RX_COMP1_RELEASE_NEXT) { | |
2382 | i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); | |
2383 | dring = CAS_VAL(RX_INDEX_RING, i); | |
2384 | i = CAS_VAL(RX_INDEX_NUM, i); | |
2385 | cas_post_page(cp, dring, i); | |
2386 | } | |
2387 | ||
2388 | /* skip to the next entry */ | |
6aa20a22 | 2389 | entry = RX_COMP_ENTRY(ring, entry + 1 + |
1f26dac3 DM |
2390 | CAS_VAL(RX_COMP1_SKIP, words[0])); |
2391 | #ifdef USE_NAPI | |
2392 | if (budget && (npackets >= budget)) | |
2393 | break; | |
2394 | #endif | |
2395 | } | |
2396 | cp->rx_new[ring] = entry; | |
2397 | ||
2398 | if (drops) | |
2399 | printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", | |
2400 | cp->dev->name); | |
2401 | return npackets; | |
2402 | } | |
2403 | ||
2404 | ||
2405 | /* put completion entries back on the ring */ | |
2406 | static void cas_post_rxcs_ringN(struct net_device *dev, | |
2407 | struct cas *cp, int ring) | |
2408 | { | |
2409 | struct cas_rx_comp *rxc = cp->init_rxcs[ring]; | |
2410 | int last, entry; | |
2411 | ||
2412 | last = cp->rx_cur[ring]; | |
6aa20a22 | 2413 | entry = cp->rx_new[ring]; |
1f26dac3 DM |
2414 | if (netif_msg_intr(cp)) |
2415 | printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", | |
2416 | dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), | |
2417 | entry); | |
6aa20a22 | 2418 | |
1f26dac3 DM |
2419 | /* zero and re-mark descriptors */ |
2420 | while (last != entry) { | |
2421 | cas_rxc_init(rxc + last); | |
2422 | last = RX_COMP_ENTRY(ring, last + 1); | |
2423 | } | |
2424 | cp->rx_cur[ring] = last; | |
2425 | ||
2426 | if (ring == 0) | |
2427 | writel(last, cp->regs + REG_RX_COMP_TAIL); | |
6aa20a22 | 2428 | else if (cp->cas_flags & CAS_FLAG_REG_PLUS) |
1f26dac3 DM |
2429 | writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); |
2430 | } | |
2431 | ||
2432 | ||
2433 | ||
6aa20a22 | 2434 | /* cassini can use all four PCI interrupts for the completion ring. |
1f26dac3 DM |
2435 | * rings 3 and 4 are identical |
2436 | */ | |
2437 | #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) | |
6aa20a22 | 2438 | static inline void cas_handle_irqN(struct net_device *dev, |
1f26dac3 DM |
2439 | struct cas *cp, const u32 status, |
2440 | const int ring) | |
2441 | { | |
6aa20a22 | 2442 | if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) |
1f26dac3 DM |
2443 | cas_post_rxcs_ringN(dev, cp, ring); |
2444 | } | |
2445 | ||
7d12e780 | 2446 | static irqreturn_t cas_interruptN(int irq, void *dev_id) |
1f26dac3 DM |
2447 | { |
2448 | struct net_device *dev = dev_id; | |
2449 | struct cas *cp = netdev_priv(dev); | |
2450 | unsigned long flags; | |
2451 | /* ring must be known before reading the per-ring status below */ | |
2452 | int ring = (irq == cp->pci_irq_INTC) ? 2 : 3; | |
2453 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); | |
2454 | ||
2455 | /* check for shared irq */ | |
2456 | if (status == 0) | |
2457 | return IRQ_NONE; | |
2458 | ||
2459 | spin_lock_irqsave(&cp->lock, flags); | |
2460 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | |
2461 | #ifdef USE_NAPI | |
2462 | cas_mask_intr(cp); | |
bea3348e | 2463 | netif_rx_schedule(dev, &cp->napi); |
1f26dac3 DM |
2464 | #else |
2465 | cas_rx_ringN(cp, ring, 0); | |
2466 | #endif | |
2467 | status &= ~INTR_RX_DONE_ALT; | |
2468 | } | |
2469 | ||
2470 | if (status) | |
2471 | cas_handle_irqN(dev, cp, status, ring); | |
2472 | spin_unlock_irqrestore(&cp->lock, flags); | |
2473 | return IRQ_HANDLED; | |
2474 | } | |
2475 | #endif | |
2476 | ||
2477 | #ifdef USE_PCI_INTB | |
2478 | /* everything but rx packets */ | |
2479 | static inline void cas_handle_irq1(struct cas *cp, const u32 status) | |
2480 | { | |
2481 | if (status & INTR_RX_BUF_UNAVAIL_1) { | |
6aa20a22 | 2482 | /* Frame arrived, no free RX buffers available. |
1f26dac3 DM |
2483 | * NOTE: we can get this on a link transition. */ |
2484 | cas_post_rxds_ringN(cp, 1, 0); | |
2485 | spin_lock(&cp->stat_lock[1]); | |
2486 | cp->net_stats[1].rx_dropped++; | |
2487 | spin_unlock(&cp->stat_lock[1]); | |
2488 | } | |
2489 | ||
6aa20a22 JG |
2490 | if (status & INTR_RX_BUF_AE_1) |
2491 | cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - | |
1f26dac3 DM |
2492 | RX_AE_FREEN_VAL(1)); |
2493 | ||
2494 | if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) | |
2495 | cas_post_rxcs_ringN(cp, 1); | |
2496 | } | |
2497 | ||
2498 | /* ring 2 handles a few more events than 3 and 4 */ | |
7d12e780 | 2499 | static irqreturn_t cas_interrupt1(int irq, void *dev_id) |
1f26dac3 DM |
2500 | { |
2501 | struct net_device *dev = dev_id; | |
2502 | struct cas *cp = netdev_priv(dev); | |
2503 | unsigned long flags; | |
2504 | u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); | |
2505 | ||
2506 | /* check for shared interrupt */ | |
2507 | if (status == 0) | |
2508 | return IRQ_NONE; | |
2509 | ||
2510 | spin_lock_irqsave(&cp->lock, flags); | |
2511 | if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ | |
2512 | #ifdef USE_NAPI | |
2513 | cas_mask_intr(cp); | |
bea3348e | 2514 | netif_rx_schedule(dev, &cp->napi); |
1f26dac3 DM |
2515 | #else |
2516 | cas_rx_ringN(cp, 1, 0); | |
2517 | #endif | |
2518 | status &= ~INTR_RX_DONE_ALT; | |
2519 | } | |
2520 | if (status) | |
2521 | cas_handle_irq1(cp, status); | |
2522 | spin_unlock_irqrestore(&cp->lock, flags); | |
2523 | return IRQ_HANDLED; | |
2524 | } | |
2525 | #endif | |
2526 | ||
2527 | static inline void cas_handle_irq(struct net_device *dev, | |
2528 | struct cas *cp, const u32 status) | |
2529 | { | |
2530 | /* housekeeping interrupts */ | |
2531 | if (status & INTR_ERROR_MASK) | |
2532 | cas_abnormal_irq(dev, cp, status); | |
2533 | ||
2534 | if (status & INTR_RX_BUF_UNAVAIL) { | |
6aa20a22 | 2535 | /* Frame arrived, no free RX buffers available. |
1f26dac3 DM |
2536 | * NOTE: we can get this on a link transition. |
2537 | */ | |
2538 | cas_post_rxds_ringN(cp, 0, 0); | |
2539 | spin_lock(&cp->stat_lock[0]); | |
2540 | cp->net_stats[0].rx_dropped++; | |
2541 | spin_unlock(&cp->stat_lock[0]); | |
2542 | } else if (status & INTR_RX_BUF_AE) { | |
2543 | cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - | |
2544 | RX_AE_FREEN_VAL(0)); | |
2545 | } | |
2546 | ||
2547 | if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) | |
2548 | cas_post_rxcs_ringN(dev, cp, 0); | |
2549 | } | |
2550 | ||
7d12e780 | 2551 | static irqreturn_t cas_interrupt(int irq, void *dev_id) |
1f26dac3 DM |
2552 | { |
2553 | struct net_device *dev = dev_id; | |
2554 | struct cas *cp = netdev_priv(dev); | |
2555 | unsigned long flags; | |
2556 | u32 status = readl(cp->regs + REG_INTR_STATUS); | |
2557 | ||
2558 | if (status == 0) | |
2559 | return IRQ_NONE; | |
2560 | ||
2561 | spin_lock_irqsave(&cp->lock, flags); | |
2562 | if (status & (INTR_TX_ALL | INTR_TX_INTME)) { | |
2563 | cas_tx(dev, cp, status); | |
2564 | status &= ~(INTR_TX_ALL | INTR_TX_INTME); | |
2565 | } | |
2566 | ||
2567 | if (status & INTR_RX_DONE) { | |
2568 | #ifdef USE_NAPI | |
2569 | cas_mask_intr(cp); | |
bea3348e | 2570 | netif_rx_schedule(dev, &cp->napi); |
1f26dac3 DM |
2571 | #else |
2572 | cas_rx_ringN(cp, 0, 0); | |
2573 | #endif | |
2574 | status &= ~INTR_RX_DONE; | |
2575 | } | |
2576 | ||
2577 | if (status) | |
2578 | cas_handle_irq(dev, cp, status); | |
2579 | spin_unlock_irqrestore(&cp->lock, flags); | |
2580 | return IRQ_HANDLED; | |
2581 | } | |
2582 | ||
2583 | ||
2584 | #ifdef USE_NAPI | |
bea3348e | 2585 | static int cas_poll(struct napi_struct *napi, int budget) |
1f26dac3 | 2586 | { |
bea3348e SH |
2587 | struct cas *cp = container_of(napi, struct cas, napi); |
2588 | struct net_device *dev = cp->dev; | |
86216268 | 2589 | int i, enable_intr, credits; |
1f26dac3 DM |
2590 | u32 status = readl(cp->regs + REG_INTR_STATUS); |
2591 | unsigned long flags; | |
2592 | ||
2593 | spin_lock_irqsave(&cp->lock, flags); | |
2594 | cas_tx(dev, cp, status); | |
2595 | spin_unlock_irqrestore(&cp->lock, flags); | |
2596 | ||
2597 | /* NAPI rx packets. we spread the credits across all of the | |
2598 | * rxc rings | |
bea3348e SH |
2599 | * |
2600 | * to make sure we're fair with the work we loop through each | |
6aa20a22 | 2601 | * ring N_RX_COMP_RINGS times with a request of
bea3348e | 2602 | * budget / N_RX_COMP_RINGS |
1f26dac3 DM |
2603 | */ |
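	/* editor's note -- example: with budget == 64 and
	 * N_RX_COMP_RINGS == 4, each cas_rx_ringN() call below is capped
	 * at 16 packets and up to 4 * 4 calls are made before the budget
	 * is exhausted and interrupts are left masked.
	 */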
2604 | enable_intr = 1; | |
2605 | credits = 0; | |
2606 | for (i = 0; i < N_RX_COMP_RINGS; i++) { | |
2607 | int j; | |
2608 | for (j = 0; j < N_RX_COMP_RINGS; j++) { | |
bea3348e SH |
2609 | credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); |
2610 | if (credits >= budget) { | |
1f26dac3 DM |
2611 | enable_intr = 0; |
2612 | goto rx_comp; | |
2613 | } | |
2614 | } | |
2615 | } | |
2616 | ||
2617 | rx_comp: | |
1f26dac3 DM |
2618 | /* final rx completion */ |
2619 | spin_lock_irqsave(&cp->lock, flags); | |
2620 | if (status) | |
2621 | cas_handle_irq(dev, cp, status); | |
2622 | ||
2623 | #ifdef USE_PCI_INTB | |
2624 | if (N_RX_COMP_RINGS > 1) { | |
2625 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); | |
2626 | if (status) | |
2627 | cas_handle_irq1(cp, status); | |
2628 | } | |
2629 | #endif | |
2630 | ||
2631 | #ifdef USE_PCI_INTC | |
2632 | if (N_RX_COMP_RINGS > 2) { | |
2633 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); | |
2634 | if (status) | |
2635 | cas_handle_irqN(dev, cp, status, 2); | |
2636 | } | |
2637 | #endif | |
2638 | ||
2639 | #ifdef USE_PCI_INTD | |
2640 | if (N_RX_COMP_RINGS > 3) { | |
2641 | status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); | |
2642 | if (status) | |
2643 | cas_handle_irqN(dev, cp, status, 3); | |
2644 | } | |
2645 | #endif | |
2646 | spin_unlock_irqrestore(&cp->lock, flags); | |
2647 | if (enable_intr) { | |
bea3348e | 2648 | netif_rx_complete(dev, napi); |
1f26dac3 | 2649 | cas_unmask_intr(cp); |
1f26dac3 | 2650 | } |
bea3348e | 2651 | return credits; |
1f26dac3 DM |
2652 | } |
2653 | #endif | |
2654 | ||
2655 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2656 | static void cas_netpoll(struct net_device *dev) | |
2657 | { | |
2658 | struct cas *cp = netdev_priv(dev); | |
2659 | ||
2660 | cas_disable_irq(cp, 0); | |
7d12e780 | 2661 | cas_interrupt(cp->pdev->irq, dev); |
1f26dac3 DM |
2662 | cas_enable_irq(cp, 0); |
2663 | ||
2664 | #ifdef USE_PCI_INTB | |
2665 | if (N_RX_COMP_RINGS > 1) { | |
2666 | /* cas_interrupt1(); */ | |
2667 | } | |
2668 | #endif | |
2669 | #ifdef USE_PCI_INTC | |
2670 | if (N_RX_COMP_RINGS > 2) { | |
2671 | /* cas_interruptN(); */ | |
2672 | } | |
2673 | #endif | |
2674 | #ifdef USE_PCI_INTD | |
2675 | if (N_RX_COMP_RINGS > 3) { | |
2676 | /* cas_interruptN(); */ | |
2677 | } | |
2678 | #endif | |
2679 | } | |
2680 | #endif | |
2681 | ||
2682 | static void cas_tx_timeout(struct net_device *dev) | |
2683 | { | |
2684 | struct cas *cp = netdev_priv(dev); | |
2685 | ||
2686 | printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); | |
2687 | if (!cp->hw_running) { | |
2688 | printk("%s: hrm.. hw not running!\n", dev->name); | |
2689 | return; | |
2690 | } | |
2691 | ||
2692 | printk(KERN_ERR "%s: MIF_STATE[%08x]\n", | |
2693 | dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); | |
2694 | ||
2695 | printk(KERN_ERR "%s: MAC_STATE[%08x]\n", | |
2696 | dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); | |
2697 | ||
2698 | printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " | |
2699 | "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", | |
2700 | dev->name, | |
2701 | readl(cp->regs + REG_TX_CFG), | |
2702 | readl(cp->regs + REG_MAC_TX_STATUS), | |
2703 | readl(cp->regs + REG_MAC_TX_CFG), | |
2704 | readl(cp->regs + REG_TX_FIFO_PKT_CNT), | |
2705 | readl(cp->regs + REG_TX_FIFO_WRITE_PTR), | |
2706 | readl(cp->regs + REG_TX_FIFO_READ_PTR), | |
2707 | readl(cp->regs + REG_TX_SM_1), | |
2708 | readl(cp->regs + REG_TX_SM_2)); | |
2709 | ||
2710 | printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", | |
2711 | dev->name, | |
2712 | readl(cp->regs + REG_RX_CFG), | |
2713 | readl(cp->regs + REG_MAC_RX_STATUS), | |
2714 | readl(cp->regs + REG_MAC_RX_CFG)); | |
2715 | ||
2716 | printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", | |
2717 | dev->name, | |
2718 | readl(cp->regs + REG_HP_STATE_MACHINE), | |
2719 | readl(cp->regs + REG_HP_STATUS0), | |
2720 | readl(cp->regs + REG_HP_STATUS1), | |
2721 | readl(cp->regs + REG_HP_STATUS2)); | |
2722 | ||
2723 | #if 1 | |
2724 | atomic_inc(&cp->reset_task_pending); | |
2725 | atomic_inc(&cp->reset_task_pending_all); | |
2726 | schedule_work(&cp->reset_task); | |
2727 | #else | |
2728 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | |
2729 | schedule_work(&cp->reset_task); | |
2730 | #endif | |
2731 | } | |
2732 | ||
2733 | static inline int cas_intme(int ring, int entry) | |
2734 | { | |
2735 | /* Algorithm: IRQ every 1/2 of descriptors. */ | |
2736 | if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) | |
2737 | return 1; | |
2738 | return 0; | |
2739 | } | |
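/* editor's note -- example: for a 64-entry ring the mask is
 * (64 >> 1) - 1 == 31, so only entries 0 and 32 request an interrupt,
 * i.e. one TX completion interrupt per half ring.
 */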
2740 | ||
2741 | ||
2742 | static void cas_write_txd(struct cas *cp, int ring, int entry, | |
2743 | dma_addr_t mapping, int len, u64 ctrl, int last) | |
2744 | { | |
2745 | struct cas_tx_desc *txd = cp->init_txds[ring] + entry; | |
2746 | ||
2747 | ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); | |
2748 | if (cas_intme(ring, entry)) | |
2749 | ctrl |= TX_DESC_INTME; | |
2750 | if (last) | |
2751 | ctrl |= TX_DESC_EOF; | |
2752 | txd->control = cpu_to_le64(ctrl); | |
2753 | txd->buffer = cpu_to_le64(mapping); | |
2754 | } | |
2755 | ||
6aa20a22 | 2756 | static inline void *tx_tiny_buf(struct cas *cp, const int ring, |
1f26dac3 DM |
2757 | const int entry) |
2758 | { | |
2759 | return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; | |
2760 | } | |
2761 | ||
6aa20a22 | 2762 | static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, |
1f26dac3 DM |
2763 | const int entry, const int tentry) |
2764 | { | |
2765 | cp->tx_tiny_use[ring][tentry].nbufs++; | |
2766 | cp->tx_tiny_use[ring][entry].used = 1; | |
2767 | return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; | |
2768 | } | |
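/* editor's note: tiny-buffer accounting is split across two slots --
 * nbufs is bumped on the skb's first descriptor (tentry) so that
 * cas_tx_ringN() can compute its release count, while used marks the
 * slot actually holding the bounce buffer so the unmap loop can skip
 * over it.
 */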
2769 | ||
6aa20a22 | 2770 | static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, |
1f26dac3 DM |
2771 | struct sk_buff *skb) |
2772 | { | |
2773 | struct net_device *dev = cp->dev; | |
2774 | int entry, nr_frags, frag, tabort, tentry; | |
2775 | dma_addr_t mapping; | |
2776 | unsigned long flags; | |
2777 | u64 ctrl; | |
2778 | u32 len; | |
2779 | ||
2780 | spin_lock_irqsave(&cp->tx_lock[ring], flags); | |
2781 | ||
2782 | /* This is a hard error, log it. */ | |
6aa20a22 | 2783 | if (TX_BUFFS_AVAIL(cp, ring) <= |
1f26dac3 DM |
2784 | CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { |
2785 | netif_stop_queue(dev); | |
2786 | spin_unlock_irqrestore(&cp->tx_lock[ring], flags); | |
2787 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | |
2788 | "queue awake!\n", dev->name); | |
2789 | return 1; | |
2790 | } | |
2791 | ||
2792 | ctrl = 0; | |
84fa7933 | 2793 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
ea2ae17d ACM |
2794 | const u64 csum_start_off = skb_transport_offset(skb); |
2795 | const u64 csum_stuff_off = csum_start_off + skb->csum_offset; | |
1f26dac3 | 2796 | |
6aa20a22 | 2797 | ctrl = TX_DESC_CSUM_EN | |
1f26dac3 DM |
2798 | CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | |
2799 | CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); | |
2800 | } | |
2801 | ||
2802 | entry = cp->tx_new[ring]; | |
2803 | cp->tx_skbs[ring][entry] = skb; | |
2804 | ||
2805 | nr_frags = skb_shinfo(skb)->nr_frags; | |
2806 | len = skb_headlen(skb); | |
2807 | mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), | |
2808 | offset_in_page(skb->data), len, | |
2809 | PCI_DMA_TODEVICE); | |
2810 | ||
2811 | tentry = entry; | |
2812 | tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); | |
2813 | if (unlikely(tabort)) { | |
2814 | /* NOTE: len is always > tabort */ | |
6aa20a22 | 2815 | cas_write_txd(cp, ring, entry, mapping, len - tabort, |
1f26dac3 DM |
2816 | ctrl | TX_DESC_SOF, 0); |
2817 | entry = TX_DESC_NEXT(ring, entry); | |
2818 | ||
d626f62b ACM |
2819 | skb_copy_from_linear_data_offset(skb, len - tabort, |
2820 | tx_tiny_buf(cp, ring, entry), tabort); | |
1f26dac3 DM |
2821 | mapping = tx_tiny_map(cp, ring, entry, tentry); |
2822 | cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, | |
2823 | (nr_frags == 0)); | |
2824 | } else { | |
6aa20a22 | 2825 | cas_write_txd(cp, ring, entry, mapping, len, ctrl | |
1f26dac3 DM |
2826 | TX_DESC_SOF, (nr_frags == 0)); |
2827 | } | |
2828 | entry = TX_DESC_NEXT(ring, entry); | |
2829 | ||
2830 | for (frag = 0; frag < nr_frags; frag++) { | |
2831 | skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; | |
2832 | ||
2833 | len = fragp->size; | |
2834 | mapping = pci_map_page(cp->pdev, fragp->page, | |
2835 | fragp->page_offset, len, | |
2836 | PCI_DMA_TODEVICE); | |
2837 | ||
2838 | tabort = cas_calc_tabort(cp, fragp->page_offset, len); | |
2839 | if (unlikely(tabort)) { | |
2840 | void *addr; | |
2841 | ||
2842 | /* NOTE: len is always > tabort */ | |
2843 | cas_write_txd(cp, ring, entry, mapping, len - tabort, | |
2844 | ctrl, 0); | |
2845 | entry = TX_DESC_NEXT(ring, entry); | |
6aa20a22 | 2846 | |
1f26dac3 DM |
2847 | addr = cas_page_map(fragp->page); |
2848 | memcpy(tx_tiny_buf(cp, ring, entry), | |
6aa20a22 | 2849 | addr + fragp->page_offset + len - tabort, |
1f26dac3 DM |
2850 | tabort); |
2851 | cas_page_unmap(addr); | |
2852 | mapping = tx_tiny_map(cp, ring, entry, tentry); | |
2853 | len = tabort; | |
2854 | } | |
2855 | ||
2856 | cas_write_txd(cp, ring, entry, mapping, len, ctrl, | |
2857 | (frag + 1 == nr_frags)); | |
2858 | entry = TX_DESC_NEXT(ring, entry); | |
2859 | } | |
2860 | ||
2861 | cp->tx_new[ring] = entry; | |
2862 | if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) | |
2863 | netif_stop_queue(dev); | |
2864 | ||
2865 | if (netif_msg_tx_queued(cp)) | |
2866 | printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " | |
2867 | "avail %d\n", | |
6aa20a22 | 2868 | dev->name, ring, entry, skb->len, |
1f26dac3 DM |
2869 | TX_BUFFS_AVAIL(cp, ring)); |
2870 | writel(entry, cp->regs + REG_TX_KICKN(ring)); | |
2871 | spin_unlock_irqrestore(&cp->tx_lock[ring], flags); | |
2872 | return 0; | |
6aa20a22 | 2873 | } |
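/* editor's note: the stop threshold above reserves the worst case for
 * one skb -- CAS_TABORT(cp) descriptors apiece for the head and each of
 * up to MAX_SKB_FRAGS fragments -- so the "Tx Ring full" BUG path at
 * the top of this function should be unreachable in normal operation.
 */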
1f26dac3 DM |
2874 | |
2875 | static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
2876 | { | |
2877 | struct cas *cp = netdev_priv(dev); | |
2878 | ||
2879 | /* this is only used as a load-balancing hint, so it doesn't | |
2880 | * need to be SMP safe | |
2881 | */ | |
6aa20a22 | 2882 | static int ring; |
1f26dac3 | 2883 | |
5b057c6b | 2884 | if (skb_padto(skb, cp->min_frame_size)) |
1f26dac3 DM |
2885 | return 0; |
2886 | ||
2887 | /* XXX: we need some higher-level QoS hooks to steer packets to | |
2888 | * individual queues. | |
2889 | */ | |
2890 | if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) | |
2891 | return 1; | |
2892 | dev->trans_start = jiffies; | |
2893 | return 0; | |
2894 | } | |
2895 | ||
2896 | static void cas_init_tx_dma(struct cas *cp) | |
2897 | { | |
2898 | u64 desc_dma = cp->block_dvma; | |
2899 | unsigned long off; | |
2900 | u32 val; | |
2901 | int i; | |
2902 | ||
2903 | /* set up tx completion writeback registers. must be 8-byte aligned */ | |
2904 | #ifdef USE_TX_COMPWB | |
2905 | off = offsetof(struct cas_init_block, tx_compwb); | |
2906 | writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); | |
2907 | writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); | |
2908 | #endif | |
2909 | ||
2910 | /* enable completion writebacks, enable paced mode, | |
2911 | * disable read pipe, and disable pre-interrupt compwbs | |
2912 | */ | |
6aa20a22 | 2913 | val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | |
1f26dac3 | 2914 | TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | |
6aa20a22 | 2915 | TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | |
1f26dac3 DM |
2916 | TX_CFG_INTR_COMPWB_DIS; |
2917 | ||
2918 | /* write out tx ring info and tx desc bases */ | |
2919 | for (i = 0; i < MAX_TX_RINGS; i++) { | |
6aa20a22 | 2920 | off = (unsigned long) cp->init_txds[i] - |
1f26dac3 DM |
2921 | (unsigned long) cp->init_block; |
2922 | ||
2923 | val |= CAS_TX_RINGN_BASE(i); | |
2924 | writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); | |
2925 | writel((desc_dma + off) & 0xffffffff, cp->regs + | |
2926 | REG_TX_DBN_LOW(i)); | |
2927 | /* don't zero out the kick register here as the system | |
2928 | * will wedge | |
2929 | */ | |
2930 | } | |
2931 | writel(val, cp->regs + REG_TX_CFG); | |
2932 | ||
2933 | /* program max burst sizes. these numbers should be different | |
2934 | * if doing QoS. | |
2935 | */ | |
2936 | #ifdef USE_QOS | |
2937 | writel(0x800, cp->regs + REG_TX_MAXBURST_0); | |
2938 | writel(0x1600, cp->regs + REG_TX_MAXBURST_1); | |
2939 | writel(0x2400, cp->regs + REG_TX_MAXBURST_2); | |
2940 | writel(0x4800, cp->regs + REG_TX_MAXBURST_3); | |
2941 | #else | |
2942 | writel(0x800, cp->regs + REG_TX_MAXBURST_0); | |
2943 | writel(0x800, cp->regs + REG_TX_MAXBURST_1); | |
2944 | writel(0x800, cp->regs + REG_TX_MAXBURST_2); | |
2945 | writel(0x800, cp->regs + REG_TX_MAXBURST_3); | |
2946 | #endif | |
2947 | } | |
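/* editor's note: under USE_QOS the per-ring max-burst weights of
 * 0x800/0x1600/0x2400/0x4800 give the rings roughly 1 : 2.75 : 4.5 : 9
 * relative transmit bandwidth; the default weights all rings equally.
 */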
2948 | ||
2949 | /* Must be invoked under cp->lock. */ | |
2950 | static inline void cas_init_dma(struct cas *cp) | |
2951 | { | |
2952 | cas_init_tx_dma(cp); | |
2953 | cas_init_rx_dma(cp); | |
2954 | } | |
2955 | ||
2956 | /* Must be invoked under cp->lock. */ | |
2957 | static u32 cas_setup_multicast(struct cas *cp) | |
2958 | { | |
2959 | u32 rxcfg = 0; | |
2960 | int i; | |
6aa20a22 | 2961 | |
1f26dac3 DM |
2962 | if (cp->dev->flags & IFF_PROMISC) { |
2963 | rxcfg |= MAC_RX_CFG_PROMISC_EN; | |
2964 | ||
2965 | } else if (cp->dev->flags & IFF_ALLMULTI) { | |
2966 | for (i=0; i < 16; i++) | |
2967 | writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); | |
2968 | rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; | |
2969 | ||
2970 | } else { | |
2971 | u16 hash_table[16]; | |
2972 | u32 crc; | |
2973 | struct dev_mc_list *dmi = cp->dev->mc_list; | |
2974 | int i; | |
2975 | ||
2976 | /* use the alternate mac address registers for the | |
2977 | * first 15 multicast addresses | |
2978 | */ | |
2979 | for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { | |
2980 | if (!dmi) { | |
2981 | writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); | |
2982 | writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); | |
2983 | writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); | |
2984 | continue; | |
2985 | } | |
6aa20a22 | 2986 | writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], |
1f26dac3 | 2987 | cp->regs + REG_MAC_ADDRN(i*3 + 0)); |
6aa20a22 | 2988 | writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], |
1f26dac3 | 2989 | cp->regs + REG_MAC_ADDRN(i*3 + 1)); |
6aa20a22 | 2990 | writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], |
1f26dac3 DM |
2991 | cp->regs + REG_MAC_ADDRN(i*3 + 2)); |
2992 | dmi = dmi->next; | |
2993 | } | |
2994 | ||
6aa20a22 | 2995 | /* use hw hash table for the next series of |
1f26dac3 DM |
2996 | * multicast addresses |
2997 | */ | |
2998 | memset(hash_table, 0, sizeof(hash_table)); | |
2999 | while (dmi) { | |
3000 | crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); | |
3001 | crc >>= 24; | |
3002 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | |
3003 | dmi = dmi->next; | |
3004 | } | |
3005 | for (i=0; i < 16; i++) | |
6aa20a22 | 3006 | writel(hash_table[i], cp->regs + |
1f26dac3 DM |
3007 | REG_MAC_HASH_TABLEN(i)); |
3008 | rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; | |
3009 | } | |
3010 | ||
3011 | return rxcfg; | |
3012 | } | |
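/* editor's note -- hash example: ether_crc_le() yields a 32-bit CRC and
 * crc >>= 24 keeps its top byte. For crc == 0xa7 the filter entry is
 * hash_table[0xa], bit (15 - 7) == 8, matching the sixteen 16-bit
 * REG_MAC_HASH_TABLEN() registers written above.
 */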
3013 | ||
3014 | /* must be invoked under cp->stat_lock[N_TX_RINGS] */ | |
3015 | static void cas_clear_mac_err(struct cas *cp) | |
3016 | { | |
3017 | writel(0, cp->regs + REG_MAC_COLL_NORMAL); | |
3018 | writel(0, cp->regs + REG_MAC_COLL_FIRST); | |
3019 | writel(0, cp->regs + REG_MAC_COLL_EXCESS); | |
3020 | writel(0, cp->regs + REG_MAC_COLL_LATE); | |
3021 | writel(0, cp->regs + REG_MAC_TIMER_DEFER); | |
3022 | writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); | |
3023 | writel(0, cp->regs + REG_MAC_RECV_FRAME); | |
3024 | writel(0, cp->regs + REG_MAC_LEN_ERR); | |
3025 | writel(0, cp->regs + REG_MAC_ALIGN_ERR); | |
3026 | writel(0, cp->regs + REG_MAC_FCS_ERR); | |
3027 | writel(0, cp->regs + REG_MAC_RX_CODE_ERR); | |
3028 | } | |
3029 | ||
3030 | ||
3031 | static void cas_mac_reset(struct cas *cp) | |
3032 | { | |
3033 | int i; | |
3034 | ||
3035 | /* do both TX and RX reset */ | |
3036 | writel(0x1, cp->regs + REG_MAC_TX_RESET); | |
3037 | writel(0x1, cp->regs + REG_MAC_RX_RESET); | |
3038 | ||
3039 | /* wait for TX */ | |
3040 | i = STOP_TRIES; | |
3041 | while (i-- > 0) { | |
3042 | if (readl(cp->regs + REG_MAC_TX_RESET) == 0) | |
3043 | break; | |
3044 | udelay(10); | |
3045 | } | |
3046 | ||
3047 | /* wait for RX */ | |
3048 | i = STOP_TRIES; | |
3049 | while (i-- > 0) { | |
3050 | if (readl(cp->regs + REG_MAC_RX_RESET) == 0) | |
3051 | break; | |
3052 | udelay(10); | |
3053 | } | |
3054 | ||
3055 | if (readl(cp->regs + REG_MAC_TX_RESET) | | |
3056 | readl(cp->regs + REG_MAC_RX_RESET)) | |
3057 | printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", | |
3058 | cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), | |
3059 | readl(cp->regs + REG_MAC_RX_RESET), | |
3060 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | |
3061 | } | |
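/* Note: both reset bits are self-clearing, so each poll loop above
 * simply waits (up to STOP_TRIES * 10us) for the hardware to
 * acknowledge the reset by returning the register to zero.
 */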
3062 | ||
3063 | ||
3064 | /* Must be invoked under cp->lock. */ | |
3065 | static void cas_init_mac(struct cas *cp) | |
3066 | { | |
3067 | unsigned char *e = &cp->dev->dev_addr[0]; | |
3068 | int i; | |
3069 | #ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE | |
3070 | u32 rxcfg; | |
3071 | #endif | |
3072 | cas_mac_reset(cp); | |
3073 | ||
3074 | /* setup core arbitration weight register */ | |
3075 | writel(CAWR_RR_DIS, cp->regs + REG_CAWR); | |
3076 | ||
3077 | /* XXX Use pci_dma_burst_advice() */ | |
3078 | #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) | |
3079 | /* set the infinite burst register for chips that don't have | |
3080 | * pci issues. | |
3081 | */ | |
3082 | if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) | |
3083 | writel(INF_BURST_EN, cp->regs + REG_INF_BURST); | |
3084 | #endif | |
3085 | ||
3086 | writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); | |
3087 | ||
3088 | writel(0x00, cp->regs + REG_MAC_IPG0); | |
3089 | writel(0x08, cp->regs + REG_MAC_IPG1); | |
3090 | writel(0x04, cp->regs + REG_MAC_IPG2); | |
6aa20a22 | 3091 | |
1f26dac3 | 3092 | /* change later for 802.3z */ |
6aa20a22 | 3093 | writel(0x40, cp->regs + REG_MAC_SLOT_TIME); |
1f26dac3 DM |
3094 | |
3095 | /* min frame + FCS */ | |
3096 | writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); | |
3097 | ||
3098 | /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we | |
6aa20a22 | 3099 | * specify the maximum frame size to prevent RX tag errors on |
1f26dac3 DM |
3100 | * oversized frames. |
3101 | */ | |
3102 | writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | | |
6aa20a22 JG |
3103 | CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, |
3104 | (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), | |
1f26dac3 DM |
3105 | cp->regs + REG_MAC_FRAMESIZE_MAX); |
3106 | ||
6aa20a22 | 3107 | /* NOTE: crc_size is used as a surrogate for half-duplex. |
1f26dac3 DM |
3108 | * workaround saturn half-duplex issue by increasing preamble |
3109 | * size to 65 bytes. | |
3110 | */ | |
3111 | if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) | |
3112 | writel(0x41, cp->regs + REG_MAC_PA_SIZE); | |
3113 | else | |
3114 | writel(0x07, cp->regs + REG_MAC_PA_SIZE); | |
3115 | writel(0x04, cp->regs + REG_MAC_JAM_SIZE); | |
3116 | writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); | |
3117 | writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); | |
3118 | ||
3119 | writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); | |
3120 | ||
3121 | writel(0, cp->regs + REG_MAC_ADDR_FILTER0); | |
3122 | writel(0, cp->regs + REG_MAC_ADDR_FILTER1); | |
3123 | writel(0, cp->regs + REG_MAC_ADDR_FILTER2); | |
3124 | writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); | |
3125 | writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); | |
3126 | ||
3127 | /* setup mac address in perfect filter array */ | |
3128 | for (i = 0; i < 45; i++) | |
3129 | writel(0x0, cp->regs + REG_MAC_ADDRN(i)); | |
3130 | ||
3131 | writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); | |
3132 | writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); | |
3133 | writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); | |
3134 | ||
3135 | writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); | |
3136 | writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); | |
3137 | writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); | |
3138 | ||
3139 | #ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE | |
3140 | cp->mac_rx_cfg = cas_setup_multicast(cp); | |
3141 | #else | |
3142 | /* WTZ: Do what Adrian did in cas_set_multicast. Doing | |
3143 | * a writel does not seem to be necessary because Cassini | |
3144 | * seems to preserve the configuration when we do the reset. | |
3145 | * If the chip is in trouble, though, it is not clear if we | |
3146 | * can really count on this behavior. cas_set_multicast uses | |
3147 | * spin_lock_irqsave, but we are called only in cas_init_hw and | |
3148 | * cas_init_hw is protected by cas_lock_all, which calls | |
3149 | * spin_lock_irq (so it doesn't need to save the flags, and | |
6aa20a22 | 3150 | * we should be OK for the writel, as that is the only |
1f26dac3 DM |
3151 | * difference). |
3152 | */ | |
3153 | cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); | |
3154 | writel(rxcfg, cp->regs + REG_MAC_RX_CFG); | |
3155 | #endif | |
3156 | spin_lock(&cp->stat_lock[N_TX_RINGS]); | |
3157 | cas_clear_mac_err(cp); | |
3158 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); | |
3159 | ||
3160 | /* Setup MAC interrupts. We want to get all of the interesting | |
3161 | * counter expiration events, but we do not want to hear about | |
3162 | * normal rx/tx as the DMA engine tells us that. | |
3163 | */ | |
3164 | writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); | |
3165 | writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); | |
3166 | ||
3167 | /* Don't even enable the PAUSE interrupts for now; we |
3168 | * make no use of those events other than to record them. | |
3169 | */ | |
3170 | writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); | |
3171 | } | |
3172 | ||
3173 | /* Must be invoked under cp->lock. */ | |
3174 | static void cas_init_pause_thresholds(struct cas *cp) | |
3175 | { | |
3176 | /* Calculate pause thresholds. Setting the OFF threshold to the | |
3177 | * full RX fifo size effectively disables PAUSE generation | |
3178 | */ | |
3179 | if (cp->rx_fifo_size <= (2 * 1024)) { | |
3180 | cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; | |
3181 | } else { | |
3182 | int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; | |
3183 | if (max_frame * 3 > cp->rx_fifo_size) { | |
3184 | cp->rx_pause_off = 7104; | |
3185 | cp->rx_pause_on = 960; | |
3186 | } else { | |
3187 | int off = (cp->rx_fifo_size - (max_frame * 2)); | |
3188 | int on = off - max_frame; | |
3189 | cp->rx_pause_off = off; | |
3190 | cp->rx_pause_on = on; | |
3191 | } | |
3192 | } | |
3193 | } | |
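/* Worked example with illustrative numbers (a 1500-byte MTU and an
 * assumed 16 KB RX FIFO): max_frame = (1500 + 14 + 4 + 4 + 64) & ~63
 * = 1536, and 3 * 1536 fits in the FIFO, so
 *     rx_pause_off = 16384 - 2 * 1536 = 13312
 *     rx_pause_on  = 13312 - 1536     = 11776
 * i.e. XOFF PAUSE generation starts once less than two max-size
 * frames of FIFO space remain, and stops once an additional
 * frame's worth has drained.
 */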
3194 | ||
3195 | static int cas_vpd_match(const void __iomem *p, const char *str) | |
3196 | { | |
3197 | int len = strlen(str) + 1; | |
3198 | int i; | |
6aa20a22 | 3199 | |
1f26dac3 DM |
3200 | for (i = 0; i < len; i++) { |
3201 | if (readb(p + i) != str[i]) | |
3202 | return 0; | |
3203 | } | |
3204 | return 1; | |
3205 | } | |
3206 | ||
3207 | ||
3208 | /* get the mac address by reading the vpd information in the rom. | |
3209 | * also get the phy type and determine if there's an entropy generator. | |
3210 | * NOTE: this is a bit convoluted for the following reasons: | |
3211 | * 1) vpd info has order-dependent mac addresses for multinic cards | |
3212 | * 2) the only way to determine the nic order is to use the slot | |
3213 | * number. | |
3214 | * 3) fiber cards don't have bridges, so their slot numbers don't | |
3215 | * mean anything. | |
6aa20a22 | 3216 | * 4) we don't actually know we have a fiber card until after |
1f26dac3 DM |
3217 | * the mac addresses are parsed. |
3218 | */ | |
3219 | static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, | |
3220 | const int offset) | |
3221 | { | |
3222 | void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; | |
3223 | void __iomem *base, *kstart; | |
3224 | int i, len; | |
3225 | int found = 0; | |
3226 | #define VPD_FOUND_MAC 0x01 | |
3227 | #define VPD_FOUND_PHY 0x02 | |
3228 | ||
3229 | int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ | |
3230 | int mac_off = 0; | |
3231 | ||
3232 | /* give us access to the PROM */ | |
3233 | writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, | |
3234 | cp->regs + REG_BIM_LOCAL_DEV_EN); | |
3235 | ||
3236 | /* check for an expansion rom */ | |
3237 | if (readb(p) != 0x55 || readb(p + 1) != 0xaa) | |
3238 | goto use_random_mac_addr; | |
3239 | ||
3240 | /* search for beginning of vpd */ | |
46d7031e | 3241 | base = NULL; |
1f26dac3 DM |
3242 | for (i = 2; i < EXPANSION_ROM_SIZE; i++) { |
3243 | /* check for PCIR */ | |
3244 | if ((readb(p + i + 0) == 0x50) && | |
3245 | (readb(p + i + 1) == 0x43) && | |
3246 | (readb(p + i + 2) == 0x49) && | |
3247 | (readb(p + i + 3) == 0x52)) { | |
6aa20a22 | 3248 | base = p + (readb(p + i + 8) | |
1f26dac3 DM |
3249 | (readb(p + i + 9) << 8)); |
3250 | break; | |
6aa20a22 | 3251 | } |
1f26dac3 DM |
3252 | } |
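/* Bytes 8-9 of the PCI data structure located above are the
 * (PCI 2.1) pointer to the Vital Product Data.  The 0x82 checked
 * below is the large-resource tag of the VPD identifier string,
 * which precedes the VPD-R (0x90) fields that are actually parsed.
 */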
3253 | ||
3254 | if (!base || (readb(base) != 0x82)) | |
3255 | goto use_random_mac_addr; | |
6aa20a22 | 3256 | |
1f26dac3 DM |
3257 | i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; |
3258 | while (i < EXPANSION_ROM_SIZE) { | |
3259 | if (readb(base + i) != 0x90) /* no vpd found */ | |
3260 | goto use_random_mac_addr; | |
3261 | ||
3262 | /* found a vpd field */ | |
3263 | len = readb(base + i + 1) | (readb(base + i + 2) << 8); | |
3264 | ||
3265 | /* extract keywords */ | |
3266 | kstart = base + i + 3; | |
3267 | p = kstart; | |
3268 | while ((p - kstart) < len) { | |
3269 | int klen = readb(p + 2); | |
3270 | int j; | |
3271 | char type; | |
3272 | ||
3273 | p += 3; | |
6aa20a22 | 3274 | |
1f26dac3 DM |
3275 | /* look for the following things: |
3276 | * -- correct length == 29 | |
6aa20a22 JG |
3277 | * 3 (type) + 2 (size) + |
3278 | * 18 (strlen("local-mac-address") + 1) + | |
3279 | * 6 (mac addr) | |
1f26dac3 DM |
3280 | * -- VPD Instance 'I' |
3281 | * -- VPD Type Bytes 'B' | |
3282 | * -- VPD data length == 6 | |
3283 | * -- property string == local-mac-address | |
6aa20a22 | 3284 | * |
1f26dac3 | 3285 | * -- correct length == 24 |
6aa20a22 JG |
3286 | * 3 (type) + 2 (size) + |
3287 | * 12 (strlen("entropy-dev") + 1) + | |
1f26dac3 DM |
3288 | * 7 (strlen("vms110") + 1) |
3289 | * -- VPD Instance 'I' | |
3290 | * -- VPD Type String 'S' |
3291 | * -- VPD data length == 7 | |
3292 | * -- property string == entropy-dev | |
3293 | * | |
3294 | * -- correct length == 18 | |
6aa20a22 JG |
3295 | * 3 (type) + 2 (size) + |
3296 | * 9 (strlen("phy-type") + 1) + | |
1f26dac3 DM |
3297 | * 4 (strlen("pcs") + 1) |
3298 | * -- VPD Instance 'I' | |
3299 | * -- VPD Type String 'S' | |
3300 | * -- VPD data length == 4 | |
3301 | * -- property string == phy-type | |
6aa20a22 | 3302 | * |
1f26dac3 | 3303 | * -- correct length == 23 |
6aa20a22 JG |
3304 | * 3 (type) + 2 (size) + |
3305 | * 14 (strlen("phy-interface") + 1) + | |
1f26dac3 DM |
3306 | * 4 (strlen("pcs") + 1) |
3307 | * -- VPD Instance 'I' | |
3308 | * -- VPD Type String 'S' | |
3309 | * -- VPD data length == 4 | |
3310 | * -- property string == phy-interface | |
3311 | */ | |
3312 | if (readb(p) != 'I') | |
3313 | goto next; | |
3314 | ||
3315 | /* finally, check string and length */ | |
3316 | type = readb(p + 3); | |
3317 | if (type == 'B') { | |
3318 | if ((klen == 29) && readb(p + 4) == 6 && | |
6aa20a22 | 3319 | cas_vpd_match(p + 5, |
1f26dac3 | 3320 | "local-mac-address")) { |
6aa20a22 | 3321 | if (mac_off++ > offset) |
1f26dac3 DM |
3322 | goto next; |
3323 | ||
3324 | /* set mac address */ | |
6aa20a22 JG |
3325 | for (j = 0; j < 6; j++) |
3326 | dev_addr[j] = | |
1f26dac3 DM |
3327 | readb(p + 23 + j); |
3328 | goto found_mac; | |
3329 | } | |
3330 | } | |
3331 | ||
3332 | if (type != 'S') | |
3333 | goto next; | |
3334 | ||
3335 | #ifdef USE_ENTROPY_DEV | |
6aa20a22 | 3336 | if ((klen == 24) && |
1f26dac3 DM |
3337 | cas_vpd_match(p + 5, "entropy-dev") && |
3338 | cas_vpd_match(p + 17, "vms110")) { | |
3339 | cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; | |
3340 | goto next; | |
3341 | } | |
3342 | #endif | |
3343 | ||
3344 | if (found & VPD_FOUND_PHY) | |
3345 | goto next; | |
3346 | ||
3347 | if ((klen == 18) && readb(p + 4) == 4 && | |
3348 | cas_vpd_match(p + 5, "phy-type")) { | |
3349 | if (cas_vpd_match(p + 14, "pcs")) { | |
3350 | phy_type = CAS_PHY_SERDES; | |
3351 | goto found_phy; | |
3352 | } | |
3353 | } | |
6aa20a22 | 3354 | |
1f26dac3 DM |
3355 | if ((klen == 23) && readb(p + 4) == 4 && |
3356 | cas_vpd_match(p + 5, "phy-interface")) { | |
3357 | if (cas_vpd_match(p + 19, "pcs")) { | |
3358 | phy_type = CAS_PHY_SERDES; | |
3359 | goto found_phy; | |
3360 | } | |
3361 | } | |
3362 | found_mac: | |
3363 | found |= VPD_FOUND_MAC; | |
3364 | goto next; | |
3365 | ||
3366 | found_phy: | |
3367 | found |= VPD_FOUND_PHY; | |
3368 | ||
3369 | next: | |
3370 | p += klen; | |
3371 | } | |
3372 | i += len + 3; | |
3373 | } | |
3374 | ||
3375 | use_random_mac_addr: | |
3376 | if (found & VPD_FOUND_MAC) | |
3377 | goto done; | |
3378 | ||
3379 | /* Sun MAC prefix then 3 random bytes. */ | |
3380 | printk(PFX "MAC address not found in ROM VPD\n"); | |
3381 | dev_addr[0] = 0x08; | |
3382 | dev_addr[1] = 0x00; | |
3383 | dev_addr[2] = 0x20; | |
3384 | get_random_bytes(dev_addr + 3, 3); | |
3385 | ||
3386 | done: | |
3387 | writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); | |
3388 | return phy_type; | |
3389 | } | |
3390 | ||
3391 | /* check pci invariants */ | |
3392 | static void cas_check_pci_invariants(struct cas *cp) | |
3393 | { | |
3394 | struct pci_dev *pdev = cp->pdev; | |
1f26dac3 DM |
3395 | |
3396 | cp->cas_flags = 0; | |
1f26dac3 DM |
3397 | if ((pdev->vendor == PCI_VENDOR_ID_SUN) && |
3398 | (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { | |
44c10138 | 3399 | if (pdev->revision >= CAS_ID_REVPLUS) |
1f26dac3 | 3400 | cp->cas_flags |= CAS_FLAG_REG_PLUS; |
44c10138 | 3401 | if (pdev->revision < CAS_ID_REVPLUS02u) |
1f26dac3 DM |
3402 | cp->cas_flags |= CAS_FLAG_TARGET_ABORT; |
3403 | ||
3404 | /* Original Cassini supports HW CSUM, but it's not | |
3405 | * enabled by default as it can trigger TX hangs. | |
3406 | */ | |
44c10138 | 3407 | if (pdev->revision < CAS_ID_REV2) |
1f26dac3 DM |
3408 | cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; |
3409 | } else { | |
3410 | /* Only Sun has original Cassini chips. */ |
3411 | cp->cas_flags |= CAS_FLAG_REG_PLUS; | |
3412 | ||
3413 | /* We use a flag because the same phy might be externally | |
3414 | * connected. | |
3415 | */ | |
3416 | if ((pdev->vendor == PCI_VENDOR_ID_NS) && | |
3417 | (pdev->device == PCI_DEVICE_ID_NS_SATURN)) | |
3418 | cp->cas_flags |= CAS_FLAG_SATURN; | |
3419 | } | |
3420 | } | |
3421 | ||
3422 | ||
3423 | static int cas_check_invariants(struct cas *cp) | |
3424 | { | |
3425 | struct pci_dev *pdev = cp->pdev; | |
3426 | u32 cfg; | |
3427 | int i; | |
3428 | ||
3429 | /* get page size for rx buffers. */ | |
6aa20a22 | 3430 | cp->page_order = 0; |
1f26dac3 DM |
3431 | #ifdef USE_PAGE_ORDER |
3432 | if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { | |
3433 | /* see if we can allocate larger pages */ | |
6aa20a22 JG |
3434 | struct page *page = alloc_pages(GFP_ATOMIC, |
3435 | CAS_JUMBO_PAGE_SHIFT - | |
1f26dac3 DM |
3436 | PAGE_SHIFT); |
3437 | if (page) { | |
3438 | __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); | |
3439 | cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; | |
3440 | } else { | |
3441 | printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); | |
3442 | } | |
3443 | } | |
3444 | #endif | |
3445 | cp->page_size = (PAGE_SIZE << cp->page_order); | |
3446 | ||
3447 | /* Fetch the FIFO configurations. */ | |
3448 | cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; | |
3449 | cp->rx_fifo_size = RX_FIFO_SIZE; | |
3450 | ||
6aa20a22 | 3451 | /* finish phy determination. MDIO1 takes precedence over MDIO0 if |
1f26dac3 DM |
3452 | * they're both connected. |
3453 | */ | |
6aa20a22 | 3454 | cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, |
1f26dac3 DM |
3455 | PCI_SLOT(pdev->devfn)); |
3456 | if (cp->phy_type & CAS_PHY_SERDES) { | |
3457 | cp->cas_flags |= CAS_FLAG_1000MB_CAP; | |
3458 | return 0; /* no more checking needed */ | |
6aa20a22 | 3459 | } |
1f26dac3 DM |
3460 | |
3461 | /* MII */ | |
3462 | cfg = readl(cp->regs + REG_MIF_CFG); | |
3463 | if (cfg & MIF_CFG_MDIO_1) { | |
3464 | cp->phy_type = CAS_PHY_MII_MDIO1; | |
3465 | } else if (cfg & MIF_CFG_MDIO_0) { | |
3466 | cp->phy_type = CAS_PHY_MII_MDIO0; | |
3467 | } | |
3468 | ||
3469 | cas_mif_poll(cp, 0); | |
3470 | writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); | |
3471 | ||
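/* Probe all 32 possible MII addresses.  A present PHY returns a
 * non-zero ID that is not 0xFFFFFFFF (all-ones is what an empty
 * bus reads back); each address is tried three times, presumably
 * to ride out a PHY that is still settling after the datapath
 * switch above.
 */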
3472 | for (i = 0; i < 32; i++) { | |
3473 | u32 phy_id; | |
3474 | int j; | |
3475 | ||
3476 | for (j = 0; j < 3; j++) { | |
3477 | cp->phy_addr = i; | |
3478 | phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; | |
3479 | phy_id |= cas_phy_read(cp, MII_PHYSID2); | |
3480 | if (phy_id && (phy_id != 0xFFFFFFFF)) { | |
3481 | cp->phy_id = phy_id; | |
3482 | goto done; | |
3483 | } | |
3484 | } | |
3485 | } | |
3486 | printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", | |
3487 | readl(cp->regs + REG_MIF_STATE_MACHINE)); | |
3488 | return -1; | |
3489 | ||
3490 | done: | |
3491 | /* see if we can do gigabit */ | |
3492 | cfg = cas_phy_read(cp, MII_BMSR); | |
6aa20a22 | 3493 | if ((cfg & CAS_BMSR_1000_EXTEND) && |
1f26dac3 DM |
3494 | cas_phy_read(cp, CAS_MII_1000_EXTEND)) |
3495 | cp->cas_flags |= CAS_FLAG_1000MB_CAP; | |
3496 | return 0; | |
3497 | } | |
3498 | ||
3499 | /* Must be invoked under cp->lock. */ | |
3500 | static inline void cas_start_dma(struct cas *cp) | |
3501 | { | |
3502 | int i; | |
3503 | u32 val; | |
3504 | int txfailed = 0; | |
6aa20a22 | 3505 | |
1f26dac3 DM |
3506 | /* enable dma */ |
3507 | val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; | |
3508 | writel(val, cp->regs + REG_TX_CFG); | |
3509 | val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; | |
3510 | writel(val, cp->regs + REG_RX_CFG); | |
3511 | ||
3512 | /* enable the mac */ | |
3513 | val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; | |
3514 | writel(val, cp->regs + REG_MAC_TX_CFG); | |
3515 | val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; | |
3516 | writel(val, cp->regs + REG_MAC_RX_CFG); | |
3517 | ||
3518 | i = STOP_TRIES; | |
3519 | while (i-- > 0) { | |
3520 | val = readl(cp->regs + REG_MAC_TX_CFG); | |
3521 | if ((val & MAC_TX_CFG_EN)) | |
3522 | break; | |
3523 | udelay(10); | |
3524 | } | |
3525 | if (i < 0) txfailed = 1; | |
3526 | i = STOP_TRIES; | |
3527 | while (i-- > 0) { | |
3528 | val = readl(cp->regs + REG_MAC_RX_CFG); | |
3529 | if ((val & MAC_RX_CFG_EN)) { | |
3530 | if (txfailed) { | |
6aa20a22 JG |
3531 | printk(KERN_ERR |
3532 | "%s: enabling mac failed [tx:%08x:%08x].\n", | |
1f26dac3 DM |
3533 | cp->dev->name, |
3534 | readl(cp->regs + REG_MIF_STATE_MACHINE), | |
3535 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | |
3536 | } | |
3537 | goto enable_rx_done; | |
3538 | } | |
3539 | udelay(10); | |
3540 | } | |
6aa20a22 | 3541 | printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", |
1f26dac3 DM |
3542 | cp->dev->name, |
3543 | (txfailed? "tx,rx":"rx"), | |
3544 | readl(cp->regs + REG_MIF_STATE_MACHINE), | |
3545 | readl(cp->regs + REG_MAC_STATE_MACHINE)); | |
3546 | ||
3547 | enable_rx_done: | |
3548 | cas_unmask_intr(cp); /* enable interrupts */ | |
3549 | writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); | |
3550 | writel(0, cp->regs + REG_RX_COMP_TAIL); | |
3551 | ||
3552 | if (cp->cas_flags & CAS_FLAG_REG_PLUS) { | |
6aa20a22 JG |
3553 | if (N_RX_DESC_RINGS > 1) |
3554 | writel(RX_DESC_RINGN_SIZE(1) - 4, | |
1f26dac3 DM |
3555 | cp->regs + REG_PLUS_RX_KICK1); |
3556 | ||
6aa20a22 | 3557 | for (i = 1; i < N_RX_COMP_RINGS; i++) |
1f26dac3 DM |
3558 | writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); |
3559 | } | |
3560 | } | |
3561 | ||
3562 | /* Must be invoked under cp->lock. */ | |
3563 | static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, | |
3564 | int *pause) | |
3565 | { | |
3566 | u32 val = readl(cp->regs + REG_PCS_MII_LPA); | |
3567 | *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; | |
3568 | *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; | |
3569 | if (val & PCS_MII_LPA_ASYM_PAUSE) | |
3570 | *pause |= 0x10; | |
3571 | *spd = 1000; | |
3572 | } | |
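/* Pause encoding shared with cas_read_mii_link_mode() below and
 * consumed by cas_set_link_modes(): bit 0 (0x01) = symmetric
 * pause negotiated, bit 4 (0x10) = asymmetric.  Any nonzero value
 * enables sending PAUSE frames; only the symmetric bit also
 * enables honoring received ones.
 */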
3573 | ||
3574 | /* Must be invoked under cp->lock. */ | |
3575 | static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, | |
3576 | int *pause) | |
3577 | { | |
3578 | u32 val; | |
3579 | ||
3580 | *fd = 0; | |
3581 | *spd = 10; | |
3582 | *pause = 0; | |
6aa20a22 | 3583 | |
1f26dac3 DM |
3584 | /* use GMII registers */ |
3585 | val = cas_phy_read(cp, MII_LPA); | |
3586 | if (val & CAS_LPA_PAUSE) | |
3587 | *pause = 0x01; | |
3588 | ||
3589 | if (val & CAS_LPA_ASYM_PAUSE) | |
3590 | *pause |= 0x10; | |
3591 | ||
3592 | if (val & LPA_DUPLEX) | |
3593 | *fd = 1; | |
3594 | if (val & LPA_100) | |
3595 | *spd = 100; | |
3596 | ||
3597 | if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { | |
3598 | val = cas_phy_read(cp, CAS_MII_1000_STATUS); | |
3599 | if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) | |
3600 | *spd = 1000; | |
3601 | if (val & CAS_LPA_1000FULL) | |
3602 | *fd = 1; | |
3603 | } | |
3604 | } | |
3605 | ||
3606 | /* A link-up condition has occurred, initialize and enable the | |
3607 | * rest of the chip. | |
3608 | * | |
3609 | * Must be invoked under cp->lock. | |
3610 | */ | |
3611 | static void cas_set_link_modes(struct cas *cp) | |
3612 | { | |
3613 | u32 val; | |
3614 | int full_duplex, speed, pause; | |
3615 | ||
3616 | full_duplex = 0; | |
3617 | speed = 10; | |
3618 | pause = 0; | |
3619 | ||
3620 | if (CAS_PHY_MII(cp->phy_type)) { | |
3621 | cas_mif_poll(cp, 0); | |
3622 | val = cas_phy_read(cp, MII_BMCR); | |
3623 | if (val & BMCR_ANENABLE) { | |
6aa20a22 | 3624 | cas_read_mii_link_mode(cp, &full_duplex, &speed, |
1f26dac3 DM |
3625 | &pause); |
3626 | } else { | |
3627 | if (val & BMCR_FULLDPLX) | |
3628 | full_duplex = 1; | |
3629 | ||
3630 | if (val & BMCR_SPEED100) | |
3631 | speed = 100; | |
3632 | else if (val & CAS_BMCR_SPEED1000) | |
3633 | speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? | |
3634 | 1000 : 100; | |
3635 | } | |
3636 | cas_mif_poll(cp, 1); | |
3637 | ||
3638 | } else { | |
3639 | val = readl(cp->regs + REG_PCS_MII_CTRL); | |
3640 | cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); | |
3641 | if ((val & PCS_MII_AUTONEG_EN) == 0) { | |
3642 | if (val & PCS_MII_CTRL_DUPLEX) | |
3643 | full_duplex = 1; | |
3644 | } | |
3645 | } | |
3646 | ||
3647 | if (netif_msg_link(cp)) | |
3648 | printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", | |
3649 | cp->dev->name, speed, (full_duplex ? "full" : "half")); | |
3650 | ||
3651 | val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; | |
3652 | if (CAS_PHY_MII(cp->phy_type)) { | |
3653 | val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; | |
3654 | if (!full_duplex) | |
3655 | val |= MAC_XIF_DISABLE_ECHO; | |
3656 | } | |
6aa20a22 | 3657 | if (full_duplex) |
1f26dac3 DM |
3658 | val |= MAC_XIF_FDPLX_LED; |
3659 | if (speed == 1000) | |
3660 | val |= MAC_XIF_GMII_MODE; | |
3661 | writel(val, cp->regs + REG_MAC_XIF_CFG); | |
3662 | ||
3663 | /* deal with carrier and collision detect. */ | |
3664 | val = MAC_TX_CFG_IPG_EN; | |
3665 | if (full_duplex) { | |
3666 | val |= MAC_TX_CFG_IGNORE_CARRIER; | |
3667 | val |= MAC_TX_CFG_IGNORE_COLL; | |
3668 | } else { | |
3669 | #ifndef USE_CSMA_CD_PROTO | |
3670 | val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; | |
3671 | val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; | |
3672 | #endif | |
3673 | } | |
3674 | /* val now set up for REG_MAC_TX_CFG */ | |
3675 | ||
3676 | /* If gigabit and half-duplex, enable carrier extension | |
6aa20a22 | 3677 | * mode. increase slot time to 512 bytes as well. |
1f26dac3 DM |
3678 | * else, disable it and make sure slot time is 64 bytes. |
3679 | * also activate checksum bug workaround | |
3680 | */ | |
3681 | if ((speed == 1000) && !full_duplex) { | |
6aa20a22 | 3682 | writel(val | MAC_TX_CFG_CARRIER_EXTEND, |
1f26dac3 DM |
3683 | cp->regs + REG_MAC_TX_CFG); |
3684 | ||
3685 | val = readl(cp->regs + REG_MAC_RX_CFG); | |
3686 | val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ | |
6aa20a22 | 3687 | writel(val | MAC_RX_CFG_CARRIER_EXTEND, |
1f26dac3 DM |
3688 | cp->regs + REG_MAC_RX_CFG); |
3689 | ||
3690 | writel(0x200, cp->regs + REG_MAC_SLOT_TIME); | |
3691 | ||
3692 | cp->crc_size = 4; | |
3693 | /* minimum size gigabit frame at half duplex */ | |
3694 | cp->min_frame_size = CAS_1000MB_MIN_FRAME; | |
3695 | ||
3696 | } else { | |
3697 | writel(val, cp->regs + REG_MAC_TX_CFG); | |
3698 | ||
6aa20a22 | 3699 | /* checksum bug workaround. don't strip FCS when in |
1f26dac3 DM |
3700 | * half-duplex mode |
3701 | */ | |
3702 | val = readl(cp->regs + REG_MAC_RX_CFG); | |
3703 | if (full_duplex) { | |
3704 | val |= MAC_RX_CFG_STRIP_FCS; | |
3705 | cp->crc_size = 0; | |
3706 | cp->min_frame_size = CAS_MIN_MTU; | |
3707 | } else { | |
3708 | val &= ~MAC_RX_CFG_STRIP_FCS; | |
3709 | cp->crc_size = 4; | |
3710 | cp->min_frame_size = CAS_MIN_FRAME; | |
3711 | } | |
6aa20a22 | 3712 | writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, |
1f26dac3 DM |
3713 | cp->regs + REG_MAC_RX_CFG); |
3714 | writel(0x40, cp->regs + REG_MAC_SLOT_TIME); | |
3715 | } | |
3716 | ||
3717 | if (netif_msg_link(cp)) { | |
3718 | if (pause & 0x01) { | |
3719 | printk(KERN_INFO "%s: Pause is enabled " | |
3720 | "(rxfifo: %d off: %d on: %d)\n", | |
3721 | cp->dev->name, | |
3722 | cp->rx_fifo_size, | |
3723 | cp->rx_pause_off, | |
3724 | cp->rx_pause_on); | |
3725 | } else if (pause & 0x10) { | |
3726 | printk(KERN_INFO "%s: TX pause enabled\n", | |
3727 | cp->dev->name); | |
3728 | } else { | |
3729 | printk(KERN_INFO "%s: Pause is disabled\n", | |
3730 | cp->dev->name); | |
3731 | } | |
3732 | } | |
3733 | ||
3734 | val = readl(cp->regs + REG_MAC_CTRL_CFG); | |
3735 | val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); | |
3736 | if (pause) { /* symmetric or asymmetric pause */ | |
3737 | val |= MAC_CTRL_CFG_SEND_PAUSE_EN; | |
3738 | if (pause & 0x01) { /* symmetric pause */ | |
3739 | val |= MAC_CTRL_CFG_RECV_PAUSE_EN; | |
6aa20a22 | 3740 | } |
1f26dac3 DM |
3741 | } |
3742 | writel(val, cp->regs + REG_MAC_CTRL_CFG); | |
3743 | cas_start_dma(cp); | |
3744 | } | |
3745 | ||
3746 | /* Must be invoked under cp->lock. */ | |
3747 | static void cas_init_hw(struct cas *cp, int restart_link) | |
3748 | { | |
3749 | if (restart_link) | |
3750 | cas_phy_init(cp); | |
3751 | ||
3752 | cas_init_pause_thresholds(cp); | |
3753 | cas_init_mac(cp); | |
3754 | cas_init_dma(cp); | |
3755 | ||
3756 | if (restart_link) { | |
3757 | /* Default aneg parameters */ | |
3758 | cp->timer_ticks = 0; | |
3759 | cas_begin_auto_negotiation(cp, NULL); | |
3760 | } else if (cp->lstate == link_up) { | |
3761 | cas_set_link_modes(cp); | |
3762 | netif_carrier_on(cp->dev); | |
3763 | } | |
3764 | } | |
3765 | ||
3766 | /* Must be invoked under cp->lock. on earlier cassini boards, | |
3767 | * SOFT_0 is tied to PCI reset. we use this to force a pci reset, | |
3768 | * let it settle out, and then restore pci state. | |
3769 | */ | |
3770 | static void cas_hard_reset(struct cas *cp) | |
3771 | { | |
6aa20a22 | 3772 | writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); |
1f26dac3 DM |
3773 | udelay(20); |
3774 | pci_restore_state(cp->pdev); | |
3775 | } | |
3776 | ||
3777 | ||
3778 | static void cas_global_reset(struct cas *cp, int blkflag) | |
3779 | { | |
3780 | int limit; | |
3781 | ||
3782 | /* issue a global reset. don't use RSTOUT. */ | |
3783 | if (blkflag && !CAS_PHY_MII(cp->phy_type)) { | |
3784 | /* For PCS, when the blkflag is set, we should set the | |
3785 | * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of |
3786 | * the last autonegotiation from being cleared. We'll | |
3787 | * need some special handling if the chip is set into a | |
3788 | * loopback mode. | |
3789 | */ | |
6aa20a22 | 3790 | writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), |
1f26dac3 DM |
3791 | cp->regs + REG_SW_RESET); |
3792 | } else { | |
3793 | writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); | |
3794 | } | |
3795 | ||
3796 | /* need to wait at least 3ms before polling register */ | |
3797 | mdelay(3); | |
3798 | ||
3799 | limit = STOP_TRIES; | |
3800 | while (limit-- > 0) { | |
3801 | u32 val = readl(cp->regs + REG_SW_RESET); | |
3802 | if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) | |
3803 | goto done; | |
3804 | udelay(10); | |
3805 | } | |
3806 | printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); | |
3807 | ||
3808 | done: | |
3809 | /* enable various BIM interrupts */ | |
6aa20a22 | 3810 | writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | |
1f26dac3 DM |
3811 | BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); |
3812 | ||
3813 | /* clear out pci error status mask for handled errors. | |
3814 | * we don't deal with DMA counter overflows as they happen | |
3815 | * all the time. | |
3816 | */ | |
6aa20a22 JG |
3817 | writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | |
3818 | PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | | |
3819 | PCI_ERR_BIM_DMA_READ), cp->regs + | |
1f26dac3 DM |
3820 | REG_PCI_ERR_STATUS_MASK); |
3821 | ||
3822 | /* set up for MII by default to address mac rx reset timeout | |
3823 | * issue | |
3824 | */ | |
3825 | writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); | |
3826 | } | |
3827 | ||
3828 | static void cas_reset(struct cas *cp, int blkflag) | |
3829 | { | |
3830 | u32 val; | |
3831 | ||
3832 | cas_mask_intr(cp); | |
3833 | cas_global_reset(cp, blkflag); | |
3834 | cas_mac_reset(cp); | |
3835 | cas_entropy_reset(cp); | |
3836 | ||
3837 | /* disable dma engines. */ | |
3838 | val = readl(cp->regs + REG_TX_CFG); | |
3839 | val &= ~TX_CFG_DMA_EN; | |
3840 | writel(val, cp->regs + REG_TX_CFG); | |
3841 | ||
3842 | val = readl(cp->regs + REG_RX_CFG); | |
3843 | val &= ~RX_CFG_DMA_EN; | |
3844 | writel(val, cp->regs + REG_RX_CFG); | |
3845 | ||
3846 | /* program header parser */ | |
3847 | if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || | |
3848 | (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { | |
3849 | cas_load_firmware(cp, CAS_HP_FIRMWARE); | |
3850 | } else { | |
3851 | cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); | |
3852 | } | |
3853 | ||
3854 | /* clear out error registers */ | |
3855 | spin_lock(&cp->stat_lock[N_TX_RINGS]); | |
3856 | cas_clear_mac_err(cp); | |
3857 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); | |
3858 | } | |
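/* Note on the firmware choice above: boards subject to target
 * aborts, and builds where no alternate image is compiled in
 * (CAS_HP_ALT_FIRMWARE == cas_prog_null), reload the stock header
 * parser firmware; everything else gets the alternate image.
 */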
3859 | ||
758df69e | 3860 | /* Shut down the chip, must be called with pm_mutex held. */ |
1f26dac3 DM |
3861 | static void cas_shutdown(struct cas *cp) |
3862 | { | |
3863 | unsigned long flags; | |
3864 | ||
3865 | /* Make us not-running to avoid timers respawning */ | |
3866 | cp->hw_running = 0; | |
3867 | ||
3868 | del_timer_sync(&cp->link_timer); | |
3869 | ||
3870 | /* Stop the reset task */ | |
3871 | #if 0 | |
3872 | while (atomic_read(&cp->reset_task_pending_mtu) || | |
3873 | atomic_read(&cp->reset_task_pending_spare) || | |
3874 | atomic_read(&cp->reset_task_pending_all)) | |
3875 | schedule(); | |
3876 | ||
3877 | #else | |
3878 | while (atomic_read(&cp->reset_task_pending)) | |
3879 | schedule(); | |
6aa20a22 | 3880 | #endif |
1f26dac3 DM |
3881 | /* Actually stop the chip */ |
3882 | cas_lock_all_save(cp, flags); | |
3883 | cas_reset(cp, 0); | |
3884 | if (cp->cas_flags & CAS_FLAG_SATURN) | |
3885 | cas_phy_powerdown(cp); | |
3886 | cas_unlock_all_restore(cp, flags); | |
3887 | } | |
3888 | ||
3889 | static int cas_change_mtu(struct net_device *dev, int new_mtu) | |
3890 | { | |
3891 | struct cas *cp = netdev_priv(dev); | |
3892 | ||
3893 | if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) | |
3894 | return -EINVAL; | |
3895 | ||
3896 | dev->mtu = new_mtu; | |
3897 | if (!netif_running(dev) || !netif_device_present(dev)) | |
3898 | return 0; | |
3899 | ||
3900 | /* let the reset task handle it */ | |
3901 | #if 1 | |
3902 | atomic_inc(&cp->reset_task_pending); | |
3903 | if ((cp->phy_type & CAS_PHY_SERDES)) { | |
3904 | atomic_inc(&cp->reset_task_pending_all); | |
3905 | } else { | |
3906 | atomic_inc(&cp->reset_task_pending_mtu); | |
3907 | } | |
3908 | schedule_work(&cp->reset_task); | |
3909 | #else | |
6aa20a22 | 3910 | atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? |
1f26dac3 DM |
3911 | CAS_RESET_ALL : CAS_RESET_MTU); |
3912 | printk(KERN_ERR "reset called in cas_change_mtu\n"); | |
3913 | schedule_work(&cp->reset_task); | |
3914 | #endif | |
3915 | ||
3916 | flush_scheduled_work(); | |
3917 | return 0; | |
3918 | } | |
3919 | ||
3920 | static void cas_clean_txd(struct cas *cp, int ring) | |
3921 | { | |
3922 | struct cas_tx_desc *txd = cp->init_txds[ring]; | |
3923 | struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; | |
3924 | u64 daddr, dlen; | |
3925 | int i, size; | |
3926 | ||
3927 | size = TX_DESC_RINGN_SIZE(ring); | |
3928 | for (i = 0; i < size; i++) { | |
3929 | int frag; | |
3930 | ||
3931 | if (skbs[i] == NULL) | |
3932 | continue; | |
3933 | ||
3934 | skb = skbs[i]; | |
3935 | skbs[i] = NULL; | |
3936 | ||
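/* The loop below walks nr_frags + 1 buffers: the skb's linear
 * data plus one descriptor per page fragment.  "Tiny" buffers
 * interleaved after a fragment are skipped rather than unmapped,
 * since they come from a single pre-allocated coherent block.
 */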
3937 | for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { | |
3938 | int ent = i & (size - 1); | |
3939 | ||
3940 | /* first buffer is never a tiny buffer and so | |
3941 | * needs to be unmapped. | |
3942 | */ | |
3943 | daddr = le64_to_cpu(txd[ent].buffer); | |
6aa20a22 | 3944 | dlen = CAS_VAL(TX_DESC_BUFLEN, |
1f26dac3 DM |
3945 | le64_to_cpu(txd[ent].control)); |
3946 | pci_unmap_page(cp->pdev, daddr, dlen, | |
3947 | PCI_DMA_TODEVICE); | |
3948 | ||
3949 | if (frag != skb_shinfo(skb)->nr_frags) { | |
3950 | i++; | |
3951 | ||
3952 | /* next buffer might be a tiny buffer. |
3953 | * skip past it. | |
3954 | */ | |
3955 | ent = i & (size - 1); | |
3956 | if (cp->tx_tiny_use[ring][ent].used) | |
3957 | i++; | |
3958 | } | |
3959 | } | |
3960 | dev_kfree_skb_any(skb); | |
3961 | } | |
3962 | ||
3963 | /* zero out tiny buf usage */ | |
3964 | memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); | |
3965 | } | |
3966 | ||
3967 | /* freed on close */ | |
3968 | static inline void cas_free_rx_desc(struct cas *cp, int ring) | |
3969 | { | |
3970 | cas_page_t **page = cp->rx_pages[ring]; | |
3971 | int i, size; | |
3972 | ||
3973 | size = RX_DESC_RINGN_SIZE(ring); | |
3974 | for (i = 0; i < size; i++) { | |
3975 | if (page[i]) { | |
3976 | cas_page_free(cp, page[i]); | |
3977 | page[i] = NULL; | |
3978 | } | |
3979 | } | |
3980 | } | |
3981 | ||
3982 | static void cas_free_rxds(struct cas *cp) | |
3983 | { | |
3984 | int i; | |
3985 | ||
3986 | for (i = 0; i < N_RX_DESC_RINGS; i++) | |
3987 | cas_free_rx_desc(cp, i); | |
3988 | } | |
3989 | ||
3990 | /* Must be invoked under cp->lock. */ | |
3991 | static void cas_clean_rings(struct cas *cp) | |
3992 | { | |
3993 | int i; | |
3994 | ||
3995 | /* need to clean all tx rings */ | |
3996 | memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); | |
3997 | memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); | |
3998 | for (i = 0; i < N_TX_RINGS; i++) | |
3999 | cas_clean_txd(cp, i); | |
4000 | ||
4001 | /* zero out init block */ | |
4002 | memset(cp->init_block, 0, sizeof(struct cas_init_block)); | |
4003 | cas_clean_rxds(cp); | |
4004 | cas_clean_rxcs(cp); | |
4005 | } | |
4006 | ||
4007 | /* allocated on open */ | |
4008 | static inline int cas_alloc_rx_desc(struct cas *cp, int ring) | |
4009 | { | |
4010 | cas_page_t **page = cp->rx_pages[ring]; | |
4011 | int size, i = 0; | |
4012 | ||
4013 | size = RX_DESC_RINGN_SIZE(ring); | |
4014 | for (i = 0; i < size; i++) { | |
6aa20a22 | 4015 | if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) |
1f26dac3 DM |
4016 | return -1; |
4017 | } | |
4018 | return 0; | |
4019 | } | |
4020 | ||
4021 | static int cas_alloc_rxds(struct cas *cp) | |
4022 | { | |
4023 | int i; | |
4024 | ||
4025 | for (i = 0; i < N_RX_DESC_RINGS; i++) { | |
4026 | if (cas_alloc_rx_desc(cp, i) < 0) { | |
4027 | cas_free_rxds(cp); | |
4028 | return -1; | |
4029 | } | |
4030 | } | |
4031 | return 0; | |
4032 | } | |
4033 | ||
c4028958 | 4034 | static void cas_reset_task(struct work_struct *work) |
1f26dac3 | 4035 | { |
c4028958 | 4036 | struct cas *cp = container_of(work, struct cas, reset_task); |
1f26dac3 DM |
4037 | #if 0 |
4038 | int pending = atomic_read(&cp->reset_task_pending); | |
4039 | #else | |
4040 | int pending_all = atomic_read(&cp->reset_task_pending_all); | |
4041 | int pending_spare = atomic_read(&cp->reset_task_pending_spare); | |
4042 | int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); | |
4043 | ||
4044 | if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { | |
4045 | /* We can have more tasks scheduled than actually | |
4046 | * needed. | |
4047 | */ | |
4048 | atomic_dec(&cp->reset_task_pending); | |
4049 | return; | |
4050 | } | |
4051 | #endif | |
4052 | /* The link went down, we reset the ring, but keep | |
4053 | * DMA stopped. Use this function for reset | |
4054 | * on error as well. | |
4055 | */ | |
4056 | if (cp->hw_running) { | |
4057 | unsigned long flags; | |
4058 | ||
4059 | /* Make sure we don't get interrupts or tx packets */ | |
4060 | netif_device_detach(cp->dev); | |
4061 | cas_lock_all_save(cp, flags); | |
4062 | ||
4063 | if (cp->opened) { | |
4064 | /* We call cas_spare_recover when we call cas_open, but |
4065 | * the lists cas_spare_recover uses are not initialized |
4066 | * until cas_open is called. |
4067 | */ | |
4068 | cas_spare_recover(cp, GFP_ATOMIC); | |
4069 | } | |
4070 | #if 1 | |
4071 | /* test => only pending_spare set */ | |
4072 | if (!pending_all && !pending_mtu) | |
4073 | goto done; | |
4074 | #else | |
4075 | if (pending == CAS_RESET_SPARE) | |
4076 | goto done; | |
4077 | #endif | |
4078 | /* when pending == CAS_RESET_ALL, the following | |
4079 | * call to cas_init_hw will restart auto negotiation. | |
4080 | * Setting the second argument of cas_reset to | |
4081 | * !(pending == CAS_RESET_ALL) will set this argument | |
6aa20a22 | 4082 | * to 1 (avoiding reinitializing the PHY for the normal |
1f26dac3 DM |
4083 | * PCS case) when auto negotiation is not restarted. |
4084 | */ | |
4085 | #if 1 | |
4086 | cas_reset(cp, !(pending_all > 0)); | |
4087 | if (cp->opened) | |
4088 | cas_clean_rings(cp); | |
4089 | cas_init_hw(cp, (pending_all > 0)); | |
4090 | #else | |
4091 | cas_reset(cp, !(pending == CAS_RESET_ALL)); | |
4092 | if (cp->opened) | |
4093 | cas_clean_rings(cp); | |
4094 | cas_init_hw(cp, pending == CAS_RESET_ALL); | |
4095 | #endif | |
4096 | ||
4097 | done: | |
4098 | cas_unlock_all_restore(cp, flags); | |
4099 | netif_device_attach(cp->dev); | |
4100 | } | |
4101 | #if 1 | |
4102 | atomic_sub(pending_all, &cp->reset_task_pending_all); | |
4103 | atomic_sub(pending_spare, &cp->reset_task_pending_spare); | |
4104 | atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); | |
4105 | atomic_dec(&cp->reset_task_pending); | |
4106 | #else | |
4107 | atomic_set(&cp->reset_task_pending, 0); | |
4108 | #endif | |
4109 | } | |
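/* Bookkeeping note: reset_task_pending counts scheduled tasks,
 * while the _all/_spare/_mtu counters record why they were
 * scheduled.  A task that finds only the spare count set can stop
 * after spare recovery without doing a full chip reset.
 */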
4110 | ||
4111 | static void cas_link_timer(unsigned long data) | |
4112 | { | |
4113 | struct cas *cp = (struct cas *) data; | |
4114 | int mask, pending = 0, reset = 0; | |
4115 | unsigned long flags; | |
4116 | ||
4117 | if (link_transition_timeout != 0 && | |
4118 | cp->link_transition_jiffies_valid && | |
6aa20a22 | 4119 | ((jiffies - cp->link_transition_jiffies) > |
1f26dac3 | 4120 | (link_transition_timeout))) { |
6aa20a22 | 4121 | /* One-second throttle so the link-down workaround doesn't |
1f26dac3 DM |
4122 | * cause resets so rapidly that the switch is fooled |
4123 | * into thinking the link is down. |
4124 | */ | |
4125 | cp->link_transition_jiffies_valid = 0; | |
4126 | } | |
4127 | ||
4128 | if (!cp->hw_running) | |
4129 | return; | |
4130 | ||
4131 | spin_lock_irqsave(&cp->lock, flags); | |
4132 | cas_lock_tx(cp); | |
4133 | cas_entropy_gather(cp); | |
4134 | ||
4135 | /* If the link task is still pending, we just | |
4136 | * reschedule the link timer | |
4137 | */ | |
4138 | #if 1 | |
4139 | if (atomic_read(&cp->reset_task_pending_all) || | |
4140 | atomic_read(&cp->reset_task_pending_spare) || | |
6aa20a22 | 4141 | atomic_read(&cp->reset_task_pending_mtu)) |
1f26dac3 DM |
4142 | goto done; |
4143 | #else | |
6aa20a22 | 4144 | if (atomic_read(&cp->reset_task_pending)) |
1f26dac3 DM |
4145 | goto done; |
4146 | #endif | |
4147 | ||
4148 | /* check for rx cleaning */ | |
4149 | if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { | |
4150 | int i, rmask; | |
4151 | ||
4152 | for (i = 0; i < MAX_RX_DESC_RINGS; i++) { | |
4153 | rmask = CAS_FLAG_RXD_POST(i); | |
4154 | if ((mask & rmask) == 0) | |
4155 | continue; | |
4156 | ||
4157 | /* post_rxds will do a mod_timer */ | |
4158 | if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { | |
4159 | pending = 1; | |
4160 | continue; | |
4161 | } | |
4162 | cp->cas_flags &= ~rmask; | |
4163 | } | |
4164 | } | |
4165 | ||
4166 | if (CAS_PHY_MII(cp->phy_type)) { | |
4167 | u16 bmsr; | |
4168 | cas_mif_poll(cp, 0); | |
4169 | bmsr = cas_phy_read(cp, MII_BMSR); | |
4170 | /* WTZ: Solaris driver reads this twice, but that | |
4171 | * may be due to the PCS case and the use of a | |
4172 | * common implementation. Read it twice here to be | |
4173 | * safe. | |
4174 | */ | |
4175 | bmsr = cas_phy_read(cp, MII_BMSR); | |
4176 | cas_mif_poll(cp, 1); | |
4177 | readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ | |
4178 | reset = cas_mii_link_check(cp, bmsr); | |
4179 | } else { | |
4180 | reset = cas_pcs_link_check(cp); | |
4181 | } | |
4182 | ||
4183 | if (reset) | |
4184 | goto done; | |
4185 | ||
4186 | /* check for tx state machine confusion */ | |
4187 | if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { | |
4188 | u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); | |
4189 | u32 wptr, rptr; | |
4190 | int tlm = CAS_VAL(MAC_SM_TLM, val); | |
4191 | ||
4192 | if (((tlm == 0x5) || (tlm == 0x3)) && | |
4193 | (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { | |
4194 | if (netif_msg_tx_err(cp)) | |
4195 | printk(KERN_DEBUG "%s: tx err: " | |
4196 | "MAC_STATE[%08x]\n", | |
4197 | cp->dev->name, val); | |
4198 | reset = 1; | |
4199 | goto done; | |
4200 | } | |
4201 | ||
4202 | val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); | |
4203 | wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); | |
4204 | rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); | |
4205 | if ((val == 0) && (wptr != rptr)) { | |
4206 | if (netif_msg_tx_err(cp)) | |
4207 | printk(KERN_DEBUG "%s: tx err: " | |
4208 | "TX_FIFO[%08x:%08x:%08x]\n", | |
4209 | cp->dev->name, val, wptr, rptr); | |
4210 | reset = 1; | |
4211 | } | |
4212 | ||
4213 | if (reset) | |
4214 | cas_hard_reset(cp); | |
4215 | } | |
4216 | ||
4217 | done: | |
4218 | if (reset) { | |
4219 | #if 1 | |
4220 | atomic_inc(&cp->reset_task_pending); | |
4221 | atomic_inc(&cp->reset_task_pending_all); | |
4222 | schedule_work(&cp->reset_task); | |
4223 | #else | |
4224 | atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); | |
4225 | printk(KERN_ERR "reset called in cas_link_timer\n"); | |
4226 | schedule_work(&cp->reset_task); | |
4227 | #endif | |
4228 | } | |
4229 | ||
4230 | if (!pending) | |
4231 | mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); | |
4232 | cas_unlock_tx(cp); | |
4233 | spin_unlock_irqrestore(&cp->lock, flags); | |
4234 | } | |
4235 | ||
6aa20a22 | 4236 | /* tiny buffers are used to avoid target abort issues with |
1f26dac3 DM |
4237 | * older Cassini chips. |
4238 | */ | |
4239 | static void cas_tx_tiny_free(struct cas *cp) | |
4240 | { | |
4241 | struct pci_dev *pdev = cp->pdev; | |
4242 | int i; | |
4243 | ||
4244 | for (i = 0; i < N_TX_RINGS; i++) { | |
4245 | if (!cp->tx_tiny_bufs[i]) | |
4246 | continue; | |
4247 | ||
6aa20a22 | 4248 | pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, |
1f26dac3 DM |
4249 | cp->tx_tiny_bufs[i], |
4250 | cp->tx_tiny_dvma[i]); | |
4251 | cp->tx_tiny_bufs[i] = NULL; | |
4252 | } | |
4253 | } | |
4254 | ||
4255 | static int cas_tx_tiny_alloc(struct cas *cp) | |
4256 | { | |
4257 | struct pci_dev *pdev = cp->pdev; | |
4258 | int i; | |
4259 | ||
4260 | for (i = 0; i < N_TX_RINGS; i++) { | |
6aa20a22 | 4261 | cp->tx_tiny_bufs[i] = |
1f26dac3 DM |
4262 | pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, |
4263 | &cp->tx_tiny_dvma[i]); | |
4264 | if (!cp->tx_tiny_bufs[i]) { | |
4265 | cas_tx_tiny_free(cp); | |
4266 | return -1; | |
4267 | } | |
4268 | } | |
4269 | return 0; | |
4270 | } | |
4271 | ||
4272 | ||
4273 | static int cas_open(struct net_device *dev) | |
4274 | { | |
4275 | struct cas *cp = netdev_priv(dev); | |
4276 | int hw_was_up, err; | |
4277 | unsigned long flags; | |
4278 | ||
758df69e | 4279 | mutex_lock(&cp->pm_mutex); |
1f26dac3 DM |
4280 | |
4281 | hw_was_up = cp->hw_running; | |
4282 | ||
758df69e | 4283 | /* The power-management mutex protects the hw_running |
1f26dac3 DM |
4284 | * etc. state so it is safe to do this bit without cp->lock |
4285 | */ | |
4286 | if (!cp->hw_running) { | |
4287 | /* Reset the chip */ | |
4288 | cas_lock_all_save(cp, flags); | |
4289 | /* We set the second arg to cas_reset to zero | |
6aa20a22 | 4290 | * because cas_init_hw below will have its second |
1f26dac3 DM |
4291 | * argument set to non-zero, which will force |
4292 | * autonegotiation to start. | |
4293 | */ | |
4294 | cas_reset(cp, 0); | |
4295 | cp->hw_running = 1; | |
4296 | cas_unlock_all_restore(cp, flags); | |
4297 | } | |
4298 | ||
4299 | if (cas_tx_tiny_alloc(cp) < 0) | |
4300 | return -ENOMEM; | |
4301 | ||
4302 | /* alloc rx descriptors */ | |
4303 | err = -ENOMEM; | |
4304 | if (cas_alloc_rxds(cp) < 0) | |
4305 | goto err_tx_tiny; | |
6aa20a22 | 4306 | |
1f26dac3 DM |
4307 | /* allocate spares */ |
4308 | cas_spare_init(cp); | |
4309 | cas_spare_recover(cp, GFP_KERNEL); | |
4310 | ||
4311 | /* We can now request the interrupt as we know it's masked | |
4312 | * on the controller. cassini+ has up to 4 interrupts | |
6aa20a22 | 4313 | * that can be used, but you need to do explicit pci interrupt |
1f26dac3 DM |
4314 | * mapping to expose them |
4315 | */ | |
4316 | if (request_irq(cp->pdev->irq, cas_interrupt, | |
1fb9df5d | 4317 | IRQF_SHARED, dev->name, (void *) dev)) { |
6aa20a22 | 4318 | printk(KERN_ERR "%s: failed to request irq!\n", |
1f26dac3 DM |
4319 | cp->dev->name); |
4320 | err = -EAGAIN; | |
4321 | goto err_spare; | |
4322 | } | |
4323 | ||
bea3348e SH |
4324 | #ifdef USE_NAPI |
4325 | napi_enable(&cp->napi); | |
4326 | #endif | |
1f26dac3 DM |
4327 | /* init hw */ |
4328 | cas_lock_all_save(cp, flags); | |
4329 | cas_clean_rings(cp); | |
4330 | cas_init_hw(cp, !hw_was_up); | |
4331 | cp->opened = 1; | |
4332 | cas_unlock_all_restore(cp, flags); | |
4333 | ||
4334 | netif_start_queue(dev); | |
758df69e | 4335 | mutex_unlock(&cp->pm_mutex); |
1f26dac3 DM |
4336 | return 0; |
4337 | ||
4338 | err_spare: | |
4339 | cas_spare_free(cp); | |
4340 | cas_free_rxds(cp); | |
4341 | err_tx_tiny: | |
4342 | cas_tx_tiny_free(cp); | |
758df69e | 4343 | mutex_unlock(&cp->pm_mutex); |
1f26dac3 DM |
4344 | return err; |
4345 | } | |
4346 | ||
4347 | static int cas_close(struct net_device *dev) | |
4348 | { | |
4349 | unsigned long flags; | |
4350 | struct cas *cp = netdev_priv(dev); | |
4351 | ||
bea3348e | 4352 | #ifdef USE_NAPI |
86216268 | 4353 | napi_disable(&cp->napi); |
bea3348e | 4354 | #endif |
1f26dac3 | 4355 | /* Make sure we don't get distracted by suspend/resume */ |
758df69e | 4356 | mutex_lock(&cp->pm_mutex); |
1f26dac3 DM |
4357 | |
4358 | netif_stop_queue(dev); | |
4359 | ||
4360 | /* Stop traffic, mark us closed */ | |
4361 | cas_lock_all_save(cp, flags); | |
6aa20a22 | 4362 | cp->opened = 0; |
1f26dac3 | 4363 | cas_reset(cp, 0); |
6aa20a22 | 4364 | cas_phy_init(cp); |
1f26dac3 DM |
4365 | cas_begin_auto_negotiation(cp, NULL); |
4366 | cas_clean_rings(cp); | |
4367 | cas_unlock_all_restore(cp, flags); | |
4368 | ||
4369 | free_irq(cp->pdev->irq, (void *) dev); | |
4370 | cas_spare_free(cp); | |
4371 | cas_free_rxds(cp); | |
4372 | cas_tx_tiny_free(cp); | |
758df69e | 4373 | mutex_unlock(&cp->pm_mutex); |
1f26dac3 DM |
4374 | return 0; |
4375 | } | |
4376 | ||
4377 | static struct { | |
4378 | const char name[ETH_GSTRING_LEN]; | |
4379 | } ethtool_cassini_statnames[] = { | |
4380 | {"collisions"}, | |
4381 | {"rx_bytes"}, | |
4382 | {"rx_crc_errors"}, | |
4383 | {"rx_dropped"}, | |
4384 | {"rx_errors"}, | |
4385 | {"rx_fifo_errors"}, | |
4386 | {"rx_frame_errors"}, | |
4387 | {"rx_length_errors"}, | |
4388 | {"rx_over_errors"}, | |
4389 | {"rx_packets"}, | |
4390 | {"tx_aborted_errors"}, | |
4391 | {"tx_bytes"}, | |
4392 | {"tx_dropped"}, | |
4393 | {"tx_errors"}, | |
4394 | {"tx_fifo_errors"}, | |
4395 | {"tx_packets"} | |
4396 | }; | |
4c3616cd | 4397 | #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) |
1f26dac3 DM |
4398 | |
4399 | static struct { | |
4400 | const int offsets; /* neg. values for 2nd arg to cas_read_phy */ | |
4401 | } ethtool_register_table[] = { | |
4402 | {-MII_BMSR}, | |
4403 | {-MII_BMCR}, | |
4404 | {REG_CAWR}, | |
4405 | {REG_INF_BURST}, | |
4406 | {REG_BIM_CFG}, | |
4407 | {REG_RX_CFG}, | |
4408 | {REG_HP_CFG}, | |
4409 | {REG_MAC_TX_CFG}, | |
4410 | {REG_MAC_RX_CFG}, | |
4411 | {REG_MAC_CTRL_CFG}, | |
4412 | {REG_MAC_XIF_CFG}, | |
4413 | {REG_MIF_CFG}, | |
4414 | {REG_PCS_CFG}, | |
4415 | {REG_SATURN_PCFG}, | |
4416 | {REG_PCS_MII_STATUS}, | |
4417 | {REG_PCS_STATE_MACHINE}, | |
4418 | {REG_MAC_COLL_EXCESS}, | |
4419 | {REG_MAC_COLL_LATE} | |
4420 | }; | |
e9edda69 | 4421 | #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) |
1f26dac3 DM |
4422 | #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) |
4423 | ||
a232f767 | 4424 | static void cas_read_regs(struct cas *cp, u8 *ptr, int len) |
1f26dac3 | 4425 | { |
1f26dac3 DM |
4426 | u8 *p; |
4427 | int i; | |
4428 | unsigned long flags; | |
4429 | ||
1f26dac3 | 4430 | spin_lock_irqsave(&cp->lock, flags); |
a232f767 | 4431 | for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { |
1f26dac3 DM |
4432 | u16 hval; |
4433 | u32 val; | |
4434 | if (ethtool_register_table[i].offsets < 0) { | |
4435 | hval = cas_phy_read(cp, | |
4436 | -ethtool_register_table[i].offsets); | |
4437 | val = hval; | |
4438 | } else { | |
4439 | val= readl(cp->regs+ethtool_register_table[i].offsets); | |
4440 | } | |
4441 | memcpy(p, (u8 *)&val, sizeof(u32)); | |
4442 | } | |
4443 | spin_unlock_irqrestore(&cp->lock, flags); | |
1f26dac3 DM |
4444 | } |
4445 | ||
4446 | static struct net_device_stats *cas_get_stats(struct net_device *dev) | |
4447 | { | |
4448 | struct cas *cp = netdev_priv(dev); | |
4449 | struct net_device_stats *stats = cp->net_stats; | |
4450 | unsigned long flags; | |
4451 | int i; | |
4452 | unsigned long tmp; | |
4453 | ||
4454 | /* we collate all of the stats into net_stats[N_TX_RING] */ | |
4455 | if (!cp->hw_running) | |
4456 | return stats + N_TX_RINGS; | |
6aa20a22 | 4457 | |
1f26dac3 DM |
4458 | /* collect outstanding stats */ |
4459 | /* WTZ: the Cassini spec gives these as 16 bit counters but | |
4460 | * stored in 32-bit words. Added a mask of 0xffff to be safe, | |
4461 | * in case the chip somehow puts any garbage in the other bits. | |
4462 | * Also, counter usage didn't seem to mach what Adrian did | |
4463 | * in the parts of the code that set these quantities. Made | |
4464 | * that consistent. | |
4465 | */ | |
4466 | spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); | |
6aa20a22 | 4467 | stats[N_TX_RINGS].rx_crc_errors += |
1f26dac3 | 4468 | readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; |
6aa20a22 | 4469 | stats[N_TX_RINGS].rx_frame_errors += |
1f26dac3 | 4470 | readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; |
6aa20a22 | 4471 | stats[N_TX_RINGS].rx_length_errors += |
1f26dac3 DM |
4472 | readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; |
4473 | #if 1 | |
4474 | tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + | |
4475 | (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); | |
4476 | stats[N_TX_RINGS].tx_aborted_errors += tmp; | |
4477 | stats[N_TX_RINGS].collisions += | |
4478 | tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); | |
4479 | #else | |
6aa20a22 | 4480 | stats[N_TX_RINGS].tx_aborted_errors += |
1f26dac3 DM |
4481 | readl(cp->regs + REG_MAC_COLL_EXCESS); |
4482 | stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + | |
4483 | readl(cp->regs + REG_MAC_COLL_LATE); | |
4484 | #endif | |
4485 | cas_clear_mac_err(cp); | |
4486 | ||
4487 | /* saved bits that are unique to ring 0 */ | |
4488 | spin_lock(&cp->stat_lock[0]); | |
4489 | stats[N_TX_RINGS].collisions += stats[0].collisions; | |
4490 | stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; | |
4491 | stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; | |
4492 | stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; | |
4493 | stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; | |
4494 | stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; | |
4495 | spin_unlock(&cp->stat_lock[0]); | |
4496 | ||
4497 | for (i = 0; i < N_TX_RINGS; i++) { | |
4498 | spin_lock(&cp->stat_lock[i]); | |
6aa20a22 | 4499 | stats[N_TX_RINGS].rx_length_errors += |
1f26dac3 DM |
4500 | stats[i].rx_length_errors; |
4501 | stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; | |
4502 | stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; | |
4503 | stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; | |
4504 | stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; | |
4505 | stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; | |
4506 | stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; | |
4507 | stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; | |
4508 | stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; | |
4509 | stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; | |
4510 | memset(stats + i, 0, sizeof(struct net_device_stats)); | |
4511 | spin_unlock(&cp->stat_lock[i]); | |
4512 | } | |
4513 | spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); | |
4514 | return stats + N_TX_RINGS; | |
4515 | } | |
4516 | ||
4517 | ||
4518 | static void cas_set_multicast(struct net_device *dev) | |
4519 | { | |
4520 | struct cas *cp = netdev_priv(dev); | |
4521 | u32 rxcfg, rxcfg_new; | |
4522 | unsigned long flags; | |
4523 | int limit = STOP_TRIES; | |
6aa20a22 | 4524 | |
1f26dac3 DM |
4525 | if (!cp->hw_running) |
4526 | return; | |
6aa20a22 | 4527 | |
1f26dac3 DM |
4528 | spin_lock_irqsave(&cp->lock, flags); |
4529 | rxcfg = readl(cp->regs + REG_MAC_RX_CFG); | |
4530 | ||
4531 | /* disable RX MAC and wait for completion */ | |
4532 | writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | |
4533 | while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { | |
4534 | if (!limit--) | |
4535 | break; | |
4536 | udelay(10); | |
4537 | } | |
4538 | ||
4539 | /* disable hash filter and wait for completion */ | |
4540 | limit = STOP_TRIES; | |
4541 | rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); | |
4542 | writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); | |
4543 | while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { | |
4544 | if (!limit--) | |
4545 | break; | |
4546 | udelay(10); | |
4547 | } | |
4548 | ||
4549 | /* program hash filters */ | |
4550 | cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); | |
4551 | rxcfg |= rxcfg_new; | |
4552 | writel(rxcfg, cp->regs + REG_MAC_RX_CFG); | |
4553 | spin_unlock_irqrestore(&cp->lock, flags); | |
4554 | } | |
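/* The sequence above quiesces the RX MAC before touching the
 * filters: the enable and hash-filter bits are polled until the
 * hardware reports them clear (presumably so the update cannot
 * race with a frame in flight), and the final write restores the
 * original enable bit along with the new filter configuration.
 */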
4555 | ||
a232f767 AV |
4556 | static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
4557 | { | |
4558 | struct cas *cp = netdev_priv(dev); | |
4559 | strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); | |
4560 | strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); | |
4561 | info->fw_version[0] = '\0'; | |
4562 | strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); | |
4563 | info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? | |
4564 | cp->casreg_len : CAS_MAX_REGS; | |
4565 | info->n_stats = CAS_NUM_STAT_KEYS; | |
4566 | } | |
4567 | ||
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;

	cmd->advertising = 0;
	cmd->supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		cmd->supported |= SUPPORTED_1000baseT_Full;
		cmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	/* Record PHY settings if HW is on. */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		cmd->port = PORT_MII;
		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd->phy_address = cp->phy_addr;
		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		cmd->supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}

	} else {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->phy_address = 0;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* pcs uses the same bits as mii */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->autoneg = AUTONEG_ENABLE;
		cmd->speed = ((speed == 10) ?
			      SPEED_10 :
			      ((speed == 1000) ?
			       SPEED_1000 : SPEED_100));
		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		cmd->speed =
			(bmcr & CAS_BMCR_SPEED1000) ?
			SPEED_1000 :
			((bmcr & BMCR_SPEED100) ? SPEED_100 :
			 SPEED_10);
		cmd->duplex =
			(bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled.  We can set the link
		 * speed to 0, but not cmd->duplex,
		 * because its legal values are 0 and 1.  Ethtool will
		 * print the value reported in parentheses after the
		 * word "Unknown" for unrecognized values.
		 *
		 * If in forced mode, we report the speed and duplex
		 * settings that we configured.
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			cmd->speed = 0;
			cmd->duplex = 0xff;
		} else {
			cmd->speed = SPEED_10;
			if (cp->link_cntl & BMCR_SPEED100) {
				cmd->speed = SPEED_100;
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				cmd->speed = SPEED_1000;
			}
			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}
	return 0;
}

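/* A quick summary of the forced-mode speed decoding in cas_get_settings
 * above (CAS_BMCR_SPEED1000 is a driver-defined flag; BMCR_SPEED100 and
 * BMCR_FULLDPLX are the standard MII control bits):
 *
 *	CAS_BMCR_SPEED1000 set		-> SPEED_1000
 *	else BMCR_SPEED100 set		-> SPEED_100
 *	else				-> SPEED_10
 */
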
static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((cmd->speed != SPEED_1000 &&
	      cmd->speed != SPEED_100 &&
	      cmd->speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irqsave(&cp->lock, flags);
	cas_begin_auto_negotiation(cp, cmd);
	spin_unlock_irqrestore(&cp->lock, flags);
	return 0;
}

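/* The checks above back the ethtool "-s" interface, e.g. (illustrative
 * invocations; "eth0" is a placeholder interface name):
 *
 *	ethtool -s eth0 autoneg on
 *	ethtool -s eth0 autoneg off speed 100 duplex full
 *
 * Anything outside 10/100/1000 or half/full in forced mode is rejected
 * with -EINVAL before the link is touched.
 */
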
static int cas_nway_reset(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	if ((cp->link_cntl & BMCR_ANENABLE) == 0)
		return -EINVAL;

	/* Restart link process. */
	spin_lock_irqsave(&cp->lock, flags);
	cas_begin_auto_negotiation(cp, NULL);
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}

static u32 cas_get_link(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->lstate == link_up;
}

static u32 cas_get_msglevel(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->msg_enable;
}

static void cas_set_msglevel(struct net_device *dev, u32 value)
{
	struct cas *cp = netdev_priv(dev);
	cp->msg_enable = value;
}

static int cas_get_regs_len(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len : CAS_MAX_REGS;
}

static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			 void *p)
{
	struct cas *cp = netdev_priv(dev);
	regs->version = 0;
	/* cas_read_regs handles locks (cp->lock). */
	cas_read_regs(cp, p, regs->len / sizeof(u32));
}

static int cas_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return CAS_NUM_STAT_KEYS;
	default:
		return -EOPNOTSUPP;
	}
}

static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, &ethtool_cassini_statnames,
	       CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
}

static void cas_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *data)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cas_get_stats(cp->dev);
	int i = 0;
	data[i++] = stats->collisions;
	data[i++] = stats->rx_bytes;
	data[i++] = stats->rx_crc_errors;
	data[i++] = stats->rx_dropped;
	data[i++] = stats->rx_errors;
	data[i++] = stats->rx_fifo_errors;
	data[i++] = stats->rx_frame_errors;
	data[i++] = stats->rx_length_errors;
	data[i++] = stats->rx_over_errors;
	data[i++] = stats->rx_packets;
	data[i++] = stats->tx_aborted_errors;
	data[i++] = stats->tx_bytes;
	data[i++] = stats->tx_dropped;
	data[i++] = stats->tx_errors;
	data[i++] = stats->tx_fifo_errors;
	data[i++] = stats->tx_packets;
	BUG_ON(i != CAS_NUM_STAT_KEYS);
}

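/* NOTE: the values filled in above are reported positionally; their
 * order must match ethtool_cassini_statnames exactly, since ethtool
 * pairs each u64 with the string at the same index.  The BUG_ON()
 * only checks the count, not the ordering.
 */
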
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.get_settings		= cas_get_settings,
	.set_settings		= cas_set_settings,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
};

static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* Hold the PM mutex while doing ioctls or we may collide
	 * with open/close and power management and oops.
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}

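/* For reference, the MII ioctls handled above are driven from user
 * space roughly like this (an illustrative sketch, not driver code;
 * "eth0" is a placeholder):
 *
 *	struct ifreq ifr = { };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// kernel fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// BMSR value lands in mii->val_out
 */
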
/* When this chip sits underneath an Intel 31154 bridge, it is the
 * only subordinate device and we can tweak the bridge settings to
 * reflect that fact.
 */
static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 10 (Bus Parking Control) in the Secondary
	 * Arbiter Control/Status Register which lives at offset
	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
	 * is much simpler so that's how we do this.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

	/* Max out the Multi-Transaction Timer settings since
	 * Cassini is the only device present.
	 *
	 * The register is 16-bit and lives at 0x50.  When the
	 * settings are enabled, it extends the GRANT# signal
	 * for a requestor after a transaction is complete.  This
	 * allows the next request to run without first needing
	 * to negotiate the GRANT# signal back.
	 *
	 * Bits 12:10 define the grant duration:
	 *
	 *	1	--	16 clocks
	 *	2	--	32 clocks
	 *	3	--	64 clocks
	 *	4	--	128 clocks
	 *	5	--	256 clocks
	 *
	 * All other values are illegal.
	 *
	 * Bits 09:00 define which REQ/GNT signal pairs get the
	 * GRANT# signal treatment.  We set them all.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* The Read Prefetch Policy register is 16-bit and sits at
	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
	 * enable it and max out all of the settings since only one
	 * device is sitting underneath and thus bandwidth sharing is
	 * not an issue.
	 *
	 * The register has several 3 bit fields, which indicate a
	 * multiplier applied to the base amount of prefetching the
	 * chip would do.  These fields are at:
	 *
	 *	15:13 --- ReRead Primary Bus
	 *	12:10 --- FirstRead Primary Bus
	 *	09:07 --- ReRead Secondary Bus
	 *	06:04 --- FirstRead Secondary Bus
	 *
	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
	 * get enabled on.  Bit 3 is a grouped enabler which controls
	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
	 * the individual REQ/GNT pairs [2:0].
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 << 7) |
			      (0x7 << 4) |
			      (0xf << 0));

	/* Force cacheline size to 0x8 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Force latency timer to maximum setting so Cassini can
	 * sit on the bus as long as it likes.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}

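/* Worked out, the Multi-Transaction Timer write above is:
 *
 *	(5 << 10) | 0x3ff  =  0x1400 | 0x03ff  =  0x17ff
 *
 * i.e. grant duration code 5 (256 clocks) in bits 12:10 with all ten
 * REQ/GNT enable bits 09:00 set.  The prefetch write at 0x52 works out
 * to 0xffff the same way: all four multiplier fields at 0x7 plus the
 * low four enable bits.
 */
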
static int __devinit cas_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int cas_version_printed = 0;
	unsigned long casreg_len;
	struct net_device *dev;
	struct cas *cp;
	int i, err, pci_using_dac;
	u16 pci_cmd;
	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
	DECLARE_MAC_BUF(mac);

	if (cas_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	dev = alloc_etherdev(sizeof(*cp));
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = pci_request_regions(pdev, dev->name);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_free_netdev;
	}
	pci_set_master(pdev);

	/* we must always turn on parity response or else parity
	 * doesn't get generated properly.  disable SERR/PERR as well.
	 * in addition, we want to turn MWI on.
	 */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_SERR;
	pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	if (pci_try_set_mwi(pdev))
		printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
		       pci_name(pdev));

	cas_program_bridge(pdev);

	/*
	 * On some architectures, the default cache line size set
	 * by pci_try_set_mwi reduces performance.  We have to increase
	 * it for this case.  To start, we'll print some configuration
	 * data.
	 */
#if 1
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
			     &orig_cacheline_size);
	if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
		cas_cacheline_size =
			(CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
			CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
		if (pci_write_config_byte(pdev,
					  PCI_CACHE_LINE_SIZE,
					  cas_cacheline_size)) {
			dev_err(&pdev->dev, "Could not set PCI cache "
				"line size\n");
			goto err_write_cacheline;
		}
	}
#endif

	/* Configure DMA attributes. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev,
						  DMA_64BIT_MASK);
		if (err < 0) {
			dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
				"for consistent allocations\n");
			goto err_out_free_res;
		}

	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, "
				"aborting.\n");
			goto err_out_free_res;
		}
		pci_using_dac = 0;
	}

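	/* Note the usual fallback pattern above: try a 64-bit DMA mask
	 * first and remember the result in pci_using_dac so that
	 * NETIF_F_HIGHDMA can be advertised later; if the platform
	 * cannot do 64-bit DMA, fall back to the 32-bit mask instead.
	 */
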
	casreg_len = pci_resource_len(pdev, 0);

	cp = netdev_priv(dev);
	cp->pdev = pdev;
#if 1
	/* A value of 0 indicates we never explicitly set it */
	cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size : 0;
#endif
	cp->dev = dev;
	cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
		cassini_debug;

	cp->link_transition = LINK_TRANSITION_UNKNOWN;
	cp->link_transition_jiffies_valid = 0;

	spin_lock_init(&cp->lock);
	spin_lock_init(&cp->rx_inuse_lock);
	spin_lock_init(&cp->rx_spare_lock);
	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock_init(&cp->stat_lock[i]);
		spin_lock_init(&cp->tx_lock[i]);
	}
	spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
	mutex_init(&cp->pm_mutex);

	init_timer(&cp->link_timer);
	cp->link_timer.function = cas_link_timer;
	cp->link_timer.data = (unsigned long) cp;

#if 1
	/* Just in case the implementation of atomic operations
	 * changes so that an explicit initialization is necessary.
	 */
	atomic_set(&cp->reset_task_pending, 0);
	atomic_set(&cp->reset_task_pending_all, 0);
	atomic_set(&cp->reset_task_pending_spare, 0);
	atomic_set(&cp->reset_task_pending_mtu, 0);
#endif
	INIT_WORK(&cp->reset_task, cas_reset_task);

	/* Default link parameters */
	if (link_mode >= 0 && link_mode <= 6)
		cp->link_cntl = link_modes[link_mode];
	else
		cp->link_cntl = BMCR_ANENABLE;
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	netif_carrier_off(cp->dev);
	cp->timer_ticks = 0;

	/* give us access to cassini registers */
	cp->regs = pci_iomap(pdev, 0, casreg_len);
	if (!cp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
		goto err_out_free_res;
	}
	cp->casreg_len = casreg_len;

	pci_save_state(pdev);
	cas_check_pci_invariants(cp);
	cas_hard_reset(cp);
	cas_reset(cp, 0);
	if (cas_check_invariants(cp))
		goto err_out_iounmap;

	cp->init_block = (struct cas_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
				     &cp->block_dvma);
	if (!cp->init_block) {
		dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
		goto err_out_iounmap;
	}

	for (i = 0; i < N_TX_RINGS; i++)
		cp->init_txds[i] = cp->init_block->txds[i];

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cp->init_rxds[i] = cp->init_block->rxds[i];

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cp->init_rxcs[i] = cp->init_block->rxcs[i];

	for (i = 0; i < N_RX_FLOWS; i++)
		skb_queue_head_init(&cp->rx_flows[i]);

	dev->open = cas_open;
	dev->stop = cas_close;
	dev->hard_start_xmit = cas_start_xmit;
	dev->get_stats = cas_get_stats;
	dev->set_multicast_list = cas_set_multicast;
	dev->do_ioctl = cas_ioctl;
	dev->ethtool_ops = &cas_ethtool_ops;
	dev->tx_timeout = cas_tx_timeout;
	dev->watchdog_timeo = CAS_TX_TIMEOUT;
	dev->change_mtu = cas_change_mtu;
#ifdef USE_NAPI
	netif_napi_add(dev, &cp->napi, cas_poll, 64);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = cas_netpoll;
#endif
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Cassini features. */
	if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	if (register_netdev(dev)) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_consistent;
	}

	i = readl(cp->regs + REG_BIM_CFG);
	printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
	       "Ethernet[%d] %s\n", dev->name,
	       (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
	       (i & BIM_CFG_32BIT) ? "32" : "64",
	       (i & BIM_CFG_66MHZ) ? "66" : "33",
	       (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
	       print_mac(mac, dev->dev_addr));

	pci_set_drvdata(pdev, dev);
	cp->hw_running = 1;
	cas_entropy_reset(cp);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	return 0;

err_out_free_consistent:
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);

err_out_iounmap:
	mutex_lock(&cp->pm_mutex);
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	pci_iounmap(pdev, cp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_write_cacheline:
	/* Try to restore it in case the error occurred after we
	 * set it.
	 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);

err_out_free_netdev:
	free_netdev(dev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return -ENODEV;
}

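/* The error labels above unwind in strict reverse order of the setup
 * steps -- the usual kernel goto-ladder idiom: each failure jumps to
 * the label that releases everything acquired so far and nothing more.
 */
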
static void __devexit cas_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp;
	if (!dev)
		return;

	cp = netdev_priv(dev);
	unregister_netdev(dev);

	mutex_lock(&cp->pm_mutex);
	flush_scheduled_work();
	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

#if 1
	if (cp->orig_cacheline_size) {
		/* Restore the cache line size if we had modified
		 * it.
		 */
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				      cp->orig_cacheline_size);
	}
#endif
	pci_free_consistent(pdev, sizeof(struct cas_init_block),
			    cp->init_block, cp->block_dvma);
	pci_iounmap(pdev, cp->regs);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM
static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	/* If the driver is opened, we stop the DMA */
	if (cp->opened) {
		netif_device_detach(dev);

		cas_lock_all_save(cp, flags);

		/* We can set the second arg of cas_reset to 0
		 * because on resume, we'll call cas_init_hw with
		 * its second arg set so that autonegotiation is
		 * restarted.
		 */
		cas_reset(cp, 0);
		cas_clean_rings(cp);
		cas_unlock_all_restore(cp, flags);
	}

	if (cp->hw_running)
		cas_shutdown(cp);
	mutex_unlock(&cp->pm_mutex);

	return 0;
}

static int cas_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct cas *cp = netdev_priv(dev);

	printk(KERN_INFO "%s: resuming\n", dev->name);

	mutex_lock(&cp->pm_mutex);
	cas_hard_reset(cp);
	if (cp->opened) {
		unsigned long flags;
		cas_lock_all_save(cp, flags);
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_clean_rings(cp);
		cas_init_hw(cp, 1);
		cas_unlock_all_restore(cp, flags);

		netif_device_attach(dev);
	}
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver cas_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= cas_pci_tbl,
	.probe		= cas_init_one,
	.remove		= __devexit_p(cas_remove_one),
#ifdef CONFIG_PM
	.suspend	= cas_suspend,
	.resume		= cas_resume
#endif
};

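/* linkdown_timeout is a module parameter given in seconds; cas_init
 * below converts it to jiffies by multiplying with HZ.  A non-positive
 * value leaves link_transition_timeout at 0, i.e. no timeout.
 */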
static int __init cas_init(void)
{
	if (linkdown_timeout > 0)
		link_transition_timeout = linkdown_timeout * HZ;
	else
		link_transition_timeout = 0;

	return pci_register_driver(&cas_driver);
}

static void __exit cas_cleanup(void)
{
	pci_unregister_driver(&cas_driver);
}

module_init(cas_init);
module_exit(cas_cleanup);