/*
 * e100net.c: A network driver for the ETRAX 100LX network controller.
 *
 * Copyright (c) 1998-2002 Axis Communications AB.
 *
 * The outline of this driver comes from skeleton.c.
 *
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <linux/if.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>

#include <arch/svinto.h>	/* DMA and register descriptions */
#include <asm/io.h>		/* CRIS_LED_* I/O functions */
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ethernet.h>
#include <asm/cache.h>
#include <arch/io_interface_mux.h>

//#define ETHDEBUG
#define D(x)

/*
 * The name of the card. It is used for messages and in the requests for
 * I/O regions, IRQs and DMA channels.
 */

static const char* cardname = "ETRAX 100LX built-in ethernet controller";

/* A default ethernet address. High-level SW will set the real one later. */

static struct sockaddr default_mac = {
	0,
	{ 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 }
};

/* Information that needs to be kept for each board. */
struct net_local {
	struct net_device_stats stats;
	struct mii_if_info mii_if;

	/* Tx control lock.  This protects the transmit buffer ring
	 * state along with the "tx full" state of the driver.  This
	 * means all netif_queue flow control actions are protected
	 * by this lock as well.
	 */
	spinlock_t lock;

	spinlock_t led_lock; /* Protect LED state */
	spinlock_t transceiver_lock; /* Protect transceiver state. */
};

typedef struct etrax_eth_descr
{
	etrax_dma_descr descr;
	struct sk_buff* skb;
} etrax_eth_descr;

/* Some transceivers require special handling */
struct transceiver_ops
{
	unsigned int oui;
	void (*check_speed)(struct net_device* dev);
	void (*check_duplex)(struct net_device* dev);
};

/* Duplex settings */
enum duplex
{
	half,
	full,
	autoneg
};

/* DMA descriptors etc. */
#define MAX_MEDIA_DATA_SIZE	1522

#define MIN_PACKET_LEN		46
#define ETHER_HEAD_LEN		14

/*
 * MDIO constants.
 */
#define MDIO_START		0x1
#define MDIO_READ		0x2
#define MDIO_WRITE		0x1
#define MDIO_PREAMBLE		0xfffffffful

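/*
 * The management frame built from these constants follows IEEE 802.3
 * clause 22: a 32-bit preamble of ones, a 2-bit start delimiter (01), a
 * 2-bit opcode (10 = read, 01 = write), a 5-bit PHY address, a 5-bit
 * register address, a 2-bit turnaround and 16 data bits.
 * e100_send_mdio_cmd() below assembles the start/opcode/address part from
 * MDIO_START, MDIO_READ and MDIO_WRITE and bit-bangs it out MSB first.
 */
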
/* Broadcom specific */
#define MDIO_AUX_CTRL_STATUS_REG	0x18
#define MDIO_BC_FULL_DUPLEX_IND		0x1
#define MDIO_BC_SPEED			0x2

/* TDK specific */
#define MDIO_TDK_DIAGNOSTIC_REG		18
#define MDIO_TDK_DIAGNOSTIC_RATE	0x400
#define MDIO_TDK_DIAGNOSTIC_DPLX	0x800

/* Intel LXT972A specific */
#define MDIO_INT_STATUS_REG_2		0x0011
#define MDIO_INT_FULL_DUPLEX_IND	(1 << 9)
#define MDIO_INT_SPEED			(1 << 14)

/* Network flash constants */
#define NET_FLASH_TIME			(HZ/50)  /* 20 ms */
#define NET_FLASH_PAUSE			(HZ/100) /* 10 ms */
#define NET_LINK_UP_CHECK_INTERVAL	(2*HZ)   /* 2 s */
#define NET_DUPLEX_CHECK_INTERVAL	(2*HZ)   /* 2 s */

#define NO_NETWORK_ACTIVITY	0
#define NETWORK_ACTIVITY	1

#define NBR_OF_RX_DESC		32
#define NBR_OF_TX_DESC		16

/* Large packets are sent directly to upper layers while small packets are
 * copied (to reduce memory waste). The following constant decides the
 * breakpoint.
 */
#define RX_COPYBREAK		256

/* Due to a chip bug we need to flush the cache when descriptors are returned
 * to the DMA. To decrease performance impact we return descriptors in chunks.
 * The following constant determines the number of descriptors to return.
 */
#define RX_QUEUE_THRESHOLD	(NBR_OF_RX_DESC/2)

#define GET_BIT(bit, val)	(((val) >> (bit)) & 0x01)

/* Define some macros to access ETRAX 100 registers */
#define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
					 IO_FIELD_(reg##_, field##_, val)
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
					 IO_STATE_(reg##_, field##_, _##val)

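/*
 * Example of how the macros above are used further down:
 *
 *   SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
 *   *R_NETWORK_REC_CONFIG = network_rec_config_shadow;
 *
 * SETF/SETS clear the named field in a shadow variable and OR in a new value
 * (SETF) or symbolic state (SETS); the shadow is then written to the register
 * in one go, presumably because the register cannot simply be read back,
 * modified and rewritten.
 */
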
static etrax_eth_descr *myNextRxDesc;	/* Points to the next descriptor
					   to be processed */
static etrax_eth_descr *myLastRxDesc;	/* The last processed descriptor */

static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));

static etrax_eth_descr* myFirstTxDesc;	/* First packet not yet sent */
static etrax_eth_descr* myLastTxDesc;	/* End of send queue */
static etrax_eth_descr* myNextTxDesc;	/* Next descriptor to use */
static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));

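/*
 * Both descriptor lists above are set up as circular rings in
 * etrax_ethernet_init(): every descr.next points at the following entry,
 * the last entry points back to the first, and the d_eol bit in descr.ctrl
 * marks where the DMA channel must stop.  The pointers above track the
 * driver's current position in those rings.
 */
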
static unsigned int network_rec_config_shadow = 0;

static unsigned int network_tr_ctrl_shadow = 0;

/* Network speed indication. */
static DEFINE_TIMER(speed_timer, NULL, 0, 0);
static DEFINE_TIMER(clear_led_timer, NULL, 0, 0);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
static int led_active;
static int rx_queue_len;

/* Duplex */
static DEFINE_TIMER(duplex_timer, NULL, 0, 0);
static int full_duplex;
static enum duplex current_duplex;

/* Index to functions, as function prototypes. */

static int etrax_ethernet_init(void);

static int e100_open(struct net_device *dev);
static int e100_set_mac_address(struct net_device *dev, void *addr);
static int e100_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id);
static irqreturn_t e100nw_interrupt(int irq, void *dev_id);
static void e100_rx(struct net_device *dev);
static int e100_close(struct net_device *dev);
static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int e100_set_config(struct net_device* dev, struct ifmap* map);
static void e100_tx_timeout(struct net_device *dev);
static struct net_device_stats *e100_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);

static void e100_check_speed(unsigned long priv);
static void e100_set_speed(struct net_device* dev, unsigned long speed);
static void e100_check_duplex(unsigned long priv);
static void e100_set_duplex(struct net_device* dev, enum duplex);
static void e100_negotiate(struct net_device* dev);

static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location);
static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value);

static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd);
static void e100_send_mdio_bit(unsigned char bit);
static unsigned char e100_receive_mdio_bit(void);
static void e100_reset_transceiver(struct net_device* net);

static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);

static const struct ethtool_ops e100_ethtool_ops;
#if defined(CONFIG_ETRAX_NO_PHY)
static void dummy_check_speed(struct net_device* dev);
static void dummy_check_duplex(struct net_device* dev);
#else
static void broadcom_check_speed(struct net_device* dev);
static void broadcom_check_duplex(struct net_device* dev);
static void tdk_check_speed(struct net_device* dev);
static void tdk_check_duplex(struct net_device* dev);
static void intel_check_speed(struct net_device* dev);
static void intel_check_duplex(struct net_device* dev);
static void generic_check_speed(struct net_device* dev);
static void generic_check_duplex(struct net_device* dev);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device* dev);
#endif

static int autoneg_normal = 1;

struct transceiver_ops transceivers[] =
{
#if defined(CONFIG_ETRAX_NO_PHY)
	{0x0000, dummy_check_speed, dummy_check_duplex}        /* Dummy */
#else
	{0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
	{0xC039, tdk_check_speed, tdk_check_duplex},           /* TDK 2120 */
	{0x039C, tdk_check_speed, tdk_check_duplex},           /* TDK 2120C */
	{0x04de, intel_check_speed, intel_check_duplex},       /* Intel LXT972A */
	{0x0000, generic_check_speed, generic_check_duplex}    /* Generic, must be last */
#endif
};

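/*
 * Points at the transceivers[] entry matching the PHY found on the MDIO bus.
 * e100_probe_transceiver() walks the table until it hits an entry whose OUI
 * matches; the generic entry with OUI 0 terminates the walk and doubles as
 * the fallback, which is why it must come last.
 */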
struct transceiver_ops* transceiver = &transceivers[0];

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_send_packet,
	.ndo_tx_timeout		= e100_tx_timeout,
	.ndo_get_stats		= e100_get_stats,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_do_ioctl		= e100_ioctl,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_config		= e100_set_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
};

#define tx_done(dev) (*R_DMA_CH0_CMD == 0)

/*
 * Check for a network adaptor of this type, and return '0' if one exists.
 * If dev->base_addr == 0, probe all likely locations.
 * If dev->base_addr == 1, always return failure.
 * If dev->base_addr == 2, allocate space for the device and return success
 * (detachable devices only).
 */

static int __init
etrax_ethernet_init(void)
{
	struct net_device *dev;
	struct net_local* np;
	int i, err;

	printk(KERN_INFO
	       "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");

	if (cris_request_io_interface(if_eth, cardname)) {
		printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	np = netdev_priv(dev);

	/* we do our own locking */
	dev->features |= NETIF_F_LLTX;

	dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */

	/* now setup our etrax specific stuff */

	dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */
	dev->dma = NETWORK_RX_DMA_NBR;

	/* fill in our handlers so the network layer can talk to us in the future */

	dev->ethtool_ops = &e100_ethtool_ops;
	dev->netdev_ops = &e100_netdev_ops;

	spin_lock_init(&np->lock);
	spin_lock_init(&np->led_lock);
	spin_lock_init(&np->transceiver_lock);

	/* Initialise the list of Etrax DMA-descriptors */

	/* Initialise receive descriptors */

	for (i = 0; i < NBR_OF_RX_DESC; i++) {
		/* Allocate two extra cachelines to make sure that buffer used
		 * by DMA does not share cacheline with any other data (to
		 * avoid cache bug)
		 */
		RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
		if (!RxDescList[i].skb)
			return -ENOMEM;
		RxDescList[i].descr.ctrl   = 0;
		RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE;
		RxDescList[i].descr.next   = virt_to_phys(&RxDescList[i + 1]);
		RxDescList[i].descr.buf    = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data));
		RxDescList[i].descr.status = 0;
		RxDescList[i].descr.hw_len = 0;
		prepare_rx_descriptor(&RxDescList[i].descr);
	}

	RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol;
	RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]);
	rx_queue_len = 0;

	/* Initialize transmit descriptors */
	for (i = 0; i < NBR_OF_TX_DESC; i++) {
		TxDescList[i].descr.ctrl   = 0;
		TxDescList[i].descr.sw_len = 0;
		TxDescList[i].descr.next   = virt_to_phys(&TxDescList[i + 1].descr);
		TxDescList[i].descr.buf    = 0;
		TxDescList[i].descr.status = 0;
		TxDescList[i].descr.hw_len = 0;
		TxDescList[i].skb = 0;
	}

	TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol;
	TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr);

	/* Initialise initial pointers */

	myNextRxDesc  = &RxDescList[0];
	myLastRxDesc  = &RxDescList[NBR_OF_RX_DESC - 1];
	myFirstTxDesc = &TxDescList[0];
	myNextTxDesc  = &TxDescList[0];
	myLastTxDesc  = &TxDescList[NBR_OF_TX_DESC - 1];

	/* Register device */
	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	/* set the default MAC address */

	e100_set_mac_address(dev, &default_mac);

	/* Initialize speed indicator stuff. */

	current_speed = 10;
	current_speed_selection = 0; /* Auto */
	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
	speed_timer.data = (unsigned long)dev;
	speed_timer.function = e100_check_speed;

	clear_led_timer.function = e100_clear_network_leds;
	clear_led_timer.data = (unsigned long)dev;

	full_duplex = 0;
	current_duplex = autoneg;
	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
	duplex_timer.data = (unsigned long)dev;
	duplex_timer.function = e100_check_duplex;

	/* Initialize mii interface */
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = e100_get_mdio_reg;
	np->mii_if.mdio_write = e100_set_mdio_reg;

	/* Initialize group address registers to make sure that no
	 * unwanted addresses are matched */
	*R_NETWORK_GA_0 = 0x00000000;
	*R_NETWORK_GA_1 = 0x00000000;

	/* Initialize next time the led can flash */
	led_next_time = jiffies;
	return 0;
}

/* set MAC address of the interface. called from the core after a
 * SIOCSIFADDR ioctl, and from the bootup above.
 */

static int
e100_set_mac_address(struct net_device *dev, void *p)
{
	struct net_local *np = netdev_priv(dev);
	struct sockaddr *addr = p;

	spin_lock(&np->lock); /* preemption protection */

	/* remember it */

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Write it to the hardware.
	 * Note the way the address is wrapped:
	 * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
	 * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8);
	 */

	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
	*R_NETWORK_SA_2 = 0;

	/* show it in the log as well */

	printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);

	spin_unlock(&np->lock);

	return 0;
}

/*
 * Open/initialize the board. This is called (in the current kernel)
 * sometime after booting when the 'ifconfig' program is run.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */

static int
e100_open(struct net_device *dev)
{
	unsigned long flags;

	/* enable the MDIO output pin */

	*R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable);

	*R_IRQ_MASK0_CLR =
		IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) |
		IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr);

	/* clear dma0 and 1 eop and descr irq masks */
	*R_IRQ_MASK2_CLR =
		IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) |
		IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);

	/* Reset and wait for the DMA channels */

	RESET_DMA(NETWORK_TX_DMA_NBR);
	RESET_DMA(NETWORK_RX_DMA_NBR);
	WAIT_DMA(NETWORK_TX_DMA_NBR);
	WAIT_DMA(NETWORK_RX_DMA_NBR);

	/* Initialise the etrax network controller */

	/* allocate the irq corresponding to the receiving DMA */

	if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
			IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) {
		goto grace_exit0;
	}

	/* allocate the irq corresponding to the transmitting DMA */

	if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0,
			cardname, (void *)dev)) {
		goto grace_exit1;
	}

	/* allocate the irq corresponding to the network errors etc */

	if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0,
			cardname, (void *)dev)) {
		goto grace_exit2;
	}

	/*
	 * Always allocate the DMA channels after the IRQ,
	 * and clean up on failure.
	 */

	if (cris_request_dma(NETWORK_TX_DMA_NBR,
			     cardname,
			     DMA_VERBOSE_ON_ERROR,
			     dma_eth)) {
		goto grace_exit3;
	}

	if (cris_request_dma(NETWORK_RX_DMA_NBR,
			     cardname,
			     DMA_VERBOSE_ON_ERROR,
			     dma_eth)) {
		goto grace_exit4;
	}

	/* give the HW an idea of what MAC address we want */

	*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
		(dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
	*R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8);
	*R_NETWORK_SA_2 = 0;

#if 0
	/* use promiscuous mode for testing */
	*R_NETWORK_GA_0 = 0xffffffff;
	*R_NETWORK_GA_1 = 0xffffffff;

	*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
#else
	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
	SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
	SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
	*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
#endif

	*R_NETWORK_GEN_CONFIG =
		IO_STATE(R_NETWORK_GEN_CONFIG, phy,    mii_clk) |
		IO_STATE(R_NETWORK_GEN_CONFIG, enable, on);

	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable);
	SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
	*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;

	local_irq_save(flags);

	/* enable the irq's for ethernet DMA */

	*R_IRQ_MASK2_SET =
		IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
		IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);

	*R_IRQ_MASK0_SET =
		IO_STATE(R_IRQ_MASK0_SET, overrun,       set) |
		IO_STATE(R_IRQ_MASK0_SET, underrun,      set) |
		IO_STATE(R_IRQ_MASK0_SET, excessive_col, set);

	/* make sure the irqs are cleared */

	*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
	*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

	/* make sure the rec and transmit error counters are cleared */

	(void)*R_REC_COUNTERS;  /* dummy read */
	(void)*R_TR_COUNTERS;   /* dummy read */

	/* start the receiving DMA channel so we can receive packets from now on */

	*R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc);
	*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start);

	/* Set up transmit DMA channel so it can be restarted later */

	*R_DMA_CH0_FIRST = 0;
	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
	netif_start_queue(dev);

	local_irq_restore(flags);

	/* Probe for transceiver */
	if (e100_probe_transceiver(dev))
		goto grace_exit5;

	/* Start duplex/speed timers */
	add_timer(&speed_timer);
	add_timer(&duplex_timer);

	/* We are now ready to accept transmit requests from
	 * the queueing layer of the networking.
	 */
	netif_carrier_on(dev);

	return 0;

grace_exit5:
	cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
grace_exit4:
	cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
grace_exit3:
	free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
grace_exit2:
	free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
grace_exit1:
	free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev);
grace_exit0:
	return -EAGAIN;
}

#if defined(CONFIG_ETRAX_NO_PHY)
static void
dummy_check_speed(struct net_device* dev)
{
	current_speed = 100;
}
#else
static void
generic_check_speed(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
	if ((data & ADVERTISE_100FULL) ||
	    (data & ADVERTISE_100HALF))
		current_speed = 100;
	else
		current_speed = 10;
}

static void
tdk_check_speed(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_TDK_DIAGNOSTIC_REG);
	current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
}

static void
broadcom_check_speed(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_AUX_CTRL_STATUS_REG);
	current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
}

static void
intel_check_speed(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_INT_STATUS_REG_2);
	current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
}
#endif

static void
e100_check_speed(unsigned long priv)
{
	struct net_device* dev = (struct net_device*)priv;
	struct net_local *np = netdev_priv(dev);
	static int led_initiated = 0;
	unsigned long data;
	int old_speed = current_speed;

	spin_lock(&np->transceiver_lock);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
	if (!(data & BMSR_LSTATUS)) {
		current_speed = 0;
	} else {
		transceiver->check_speed(dev);
	}

	spin_lock(&np->led_lock);
	if ((old_speed != current_speed) || !led_initiated) {
		led_initiated = 1;
		e100_set_network_leds(NO_NETWORK_ACTIVITY);
		if (current_speed)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
	}
	spin_unlock(&np->led_lock);

	/* Reinitialize the timer. */
	speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
	add_timer(&speed_timer);

	spin_unlock(&np->transceiver_lock);
}

static void
e100_negotiate(struct net_device* dev)
{
	struct net_local *np = netdev_priv(dev);
	unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
						MII_ADVERTISE);

	/* Discard old speed and duplex settings */
	data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
		  ADVERTISE_10HALF | ADVERTISE_10FULL);

	switch (current_speed_selection) {
	case 10:
		if (current_duplex == full)
			data |= ADVERTISE_10FULL;
		else if (current_duplex == half)
			data |= ADVERTISE_10HALF;
		else
			data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
		break;

	case 100:
		if (current_duplex == full)
			data |= ADVERTISE_100FULL;
		else if (current_duplex == half)
			data |= ADVERTISE_100HALF;
		else
			data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
		break;

	case 0: /* Auto */
		if (current_duplex == full)
			data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
		else if (current_duplex == half)
			data |= ADVERTISE_100HALF | ADVERTISE_10HALF;
		else
			data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL;
		break;

	default: /* assume autoneg speed and duplex */
		data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
			ADVERTISE_100HALF | ADVERTISE_100FULL;
		break;
	}

	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
	if (autoneg_normal) {
		/* Renegotiate with link partner */
		data |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		/* Don't negotiate speed or duplex */
		data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);

		/* Set speed and duplex static */
		if (current_speed_selection == 10)
			data &= ~BMCR_SPEED100;
		else
			data |= BMCR_SPEED100;

		if (current_duplex != full)
			data &= ~BMCR_FULLDPLX;
		else
			data |= BMCR_FULLDPLX;
	}
	e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
}

static void
e100_set_speed(struct net_device* dev, unsigned long speed)
{
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->transceiver_lock);
	if (speed != current_speed_selection) {
		current_speed_selection = speed;
		e100_negotiate(dev);
	}
	spin_unlock(&np->transceiver_lock);
}

static void
e100_check_duplex(unsigned long priv)
{
	struct net_device *dev = (struct net_device *)priv;
	struct net_local *np = netdev_priv(dev);
	int old_duplex;

	spin_lock(&np->transceiver_lock);
	old_duplex = full_duplex;
	transceiver->check_duplex(dev);
	if (old_duplex != full_duplex) {
		/* Duplex changed */
		SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	}

	/* Reinitialize the timer. */
	duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
	add_timer(&duplex_timer);
	np->mii_if.full_duplex = full_duplex;
	spin_unlock(&np->transceiver_lock);
}

#if defined(CONFIG_ETRAX_NO_PHY)
static void
dummy_check_duplex(struct net_device* dev)
{
	full_duplex = 1;
}
#else
static void
generic_check_duplex(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
	if ((data & ADVERTISE_10FULL) ||
	    (data & ADVERTISE_100FULL))
		full_duplex = 1;
	else
		full_duplex = 0;
}

static void
tdk_check_duplex(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_TDK_DIAGNOSTIC_REG);
	full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
}

static void
broadcom_check_duplex(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_AUX_CTRL_STATUS_REG);
	full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
}

static void
intel_check_duplex(struct net_device* dev)
{
	unsigned long data;
	struct net_local *np = netdev_priv(dev);

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
				 MDIO_INT_STATUS_REG_2);
	full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
}
#endif

static void
e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
{
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->transceiver_lock);
	if (new_duplex != current_duplex) {
		current_duplex = new_duplex;
		e100_negotiate(dev);
	}
	spin_unlock(&np->transceiver_lock);
}

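/*
 * Probe the MDIO bus for a PHY and pick the matching transceiver_ops.
 * The vendor OUI is reassembled from the two MII ID registers: MII_PHYSID1
 * holds OUI bits 3..18 and the top six bits of MII_PHYSID2 hold bits 19..24,
 * which is why the code combines them as (phyid_high << 6) | (phyid_low >> 10)
 * before comparing against transceivers[].oui.
 */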
static int
e100_probe_transceiver(struct net_device* dev)
{
	int ret = 0;

#if !defined(CONFIG_ETRAX_NO_PHY)
	unsigned int phyid_high;
	unsigned int phyid_low;
	unsigned int oui;
	struct transceiver_ops* ops = NULL;
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->transceiver_lock);

	/* Probe MDIO physical address */
	for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
	     np->mii_if.phy_id++) {
		if (e100_get_mdio_reg(dev,
				      np->mii_if.phy_id, MII_BMSR) != 0xffff)
			break;
	}
	if (np->mii_if.phy_id == 32) {
		ret = -ENODEV;
		goto out;
	}

	/* Get manufacturer */
	phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
	phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
	oui = (phyid_high << 6) | (phyid_low >> 10);

	for (ops = &transceivers[0]; ops->oui; ops++) {
		if (ops->oui == oui)
			break;
	}
	transceiver = ops;
out:
	spin_unlock(&np->transceiver_lock);
#endif
	return ret;
}

static int
e100_get_mdio_reg(struct net_device *dev, int phy_id, int location)
{
	unsigned short cmd;	/* Data to be sent on MDIO port */
	int data;		/* Data read from MDIO */
	int bitCounter;

	/* Start of frame, OP Code, Physical Address, Register Address */
	cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) |
	      (location << 2);

	e100_send_mdio_cmd(cmd, 0);

	data = 0;

	/* Data... */
	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		data |= (e100_receive_mdio_bit() << bitCounter);
	}

	return data;
}

static void
e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value)
{
	int bitCounter;
	unsigned short cmd;

	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) |
	      (location << 2);

	e100_send_mdio_cmd(cmd, 1);

	/* Data... */
	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		e100_send_mdio_bit(GET_BIT(bitCounter, value));
	}
}

static void
e100_send_mdio_cmd(unsigned short cmd, int write_cmd)
{
	int bitCounter;
	unsigned char data = 0x2;

	/* Preamble */
	for (bitCounter = 31; bitCounter >= 0; bitCounter--)
		e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE));

	for (bitCounter = 15; bitCounter >= 2; bitCounter--)
		e100_send_mdio_bit(GET_BIT(bitCounter, cmd));

	/* Turnaround */
	for (bitCounter = 1; bitCounter >= 0; bitCounter--)
		if (write_cmd)
			e100_send_mdio_bit(GET_BIT(bitCounter, data));
		else
			e100_receive_mdio_bit();
}

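/*
 * The two helpers below bit-bang the MDIO lines through R_NETWORK_MGM_CTRL:
 * the data bit is put on mdio with mdck low, then mdck is raised, with
 * roughly 1 us per half period.  That clocks the bus well below the 2.5 MHz
 * maximum MDC frequency allowed by the MII specification.
 */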
static void
e100_send_mdio_bit(unsigned char bit)
{
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
	*R_NETWORK_MGM_CTRL =
		IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) |
		IO_MASK(R_NETWORK_MGM_CTRL, mdck) |
		IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit);
	udelay(1);
}

static unsigned char
e100_receive_mdio_bit(void)
{
	unsigned char bit;

	*R_NETWORK_MGM_CTRL = 0;
	bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT);
	udelay(1);
	*R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck);
	udelay(1);
	return bit;
}

static void
e100_reset_transceiver(struct net_device* dev)
{
	struct net_local *np = netdev_priv(dev);
	unsigned short cmd;
	unsigned short data;
	int bitCounter;

	data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);

	cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) |
	      (np->mii_if.phy_id << 7) | (MII_BMCR << 2);

	e100_send_mdio_cmd(cmd, 1);

	data |= 0x8000;

	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
		e100_send_mdio_bit(GET_BIT(bitCounter, data));
	}
}

/* Called by upper layers if they decide it took too long to complete
 * sending a packet - we need to reset and stuff.
 */

static void
e100_tx_timeout(struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
	       tx_done(dev) ? "IRQ problem" : "network cable problem");

	/* remember we got an error */

	np->stats.tx_errors++;

	/* reset the TX DMA in case it has hung on something */

	RESET_DMA(NETWORK_TX_DMA_NBR);
	WAIT_DMA(NETWORK_TX_DMA_NBR);

	/* Reset the transceiver. */

	e100_reset_transceiver(dev);

	/* and get rid of the packets that never got an interrupt */
	while (myFirstTxDesc != myNextTxDesc) {
		dev_kfree_skb(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
	}

	/* Set up transmit DMA channel so it can be restarted later */
	*R_DMA_CH0_FIRST = 0;
	*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);

	/* tell the upper layers we're ok again */

	netif_wake_queue(dev);
	spin_unlock_irqrestore(&np->lock, flags);
}


/* This will only be invoked if the driver is _not_ in XOFF state.
 * What this means is that we need not check it, and that this
 * invariant will hold if we make sure that the netif_*_queue()
 * calls are done at the proper times.
 */

static int
e100_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *np = netdev_priv(dev);
	unsigned char *buf = skb->data;
	unsigned long flags;

#ifdef ETHDEBUG
	printk("send packet len %d\n", skb->len);
#endif
	spin_lock_irqsave(&np->lock, flags);  /* protect from tx_interrupt and ourself */

	myNextTxDesc->skb = skb;

	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */

	e100_hardware_send_packet(np, buf, skb->len);

	myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);

	/* Stop queue if full */
	if (myNextTxDesc == myFirstTxDesc) {
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */

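/*
 * Both the RX (channel 1) and TX (channel 0) DMA interrupts are routed here.
 * Note the condition on the TX completion loop: when the ring is completely
 * full, myFirstTxDesc == myNextTxDesc even though packets are outstanding,
 * so the queue-stopped state is checked as well to avoid mistaking a full
 * ring for an empty one.
 */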
static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct net_local *np = netdev_priv(dev);
	unsigned long irqbits;

	/*
	 * Note that both rx and tx interrupts are blocked at this point,
	 * regardless of which got us here.
	 */

	irqbits = *R_IRQ_MASK2_RD;

	/* Handle received packets */
	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
		/* acknowledge the eop interrupt */

		*R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do);

		/* check if one or more complete packets were indeed received */

		while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) &&
		       (myNextRxDesc != myLastRxDesc)) {
			/* Take out the buffer and give it to the OS, then
			 * allocate a new buffer to put a packet in.
			 */
			e100_rx(dev);
			np->stats.rx_packets++;
			/* restart/continue on the channel, for safety */
			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
			/* clear dma channel 1 eop/descr irq bits */
			*R_DMA_CH1_CLR_INTR =
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) |
				IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do);

			/* now, we might have gotten another packet
			   so we have to loop back and check if so */
		}
	}

	/* Report any packets that have been sent */
	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
		np->stats.tx_bytes += myFirstTxDesc->skb->len;
		np->stats.tx_packets++;

		/* dma is ready with the transmission of the data in tx_skb, so now
		   we can release the skb memory */
		dev_kfree_skb_irq(myFirstTxDesc->skb);
		myFirstTxDesc->skb = 0;
		myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
		/* Wake up queue. */
		netif_wake_queue(dev);
	}

	if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
		/* acknowledge the eop interrupt. */
		*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct net_local *np = netdev_priv(dev);
	unsigned long irqbits = *R_IRQ_MASK0_RD;

	/* check for underrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		np->stats.tx_errors++;
		D(printk("ethernet receiver underrun!\n"));
	}

	/* check for overrun irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
		update_rx_stats(&np->stats); /* this will ack the irq */
		D(printk("ethernet receiver overrun!\n"));
	}
	/* check for excessive collision irq */
	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) {
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
		np->stats.tx_errors++;
		D(printk("ethernet excessive collisions!\n"));
	}
	return IRQ_HANDLED;
}

/* We have a good packet(s), get it/them out of the buffers. */
static void
e100_rx(struct net_device *dev)
{
	struct sk_buff *skb;
	int length = 0;
	struct net_local *np = netdev_priv(dev);
	unsigned char *skb_data_ptr;
#ifdef ETHDEBUG
	int i;
#endif
	etrax_eth_descr *prevRxDesc;  /* The descriptor right before myNextRxDesc */

	spin_lock(&np->led_lock);
	if (!led_active && time_after(jiffies, led_next_time)) {
		/* light the network leds depending on the current speed. */
		e100_set_network_leds(NETWORK_ACTIVITY);

		/* Set the earliest time we may clear the LED */
		led_next_time = jiffies + NET_FLASH_TIME;
		led_active = 1;
		mod_timer(&clear_led_timer, jiffies + HZ/10);
	}
	spin_unlock(&np->led_lock);

	length = myNextRxDesc->descr.hw_len - 4;
	np->stats.rx_bytes += length;

#ifdef ETHDEBUG
	printk("Got a packet of length %d:\n", length);
	/* dump the first bytes in the packet */
	skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf);
	for (i = 0; i < 8; i++) {
		printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8,
		       skb_data_ptr[0], skb_data_ptr[1], skb_data_ptr[2], skb_data_ptr[3],
		       skb_data_ptr[4], skb_data_ptr[5], skb_data_ptr[6], skb_data_ptr[7]);
		skb_data_ptr += 8;
	}
#endif

	if (length < RX_COPYBREAK) {
		/* Small packet, copy data */
		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
		if (!skb) {
			np->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			goto update_nextrxdesc;
		}

		skb_put(skb, length - ETHER_HEAD_LEN);        /* allocate room for the packet body */
		skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */

#ifdef ETHDEBUG
		printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n",
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb));
		printk("copying packet to 0x%x.\n", skb_data_ptr);
#endif

		memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length);
	} else {
		/* Large packet, send directly to upper layers and allocate new
		 * memory (aligned to cache line boundary to avoid bug).
		 * Before sending the skb to upper layers we must make sure
		 * that skb->data points to the aligned start of the packet.
		 */
		int align;
		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
		if (!new_skb) {
			np->stats.rx_errors++;
			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
			goto update_nextrxdesc;
		}
		skb = myNextRxDesc->skb;
		align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
		skb_put(skb, length + align);
		skb_pull(skb, align); /* Remove alignment bytes */
		myNextRxDesc->skb = new_skb;
		myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data));
	}

	skb->protocol = eth_type_trans(skb, dev);

	/* Send the packet to the upper layers */
	netif_rx(skb);

  update_nextrxdesc:
	/* Prepare for next packet */
	myNextRxDesc->descr.status = 0;
	prevRxDesc = myNextRxDesc;
	myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);

	rx_queue_len++;

	/* Check if descriptors should be returned */
	if (rx_queue_len == RX_QUEUE_THRESHOLD) {
		flush_etrax_cache();
		prevRxDesc->descr.ctrl |= d_eol;
		myLastRxDesc->descr.ctrl &= ~d_eol;
		myLastRxDesc = prevRxDesc;
		rx_queue_len = 0;
	}
}
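
/*
 * Descriptor recycling in e100_rx() above: instead of handing every processed
 * descriptor straight back to the DMA, the end-of-list marker is moved forward
 * RX_QUEUE_THRESHOLD descriptors at a time.  The most recently processed
 * descriptor gets d_eol set and becomes the new myLastRxDesc, the old
 * end-of-list is reopened, and flush_etrax_cache() is called first to work
 * around the cache bug mentioned at the top of the file.
 */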
1331 | ||
1332 | /* The inverse routine to net_open(). */ | |
1333 | static int | |
1334 | e100_close(struct net_device *dev) | |
1335 | { | |
bafef0ae | 1336 | struct net_local *np = netdev_priv(dev); |
1da177e4 LT |
1337 | |
1338 | printk(KERN_INFO "Closing %s.\n", dev->name); | |
1339 | ||
1340 | netif_stop_queue(dev); | |
1341 | ||
1342 | *R_IRQ_MASK0_CLR = | |
1343 | IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | | |
1344 | IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | | |
1345 | IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); | |
1346 | ||
1347 | *R_IRQ_MASK2_CLR = | |
1348 | IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | | |
1349 | IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | | |
1350 | IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | | |
1351 | IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); | |
1352 | ||
1353 | /* Stop the receiver and the transmitter */ | |
1354 | ||
1355 | RESET_DMA(NETWORK_TX_DMA_NBR); | |
1356 | RESET_DMA(NETWORK_RX_DMA_NBR); | |
1357 | ||
1358 | /* Flush the Tx and disable Rx here. */ | |
1359 | ||
1360 | free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); | |
1361 | free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); | |
1362 | free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); | |
1363 | ||
bafef0ae JN |
1364 | cris_free_dma(NETWORK_TX_DMA_NBR, cardname); |
1365 | cris_free_dma(NETWORK_RX_DMA_NBR, cardname); | |
1366 | ||
1da177e4 LT |
1367 | /* Update the statistics here. */ |
1368 | ||
1369 | update_rx_stats(&np->stats); | |
1370 | update_tx_stats(&np->stats); | |
1371 | ||
1372 | /* Stop speed/duplex timers */ | |
1373 | del_timer(&speed_timer); | |
1374 | del_timer(&duplex_timer); | |
1375 | ||
1376 | return 0; | |
1377 | } | |
1378 | ||
1379 | static int | |
1380 | e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |
1381 | { | |
1382 | struct mii_ioctl_data *data = if_mii(ifr); | |
1383 | struct net_local *np = netdev_priv(dev); | |
bafef0ae JN |
1384 | int rc = 0; |
1385 | int old_autoneg; | |
1da177e4 LT |
1386 | |
1387 | spin_lock(&np->lock); /* Preempt protection */ | |
1388 | switch (cmd) { | |
1da177e4 LT |
1389 | /* The ioctls below should be considered obsolete but are */ |
1390 | /* still present for compatability with old scripts/apps */ | |
	case SET_ETH_SPEED_10: /* 10 Mbps */
		e100_set_speed(dev, 10);
		break;
	case SET_ETH_SPEED_100: /* 100 Mbps */
		e100_set_speed(dev, 100);
		break;
	case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
		e100_set_speed(dev, 0);
		break;
	case SET_ETH_DUPLEX_HALF: /* Half duplex */
		e100_set_duplex(dev, half);
		break;
	case SET_ETH_DUPLEX_FULL: /* Full duplex */
		e100_set_duplex(dev, full);
		break;
	case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
		e100_set_duplex(dev, autoneg);
		break;
	case SET_ETH_AUTONEG:
		old_autoneg = autoneg_normal;
		autoneg_normal = *(int *)data;
		if (autoneg_normal != old_autoneg)
			e100_negotiate(dev);
		break;
	default:
		rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
				       cmd, NULL);
		break;
	}
	spin_unlock(&np->lock);
	return rc;
}

static int e100_get_settings(struct net_device *dev,
			     struct ethtool_cmd *cmd)
{
	struct net_local *np = netdev_priv(dev);
	int err;

	spin_lock_irq(&np->lock);
	err = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	/* The PHY may support 1000baseT, but the Etrax100 does not. */
	cmd->supported &= ~(SUPPORTED_1000baseT_Half
			    | SUPPORTED_1000baseT_Full);
	return err;
}

static int e100_set_settings(struct net_device *dev,
			     struct ethtool_cmd *ecmd)
{
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		e100_set_duplex(dev, autoneg);
		e100_set_speed(dev, 0);
	} else {
		e100_set_duplex(dev, ecmd->duplex == DUPLEX_HALF ? half : full);
		e100_set_speed(dev, ecmd->speed == SPEED_10 ? 10 : 100);
	}

	return 0;
}

static void e100_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strncpy(info->driver, "ETRAX 100LX", sizeof(info->driver) - 1);
	strncpy(info->version, "$Revision: 1.31 $", sizeof(info->version) - 1);
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
}

static int e100_nway_reset(struct net_device *dev)
{
	if (current_duplex == autoneg && current_speed_selection == 0)
		e100_negotiate(dev);
	return 0;
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings	= e100_get_settings,
	.set_settings	= e100_set_settings,
	.get_drvinfo	= e100_get_drvinfo,
	.nway_reset	= e100_nway_reset,
	.get_link	= ethtool_op_get_link,
};
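
/*
 * The ethtool_ops above are reached through the SIOCETHTOOL ioctl.  As a
 * rough illustration only (not part of this driver), a minimal user-space
 * sketch that forces 10 Mbit half duplex through e100_set_settings() could
 * look like the following; the function name set_10_half and the interface
 * name passed in are our own assumptions:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	int set_10_half(const char *ifname)	// e.g. "eth0"
 *	{
 *		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&ecmd;
 *
 *		// read current settings (ends up in e100_get_settings)
 *		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
 *			return -1;
 *
 *		// write back forced speed/duplex (ends up in e100_set_settings)
 *		ecmd.cmd = ETHTOOL_SSET;
 *		ecmd.autoneg = AUTONEG_DISABLE;
 *		ecmd.speed = SPEED_10;
 *		ecmd.duplex = DUPLEX_HALF;
 *		return ioctl(fd, SIOCETHTOOL, &ifr);
 *	}
 */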

static int
e100_set_config(struct net_device *dev, struct ifmap *map)
{
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->lock); /* Preempt protection */

	switch (map->port) {
	case IF_PORT_UNKNOWN:
		/* Use autoneg */
		e100_set_speed(dev, 0);
		e100_set_duplex(dev, autoneg);
		break;
	case IF_PORT_10BASET:
		e100_set_speed(dev, 10);
		e100_set_duplex(dev, autoneg);
		break;
	case IF_PORT_100BASET:
	case IF_PORT_100BASETX:
		e100_set_speed(dev, 100);
		e100_set_duplex(dev, autoneg);
		break;
	case IF_PORT_100BASEFX:
	case IF_PORT_10BASE2:
	case IF_PORT_AUI:
		spin_unlock(&np->lock);
		return -EOPNOTSUPP;
	default:
		printk(KERN_ERR "%s: Invalid media selected\n", dev->name);
		spin_unlock(&np->lock);
		return -EINVAL;
	}
	spin_unlock(&np->lock);
	return 0;
}

static void
update_rx_stats(struct net_device_stats *es)
{
	unsigned long r = *R_REC_COUNTERS;
	/* update stats relevant to reception errors */
	es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r);
	es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r);
	es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r);
	es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r);
}

static void
update_tx_stats(struct net_device_stats *es)
{
	unsigned long r = *R_TR_COUNTERS;
	/* update stats relevant to transmission errors */
	es->collisions +=
		IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
		IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
}

/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *
e100_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	update_rx_stats(&lp->stats);
	update_tx_stats(&lp->stats);

	spin_unlock_irqrestore(&lp->lock, flags);
	return &lp->stats;
}

/*
 * Set or clear the multicast filter for this adaptor.
 * IFF_PROMISC		Promiscuous mode, receive all packets.
 * IFF_ALLMULTI		Receive all multicast packets.
 * Empty mc list	Normal mode, clear the group address filter.
 * Otherwise		Receive normal and multicast packets and do
 *			best-effort filtering via the group address hash.
 */
static void
set_multicast_list(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int num_addr = netdev_mc_count(dev);
	unsigned long int lo_bits;
	unsigned long int hi_bits;

	spin_lock(&lp->lock);
	if (dev->flags & IFF_PROMISC) {
		/* promiscuous mode */
		lo_bits = 0xfffffffful;
		hi_bits = 0xfffffffful;

		/* Enable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* enable all multicasts */
		lo_bits = 0xfffffffful;
		hi_bits = 0xfffffffful;

		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else if (num_addr == 0) {
		/* Normal, clear the mc list */
		lo_bits = 0x00000000ul;
		hi_bits = 0x00000000ul;

		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	} else {
		/* MC mode, receive normal and MC packets */
		char hash_ix;
		struct netdev_hw_addr *ha;
		char *baddr;

		lo_bits = 0x00000000ul;
		hi_bits = 0x00000000ul;
		netdev_for_each_mc_addr(ha, dev) {
			/* Calculate the hash index for the GA registers */
			hash_ix = 0;
			baddr = ha->addr;
			hash_ix ^= (*baddr) & 0x3f;
			hash_ix ^= ((*baddr) >> 6) & 0x03;
			++baddr;
			hash_ix ^= ((*baddr) << 2) & 0x03c;
			hash_ix ^= ((*baddr) >> 4) & 0xf;
			++baddr;
			hash_ix ^= ((*baddr) << 4) & 0x30;
			hash_ix ^= ((*baddr) >> 2) & 0x3f;
			++baddr;
			hash_ix ^= (*baddr) & 0x3f;
			hash_ix ^= ((*baddr) >> 6) & 0x03;
			++baddr;
			hash_ix ^= ((*baddr) << 2) & 0x03c;
			hash_ix ^= ((*baddr) >> 4) & 0xf;
			++baddr;
			hash_ix ^= ((*baddr) << 4) & 0x30;
			hash_ix ^= ((*baddr) >> 2) & 0x3f;

			hash_ix &= 0x3f;

			if (hash_ix >= 32) {
				hi_bits |= (1 << (hash_ix - 32));
			} else {
				lo_bits |= (1 << hash_ix);
			}
		}
		/* Disable individual receive */
		SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
		*R_NETWORK_REC_CONFIG = network_rec_config_shadow;
	}
	*R_NETWORK_GA_0 = lo_bits;
	*R_NETWORK_GA_1 = hi_bits;
	spin_unlock(&lp->lock);
}
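
/*
 * For reference only: the unrolled XOR chain above folds the 48 address
 * bits (taken LSB first within each byte) into eight consecutive 6-bit
 * chunks and XORs them together.  The helper below is an equivalent,
 * loop-based sketch of that hash; it is not called by the driver and its
 * name is our own, kept purely as documentation of the computation.
 */
static inline unsigned char e100_mc_hash_sketch(const unsigned char *addr)
{
	unsigned int acc = 0;	/* pending address bits, lowest bits first */
	int nbits = 0;		/* number of valid bits in acc */
	unsigned char hash = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		acc |= (unsigned int)addr[i] << nbits;
		nbits += 8;
		while (nbits >= 6) {
			hash ^= acc & 0x3f;	/* fold in one 6-bit chunk */
			acc >>= 6;
			nbits -= 6;
		}
	}
	return hash & 0x3f;
}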

void
e100_hardware_send_packet(struct net_local *np, char *buf, int length)
{
	D(printk("e100 send pack, buf %p len %d\n", buf, length));

	spin_lock(&np->led_lock);
	if (!led_active && time_after(jiffies, led_next_time)) {
		/* light the network leds depending on the current speed. */
		e100_set_network_leds(NETWORK_ACTIVITY);

		/* Set the earliest time we may clear the LED */
		led_next_time = jiffies + NET_FLASH_TIME;
		led_active = 1;
		mod_timer(&clear_led_timer, jiffies + HZ/10);
	}
	spin_unlock(&np->led_lock);

	/* configure the tx dma descriptor */
	myNextTxDesc->descr.sw_len = length;
	myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait;
	myNextTxDesc->descr.buf = virt_to_phys(buf);

	/* Move end of list */
	myLastTxDesc->descr.ctrl &= ~d_eol;
	myLastTxDesc = myNextTxDesc;

	/* Restart DMA channel */
	*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart);
}
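
/*
 * The transmit path above hands a new buffer to the DMA engine by marking
 * the new descriptor end-of-list (d_eol), clearing d_eol on the previous
 * tail and restarting the channel, so the engine simply continues past the
 * old end.  A generic sketch of that handoff, with made-up names rather
 * than the real ETRAX descriptor bits:
 *
 *	struct txd { unsigned long ctrl; };
 *	#define MY_EOL 0x1			// placeholder end-of-list bit
 *
 *	static void ring_append(struct txd **tail, struct txd *fresh)
 *	{
 *		fresh->ctrl |= MY_EOL;		// new descriptor ends the list
 *		(*tail)->ctrl &= ~MY_EOL;	// old tail no longer does
 *		*tail = fresh;			// DMA may now advance past it
 *	}
 */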

static void
e100_clear_network_leds(unsigned long dummy)
{
	struct net_device *dev = (struct net_device *)dummy;
	struct net_local *np = netdev_priv(dev);

	spin_lock(&np->led_lock);

	if (led_active && time_after(jiffies, led_next_time)) {
		e100_set_network_leds(NO_NETWORK_ACTIVITY);

		/* Set the earliest time we may set the LED */
		led_next_time = jiffies + NET_FLASH_PAUSE;
		led_active = 0;
	}

	spin_unlock(&np->led_lock);
}

static void
e100_set_network_leds(int active)
{
#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
	int light_leds = (active == NO_NETWORK_ACTIVITY);
#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
	int light_leds = (active == NETWORK_ACTIVITY);
#else
#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
#endif

	if (!current_speed) {
		/* Make LED red, link is down */
#if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION)
		CRIS_LED_NETWORK_SET(CRIS_LED_RED);
#else
		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
#endif
	} else if (light_leds) {
		if (current_speed == 10) {
			CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE);
		} else {
			CRIS_LED_NETWORK_SET(CRIS_LED_GREEN);
		}
	} else {
		CRIS_LED_NETWORK_SET(CRIS_LED_OFF);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
e100_netpoll(struct net_device *netdev)
{
	e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
}
#endif

static int
etrax_init_module(void)
{
	return etrax_ethernet_init();
}

static int __init
e100_boot_setup(char *str)
{
	struct sockaddr sa = {0};
	int i;

	/* Parse the colon separated Ethernet station address */
	for (i = 0; i < ETH_ALEN; i++) {
		unsigned int tmp;
		if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
			printk(KERN_WARNING "Malformed station address\n");
			return 0;
		}
		sa.sa_data[i] = (char)tmp;
	}

	default_mac = sa;
	return 1;
}

__setup("etrax100_eth=", e100_boot_setup);
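
/*
 * Example boot argument, in the colon-separated hex format parsed above
 * (the address value here is only an arbitrary illustration):
 *
 *	etrax100_eth=00:40:8c:12:34:56
 */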

module_init(etrax_init_module);