Merge remote-tracking branch 'nios2/for-next'
[deliverable/linux.git] / drivers / net / ethernet / nxp / lpc_eth.c
1 /*
2 * drivers/net/ethernet/nxp/lpc_eth.c
3 *
4 * Author: Kevin Wells <kevin.wells@nxp.com>
5 *
6 * Copyright (C) 2010 NXP Semiconductors
7 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/interrupt.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/crc32.h>
31 #include <linux/platform_device.h>
32 #include <linux/spinlock.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/clk.h>
36 #include <linux/workqueue.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/skbuff.h>
40 #include <linux/phy.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/of.h>
43 #include <linux/of_net.h>
44 #include <linux/types.h>
45
46 #include <linux/io.h>
47 #include <mach/board.h>
48 #include <mach/platform.h>
49 #include <mach/hardware.h>
50
51 #define MODNAME "lpc-eth"
52 #define DRV_VERSION "1.00"
53
54 #define ENET_MAXF_SIZE 1536
55 #define ENET_RX_DESC 48
56 #define ENET_TX_DESC 16
57
58 #define NAPI_WEIGHT 16
59
60 /*
61 * Ethernet MAC controller Register offsets
62 */
63 #define LPC_ENET_MAC1(x) (x + 0x000)
64 #define LPC_ENET_MAC2(x) (x + 0x004)
65 #define LPC_ENET_IPGT(x) (x + 0x008)
66 #define LPC_ENET_IPGR(x) (x + 0x00C)
67 #define LPC_ENET_CLRT(x) (x + 0x010)
68 #define LPC_ENET_MAXF(x) (x + 0x014)
69 #define LPC_ENET_SUPP(x) (x + 0x018)
70 #define LPC_ENET_TEST(x) (x + 0x01C)
71 #define LPC_ENET_MCFG(x) (x + 0x020)
72 #define LPC_ENET_MCMD(x) (x + 0x024)
73 #define LPC_ENET_MADR(x) (x + 0x028)
74 #define LPC_ENET_MWTD(x) (x + 0x02C)
75 #define LPC_ENET_MRDD(x) (x + 0x030)
76 #define LPC_ENET_MIND(x) (x + 0x034)
77 #define LPC_ENET_SA0(x) (x + 0x040)
78 #define LPC_ENET_SA1(x) (x + 0x044)
79 #define LPC_ENET_SA2(x) (x + 0x048)
80 #define LPC_ENET_COMMAND(x) (x + 0x100)
81 #define LPC_ENET_STATUS(x) (x + 0x104)
82 #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
83 #define LPC_ENET_RXSTATUS(x) (x + 0x10C)
84 #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
85 #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
86 #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
87 #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
88 #define LPC_ENET_TXSTATUS(x) (x + 0x120)
89 #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
90 #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
91 #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
92 #define LPC_ENET_TSV0(x) (x + 0x158)
93 #define LPC_ENET_TSV1(x) (x + 0x15C)
94 #define LPC_ENET_RSV(x) (x + 0x160)
95 #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
96 #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
97 #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
98 #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
99 #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
100 #define LPC_ENET_HASHFILTERL(x) (x + 0x210)
101 #define LPC_ENET_HASHFILTERH(x) (x + 0x214)
102 #define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
103 #define LPC_ENET_INTENABLE(x) (x + 0xFE4)
104 #define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
105 #define LPC_ENET_INTSET(x) (x + 0xFEC)
106 #define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
107
108 /*
109 * mac1 register definitions
110 */
111 #define LPC_MAC1_RECV_ENABLE (1 << 0)
112 #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
113 #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
114 #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
115 #define LPC_MAC1_LOOPBACK (1 << 4)
116 #define LPC_MAC1_RESET_TX (1 << 8)
117 #define LPC_MAC1_RESET_MCS_TX (1 << 9)
118 #define LPC_MAC1_RESET_RX (1 << 10)
119 #define LPC_MAC1_RESET_MCS_RX (1 << 11)
120 #define LPC_MAC1_SIMULATION_RESET (1 << 14)
121 #define LPC_MAC1_SOFT_RESET (1 << 15)
122
123 /*
124 * mac2 register definitions
125 */
126 #define LPC_MAC2_FULL_DUPLEX (1 << 0)
127 #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
128 #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
129 #define LPC_MAC2_DELAYED_CRC (1 << 3)
130 #define LPC_MAC2_CRC_ENABLE (1 << 4)
131 #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
132 #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
133 #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
134 #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
135 #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
136 #define LPC_MAC2_NO_BACKOFF (1 << 12)
137 #define LPC_MAC2_BACK_PRESSURE (1 << 13)
138 #define LPC_MAC2_EXCESS_DEFER (1 << 14)
139
140 /*
141 * ipgt register definitions
142 */
143 #define LPC_IPGT_LOAD(n) ((n) & 0x7F)
144
145 /*
146 * ipgr register definitions
147 */
148 #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
149 #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
150
151 /*
152 * clrt register definitions
153 */
154 #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
155 #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
156
157 /*
158 * maxf register definitions
159 */
160 #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
161
162 /*
163 * supp register definitions
164 */
165 #define LPC_SUPP_SPEED (1 << 8)
166 #define LPC_SUPP_RESET_RMII (1 << 11)
167
168 /*
169 * test register definitions
170 */
171 #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
172 #define LPC_TEST_PAUSE (1 << 1)
173 #define LPC_TEST_BACKPRESSURE (1 << 2)
174
175 /*
176 * mcfg register definitions
177 */
178 #define LPC_MCFG_SCAN_INCREMENT (1 << 0)
179 #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
180 #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
181 #define LPC_MCFG_CLOCK_HOST_DIV_4 0
182 #define LPC_MCFG_CLOCK_HOST_DIV_6 2
183 #define LPC_MCFG_CLOCK_HOST_DIV_8 3
184 #define LPC_MCFG_CLOCK_HOST_DIV_10 4
185 #define LPC_MCFG_CLOCK_HOST_DIV_14 5
186 #define LPC_MCFG_CLOCK_HOST_DIV_20 6
187 #define LPC_MCFG_CLOCK_HOST_DIV_28 7
188 #define LPC_MCFG_RESET_MII_MGMT (1 << 15)
189
190 /*
191 * mcmd register definitions
192 */
193 #define LPC_MCMD_READ (1 << 0)
194 #define LPC_MCMD_SCAN (1 << 1)
195
196 /*
197 * madr register definitions
198 */
199 #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
200 #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
201
202 /*
203 * mwtd register definitions
204 */
205 #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
206
207 /*
208 * mrdd register definitions
209 */
210 #define LPC_MRDD_READ_MASK 0xFFFF
211
212 /*
213 * mind register definitions
214 */
215 #define LPC_MIND_BUSY (1 << 0)
216 #define LPC_MIND_SCANNING (1 << 1)
217 #define LPC_MIND_NOT_VALID (1 << 2)
218 #define LPC_MIND_MII_LINK_FAIL (1 << 3)
219
220 /*
221 * command register definitions
222 */
223 #define LPC_COMMAND_RXENABLE (1 << 0)
224 #define LPC_COMMAND_TXENABLE (1 << 1)
225 #define LPC_COMMAND_REG_RESET (1 << 3)
226 #define LPC_COMMAND_TXRESET (1 << 4)
227 #define LPC_COMMAND_RXRESET (1 << 5)
228 #define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
229 #define LPC_COMMAND_PASSRXFILTER (1 << 7)
230 #define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
231 #define LPC_COMMAND_RMII (1 << 9)
232 #define LPC_COMMAND_FULLDUPLEX (1 << 10)
233
234 /*
235 * status register definitions
236 */
237 #define LPC_STATUS_RXACTIVE (1 << 0)
238 #define LPC_STATUS_TXACTIVE (1 << 1)
239
240 /*
241 * tsv0 register definitions
242 */
243 #define LPC_TSV0_CRC_ERROR (1 << 0)
244 #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
245 #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
246 #define LPC_TSV0_DONE (1 << 3)
247 #define LPC_TSV0_MULTICAST (1 << 4)
248 #define LPC_TSV0_BROADCAST (1 << 5)
249 #define LPC_TSV0_PACKET_DEFER (1 << 6)
250 #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
251 #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
252 #define LPC_TSV0_LATE_COLLISION (1 << 9)
253 #define LPC_TSV0_GIANT (1 << 10)
254 #define LPC_TSV0_UNDERRUN (1 << 11)
255 #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
256 #define LPC_TSV0_CONTROL_FRAME (1 << 28)
257 #define LPC_TSV0_PAUSE (1 << 29)
258 #define LPC_TSV0_BACKPRESSURE (1 << 30)
259 #define LPC_TSV0_VLAN (1 << 31)
260
261 /*
262 * tsv1 register definitions
263 */
264 #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
265 #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
266
267 /*
268 * rsv register definitions
269 */
270 #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
271 #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
272 #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
273 #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
274 #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
275 #define LPC_RSV_CRC_ERROR (1 << 20)
276 #define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
277 #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
278 #define LPC_RSV_RECEIVE_OK (1 << 23)
279 #define LPC_RSV_MULTICAST (1 << 24)
280 #define LPC_RSV_BROADCAST (1 << 25)
281 #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
282 #define LPC_RSV_CONTROL_FRAME (1 << 27)
283 #define LPC_RSV_PAUSE (1 << 28)
284 #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
285 #define LPC_RSV_VLAN (1 << 30)
286
287 /*
288 * flowcontrolcounter register definitions
289 */
290 #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
291 #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
292
293 /*
294 * flowcontrolstatus register definitions
295 */
296 #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
297
298 /*
299 * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
300 * register definitions
301 */
302 #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
303 #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
304 #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
305 #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
306 #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
307 #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
308
309 /*
310 * rxfliterctrl register definitions
311 */
312 #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
313 #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
314
315 /*
316 * rxfilterwolstatus/rxfilterwolclear register definitions
317 */
318 #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
319 #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
320
321 /*
322 * intstatus, intenable, intclear, and Intset shared register
323 * definitions
324 */
325 #define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
326 #define LPC_MACINT_RXERRORONINT (1 << 1)
327 #define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
328 #define LPC_MACINT_RXDONEINTEN (1 << 3)
329 #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
330 #define LPC_MACINT_TXERRORINTEN (1 << 5)
331 #define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
332 #define LPC_MACINT_TXDONEINTEN (1 << 7)
333 #define LPC_MACINT_SOFTINTEN (1 << 12)
334 #define LPC_MACINT_WAKEUPINTEN (1 << 13)
335
336 /*
337 * powerdown register definitions
338 */
339 #define LPC_POWERDOWN_MACAHB (1 << 31)
340
341 static phy_interface_t lpc_phy_interface_mode(struct device *dev)
342 {
343 if (dev && dev->of_node) {
344 const char *mode = of_get_property(dev->of_node,
345 "phy-mode", NULL);
346 if (mode && !strcmp(mode, "mii"))
347 return PHY_INTERFACE_MODE_MII;
348 }
349 return PHY_INTERFACE_MODE_RMII;
350 }
351
352 static bool use_iram_for_net(struct device *dev)
353 {
354 if (dev && dev->of_node)
355 return of_property_read_bool(dev->of_node, "use-iram");
356 return false;
357 }
358
359 /* Receive Status information word */
360 #define RXSTATUS_SIZE 0x000007FF
361 #define RXSTATUS_CONTROL (1 << 18)
362 #define RXSTATUS_VLAN (1 << 19)
363 #define RXSTATUS_FILTER (1 << 20)
364 #define RXSTATUS_MULTICAST (1 << 21)
365 #define RXSTATUS_BROADCAST (1 << 22)
366 #define RXSTATUS_CRC (1 << 23)
367 #define RXSTATUS_SYMBOL (1 << 24)
368 #define RXSTATUS_LENGTH (1 << 25)
369 #define RXSTATUS_RANGE (1 << 26)
370 #define RXSTATUS_ALIGN (1 << 27)
371 #define RXSTATUS_OVERRUN (1 << 28)
372 #define RXSTATUS_NODESC (1 << 29)
373 #define RXSTATUS_LAST (1 << 30)
374 #define RXSTATUS_ERROR (1 << 31)
375
376 #define RXSTATUS_STATUS_ERROR \
377 (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
378 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
379
380 /* Receive Descriptor control word */
381 #define RXDESC_CONTROL_SIZE 0x000007FF
382 #define RXDESC_CONTROL_INT (1 << 31)
383
384 /* Transmit Status information word */
385 #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
386 #define TXSTATUS_DEFER (1 << 25)
387 #define TXSTATUS_EXCESSDEFER (1 << 26)
388 #define TXSTATUS_EXCESSCOLL (1 << 27)
389 #define TXSTATUS_LATECOLL (1 << 28)
390 #define TXSTATUS_UNDERRUN (1 << 29)
391 #define TXSTATUS_NODESC (1 << 30)
392 #define TXSTATUS_ERROR (1 << 31)
393
394 /* Transmit Descriptor control word */
395 #define TXDESC_CONTROL_SIZE 0x000007FF
396 #define TXDESC_CONTROL_OVERRIDE (1 << 26)
397 #define TXDESC_CONTROL_HUGE (1 << 27)
398 #define TXDESC_CONTROL_PAD (1 << 28)
399 #define TXDESC_CONTROL_CRC (1 << 29)
400 #define TXDESC_CONTROL_LAST (1 << 30)
401 #define TXDESC_CONTROL_INT (1 << 31)
402
403 /*
404 * Structure of a TX/RX descriptors and RX status
405 */
/* Hardware TX/RX descriptor: buffer address plus a control word. */
struct txrx_desc_t {
	__le32 packet;		/* bus address of the packet buffer (set via __va_to_pa) */
	__le32 control;		/* size field and RXDESC_/TXDESC_CONTROL_* flags */
};
/* Hardware RX status entry, one per RX descriptor. */
struct rx_status_t {
	__le32 statusinfo;	/* received size and RXSTATUS_* flags */
	__le32 statushashcrc;	/* hash CRC word; cleared at setup, otherwise unused here */
};
414
415 /*
416 * Device driver data structure
417 */
/*
 * Per-device driver state, stored as netdev_priv() of the net_device.
 */
struct netdata_local {
	struct platform_device *pdev;		/* owning platform device */
	struct net_device *ndev;		/* associated network device */
	spinlock_t lock;			/* guards MAC register access and TX state */
	void __iomem *net_base;			/* mapped MAC register base */
	u32 msg_enable;				/* netif message level bitmask */
	unsigned int skblen[ENET_TX_DESC];	/* queued frame length per TX slot */
	unsigned int last_tx_idx;		/* next TX slot to reclaim */
	unsigned int num_used_tx_buffs;		/* TX slots currently in flight */
	struct mii_bus *mii_bus;		/* MDIO bus for the PHY */
	struct clk *clk;			/* MAC peripheral clock */
	dma_addr_t dma_buff_base_p;		/* bus (DMA) base of the buffer area */
	void *dma_buff_base_v;			/* CPU (virtual) base of the buffer area */
	size_t dma_buff_size;			/* size of the DMA buffer area */
	struct txrx_desc_t *tx_desc_v;		/* TX descriptor array (in DMA area) */
	u32 *tx_stat_v;				/* TX status word array */
	void *tx_buff_v;			/* TX packet buffers */
	struct txrx_desc_t *rx_desc_v;		/* RX descriptor array */
	struct rx_status_t *rx_stat_v;		/* RX status array */
	void *rx_buff_v;			/* RX packet buffers */
	int link;				/* cached PHY link state */
	int speed;				/* cached PHY speed (0 = link down) */
	int duplex;				/* cached PHY duplex (-1 = unknown) */
	struct napi_struct napi;		/* NAPI context for RX/TX completion */
};
443
444 /*
445 * MAC support functions
446 */
447 static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
448 {
449 u32 tmp;
450
451 /* Set station address */
452 tmp = mac[0] | ((u32)mac[1] << 8);
453 writel(tmp, LPC_ENET_SA2(pldat->net_base));
454 tmp = mac[2] | ((u32)mac[3] << 8);
455 writel(tmp, LPC_ENET_SA1(pldat->net_base));
456 tmp = mac[4] | ((u32)mac[5] << 8);
457 writel(tmp, LPC_ENET_SA0(pldat->net_base));
458
459 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
460 }
461
462 static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
463 {
464 u32 tmp;
465
466 /* Get station address */
467 tmp = readl(LPC_ENET_SA2(pldat->net_base));
468 mac[0] = tmp & 0xFF;
469 mac[1] = tmp >> 8;
470 tmp = readl(LPC_ENET_SA1(pldat->net_base));
471 mac[2] = tmp & 0xFF;
472 mac[3] = tmp >> 8;
473 tmp = readl(LPC_ENET_SA0(pldat->net_base));
474 mac[4] = tmp & 0xFF;
475 mac[5] = tmp >> 8;
476 }
477
/*
 * Program MAC duplex and speed from the cached PHY state
 * (pldat->duplex / pldat->speed, updated by lpc_handle_link_change()).
 */
static void __lpc_params_setup(struct netdata_local *pldat)
{
	u32 tmp;

	if (pldat->duplex == DUPLEX_FULL) {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp |= LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp |= LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* Full-duplex back-to-back inter-packet gap */
		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
	} else {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp &= ~LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp &= ~LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* Half-duplex inter-packet gap */
		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
	}

	/* SUPP speed bit selects 100Mbps mode; cleared for 10Mbps */
	if (pldat->speed == SPEED_100)
		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
	else
		writel(0, LPC_ENET_SUPP(pldat->net_base));
}
505
/* Assert every MAC reset bit, then reset the TX/RX datapath and
 * host registers via the COMMAND register.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
515
/*
 * Reset the MII management block, then select the slowest MDIO clock
 * (host clock / 28) so it is safe for any attached PHY.
 * Always returns 0 (kept int for the mii_bus reset hook contract).
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
527
528 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
529 {
530 phys_addr_t phaddr;
531
532 phaddr = addr - pldat->dma_buff_base_v;
533 phaddr += pldat->dma_buff_base_p;
534
535 return phaddr;
536 }
537
/* Enable the RX-done and TX-done MAC interrupt sources. */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
543
/* Mask every MAC interrupt source. */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
548
549 /* Setup TX/RX descriptors */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	/*
	 * Carve the single DMA area into consecutive 16-byte-aligned
	 * regions: TX descriptors, TX status words, TX buffers, then
	 * the RX descriptors, RX status entries, and RX buffers.
	 */
	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		/* Interrupt per frame; the size field encodes "bytes - 1" */
		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
622
623 static void __lpc_eth_init(struct netdata_local *pldat)
624 {
625 u32 tmp;
626
627 /* Disable controller and reset */
628 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
629 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
630 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
631 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
632 tmp &= ~LPC_MAC1_RECV_ENABLE;
633 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
634
635 /* Initial MAC setup */
636 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
637 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
638 LPC_ENET_MAC2(pldat->net_base));
639 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
640
641 /* Collision window, gap */
642 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
643 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
644 LPC_ENET_CLRT(pldat->net_base));
645 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
646
647 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
648 writel(LPC_COMMAND_PASSRUNTFRAME,
649 LPC_ENET_COMMAND(pldat->net_base));
650 else {
651 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
652 LPC_ENET_COMMAND(pldat->net_base));
653 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
654 }
655
656 __lpc_params_setup(pldat);
657
658 /* Setup TX and RX descriptors */
659 __lpc_txrx_desc_setup(pldat);
660
661 /* Setup packet filtering */
662 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
663 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
664
665 /* Get the next TX buffer output index */
666 pldat->num_used_tx_buffs = 0;
667 pldat->last_tx_idx =
668 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
669
670 /* Clear and enable interrupts */
671 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
672 smp_wmb();
673 lpc_eth_enable_int(pldat->net_base);
674
675 /* Enable controller */
676 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
677 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
678 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
679 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
680 tmp |= LPC_MAC1_RECV_ENABLE;
681 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
682 }
683
/* Quiesce the controller: full MAC reset, then clear MAC1/MAC2. */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
691
692 /*
693 * MAC<--->PHY support functions
694 */
695 static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
696 {
697 struct netdata_local *pldat = bus->priv;
698 unsigned long timeout = jiffies + msecs_to_jiffies(100);
699 int lps;
700
701 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
702 writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
703
704 /* Wait for unbusy status */
705 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
706 if (time_after(jiffies, timeout))
707 return -EIO;
708 cpu_relax();
709 }
710
711 lps = readl(LPC_ENET_MRDD(pldat->net_base));
712 writel(0, LPC_ENET_MCMD(pldat->net_base));
713
714 return lps;
715 }
716
717 static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
718 u16 phydata)
719 {
720 struct netdata_local *pldat = bus->priv;
721 unsigned long timeout = jiffies + msecs_to_jiffies(100);
722
723 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
724 writel(phydata, LPC_ENET_MWTD(pldat->net_base));
725
726 /* Wait for completion */
727 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
728 if (time_after(jiffies, timeout))
729 return -EIO;
730 cpu_relax();
731 }
732
733 return 0;
734 }
735
736 static int lpc_mdio_reset(struct mii_bus *bus)
737 {
738 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
739 }
740
/*
 * phylib adjust-link callback: cache the new speed/duplex/link state
 * under the driver lock, then reprogram the MAC if anything changed.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			/* Link went down: invalidate cached parameters */
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	/* Registers are written outside the lock, after state is cached */
	if (status_change)
		__lpc_params_setup(pldat);
}
775
/*
 * Find the first PHY on the MDIO bus and connect it to the netdev,
 * restricting advertised features to what this MAC supports.
 * Returns 0 on success or a negative errno.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, phydev_name(phydev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	/* Link state unknown until the first adjust-link callback */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
813
814 static int lpc_mii_init(struct netdata_local *pldat)
815 {
816 int err = -ENXIO;
817
818 pldat->mii_bus = mdiobus_alloc();
819 if (!pldat->mii_bus) {
820 err = -ENOMEM;
821 goto err_out;
822 }
823
824 /* Setup MII mode */
825 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
826 writel(LPC_COMMAND_PASSRUNTFRAME,
827 LPC_ENET_COMMAND(pldat->net_base));
828 else {
829 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
830 LPC_ENET_COMMAND(pldat->net_base));
831 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
832 }
833
834 pldat->mii_bus->name = "lpc_mii_bus";
835 pldat->mii_bus->read = &lpc_mdio_read;
836 pldat->mii_bus->write = &lpc_mdio_write;
837 pldat->mii_bus->reset = &lpc_mdio_reset;
838 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
839 pldat->pdev->name, pldat->pdev->id);
840 pldat->mii_bus->priv = pldat;
841 pldat->mii_bus->parent = &pldat->pdev->dev;
842
843 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
844
845 if (mdiobus_register(pldat->mii_bus))
846 goto err_out_unregister_bus;
847
848 if (lpc_mii_probe(pldat->ndev) != 0)
849 goto err_out_unregister_bus;
850
851 return 0;
852
853 err_out_unregister_bus:
854 mdiobus_unregister(pldat->mii_bus);
855 mdiobus_free(pldat->mii_bus);
856 err_out:
857 return err;
858 }
859
/*
 * Reclaim completed TX descriptors (caller holds the TX queue lock).
 * Walks from last_tx_idx up to the hardware consume index, accounts
 * per-frame stats, and wakes the queue once enough slots are free.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		/* Length of the frame that occupied this TX slot */
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: hardware may have consumed more meanwhile */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	/* Wake the queue once at least half the TX slots are free */
	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
915
/*
 * NAPI receive: process up to @budget descriptors between the RX
 * consume and produce indexes, copying good frames into freshly
 * allocated skbs. Returns the number of descriptors processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	u8 *prdbuf;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		/* The status size field encodes "bytes - 1" */
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		/* A bare range error on an otherwise clean frame is ignored */
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				prdbuf = skb_put(skb, len);

				/* Copy packet from buffer */
				memcpy(prdbuf, pldat->rx_buff_v +
				       rxconsidx * ENET_MAXF_SIZE, len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
987
/*
 * NAPI poll: reclaim TX completions under the TX queue lock (the xmit
 * path touches the same bookkeeping), then receive up to @budget
 * frames. Re-enables MAC interrupts when all RX work is done.
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	/* Budget not exhausted: complete NAPI and unmask interrupts */
	if (rx_done < budget) {
		napi_complete(napi);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
1008
/*
 * MAC interrupt handler: acknowledge all pending causes, mask further
 * interrupts, and hand RX/TX processing off to NAPI.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay masked until lpc_eth_poll() re-enables them */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1029
/*
 * ndo_stop: quiesce NAPI and the TX queue, stop the PHY, reset the
 * MAC under the driver lock, and gate the peripheral clock.
 * Always returns 0.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	clk_disable_unprepare(pldat->clk);

	return 0;
}
1055
/*
 * net_device_ops .ndo_start_xmit: queue one skb for transmission.
 *
 * The frame is copied into a driver-owned DMA buffer (no zero-copy
 * mapping), so the skb can be freed immediately after the descriptor is
 * handed to the MAC.  Returns NETDEV_TX_OK on success; NETDEV_TX_BUSY
 * only in the "should never happen" case of no free descriptor.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer: single (last) fragment,
	   interrupt on completion */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit: advancing the produce index hands the
	   descriptor to the hardware */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	/* Data was copied out above, so the skb is no longer needed */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1108
1109 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1110 {
1111 struct sockaddr *addr = p;
1112 struct netdata_local *pldat = netdev_priv(ndev);
1113 unsigned long flags;
1114
1115 if (!is_valid_ether_addr(addr->sa_data))
1116 return -EADDRNOTAVAIL;
1117 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1118
1119 spin_lock_irqsave(&pldat->lock, flags);
1120
1121 /* Set station address */
1122 __lpc_set_mac(pldat, ndev->dev_addr);
1123
1124 spin_unlock_irqrestore(&pldat->lock, flags);
1125
1126 return 0;
1127 }
1128
1129 static void lpc_eth_set_multicast_list(struct net_device *ndev)
1130 {
1131 struct netdata_local *pldat = netdev_priv(ndev);
1132 struct netdev_hw_addr_list *mcptr = &ndev->mc;
1133 struct netdev_hw_addr *ha;
1134 u32 tmp32, hash_val, hashlo, hashhi;
1135 unsigned long flags;
1136
1137 spin_lock_irqsave(&pldat->lock, flags);
1138
1139 /* Set station address */
1140 __lpc_set_mac(pldat, ndev->dev_addr);
1141
1142 tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;
1143
1144 if (ndev->flags & IFF_PROMISC)
1145 tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
1146 LPC_RXFLTRW_ACCEPTUMULTICAST;
1147 if (ndev->flags & IFF_ALLMULTI)
1148 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;
1149
1150 if (netdev_hw_addr_list_count(mcptr))
1151 tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;
1152
1153 writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));
1154
1155
1156 /* Set initial hash table */
1157 hashlo = 0x0;
1158 hashhi = 0x0;
1159
1160 /* 64 bits : multicast address in hash table */
1161 netdev_hw_addr_list_for_each(ha, mcptr) {
1162 hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;
1163
1164 if (hash_val >= 32)
1165 hashhi |= 1 << (hash_val - 32);
1166 else
1167 hashlo |= 1 << hash_val;
1168 }
1169
1170 writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
1171 writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));
1172
1173 spin_unlock_irqrestore(&pldat->lock, flags);
1174 }
1175
1176 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1177 {
1178 struct phy_device *phydev = ndev->phydev;
1179
1180 if (!netif_running(ndev))
1181 return -EINVAL;
1182
1183 if (!phydev)
1184 return -ENODEV;
1185
1186 return phy_mii_ioctl(phydev, req, cmd);
1187 }
1188
/*
 * net_device_ops .ndo_open: bring the interface up.
 *
 * Enables the peripheral clock, resumes/starts the PHY, resets and
 * re-initializes the MAC, then enables the TX queue and NAPI polling.
 * Returns 0 on success or the clk_prepare_enable() error code.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	int ret;

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		return ret;

	/* Suspended PHY makes LPC ethernet core block, so resume now */
	phy_resume(ndev->phydev);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1215
1216 /*
1217 * Ethtool ops
1218 */
/* ethtool .get_drvinfo: report driver name, version and bus info */
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
	struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1227
1228 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1229 {
1230 struct netdata_local *pldat = netdev_priv(ndev);
1231
1232 return pldat->msg_enable;
1233 }
1234
1235 static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1236 {
1237 struct netdata_local *pldat = netdev_priv(ndev);
1238
1239 pldat->msg_enable = level;
1240 }
1241
/* ethtool operations; link settings are delegated to the PHY layer */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
	.get_msglevel = lpc_eth_ethtool_getmsglevel,
	.set_msglevel = lpc_eth_ethtool_setmsglevel,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1250
/* Netdevice callbacks; generic helpers cover address validation and MTU */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open = lpc_eth_open,
	.ndo_stop = lpc_eth_close,
	.ndo_start_xmit = lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode = lpc_eth_set_multicast_list,
	.ndo_do_ioctl = lpc_eth_ioctl,
	.ndo_set_mac_address = lpc_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
1261
/*
 * Platform driver probe: set up pin muxing, map resources, allocate DMA
 * buffers (IRAM when possible, SDRAM otherwise), determine the MAC
 * address, and register the netdev and MII bus.
 *
 * Errors unwind through the goto chain in reverse order of acquisition.
 */
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *ndev;
	struct netdata_local *pldat;
	struct phy_device *phydev;
	dma_addr_t dma_handle;
	int irq, ret;
	u32 tmp;

	/* Setup network interface for RMII or MII mode */
	tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
	tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
	if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
	else
		tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
	__raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(&pdev->dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		goto err_out_clk_put;

	/* Map IO space */
	pldat->net_base = ioremap(res->start, resource_size(res));
	if (!pldat->net_base) {
		dev_err(&pdev->dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Get size of DMA buffers/descriptors region */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
	pldat->dma_buff_base_v = 0;

	/* Prefer on-chip IRAM for the DMA region when it fits */
	if (use_iram_for_net(&pldat->pdev->dev)) {
		dma_handle = LPC32XX_IRAM_BASE;
		if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
			pldat->dma_buff_base_v =
				io_p2v(LPC32XX_IRAM_BASE);
		else
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
	}

	/* Fall back to coherent SDRAM when IRAM is unavailable/too small */
	if (pldat->dma_buff_base_v == 0) {
		ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a chunk of memory for the DMA ethernet buffers
		   and descriptors */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(&pldat->pdev->dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	/* NOTE(review): %d for resource_size()/dma_buff_size may mismatch
	   the argument type on some configs -- confirm specifier widths */
	netdev_dbg(ndev, "IO address space :%pR\n", res);
	netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
	netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
			pldat->net_base);
	netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
			pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
			pldat->dma_buff_base_v);

	/* Get MAC address from current HW setting (POR state is all zeros) */
	__lpc_get_mac(pldat, ndev->dev_addr);

	/* Fall back to the device tree, then to a random address */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
		if (macaddr)
			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Reset the ethernet controller */
	__lpc_eth_reset(pldat);

	/* then shut everything down to save power */
	__lpc_eth_shutdown(pldat);

	/* Set default parameters */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Force default PHY interface setup in chip, this will probably be
	   changed by the PHY driver */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
	       res->start, ndev->irq);

	phydev = ndev->phydev;

	/* Wakeup capable, but wakeup disabled by default */
	device_init_wakeup(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, 0);

	return 0;

/* Error unwind: release resources in reverse order of acquisition */
err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	/* Only SDRAM-backed buffers came from dma_alloc_coherent() */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable_unprepare(pldat->clk);
err_out_clk_put:
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}
1462
/*
 * Platform driver removal: tear everything down in reverse order of
 * probe.  DMA memory is only freed when it was an SDRAM allocation
 * (IRAM-backed buffers are not dma_alloc_coherent() memory).
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable_unprepare(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1485
1486 #ifdef CONFIG_PM
1487 static int lpc_eth_drv_suspend(struct platform_device *pdev,
1488 pm_message_t state)
1489 {
1490 struct net_device *ndev = platform_get_drvdata(pdev);
1491 struct netdata_local *pldat = netdev_priv(ndev);
1492
1493 if (device_may_wakeup(&pdev->dev))
1494 enable_irq_wake(ndev->irq);
1495
1496 if (ndev) {
1497 if (netif_running(ndev)) {
1498 netif_device_detach(ndev);
1499 __lpc_eth_shutdown(pldat);
1500 clk_disable_unprepare(pldat->clk);
1501
1502 /*
1503 * Reset again now clock is disable to be sure
1504 * EMC_MDC is down
1505 */
1506 __lpc_eth_reset(pldat);
1507 }
1508 }
1509
1510 return 0;
1511 }
1512
1513 static int lpc_eth_drv_resume(struct platform_device *pdev)
1514 {
1515 struct net_device *ndev = platform_get_drvdata(pdev);
1516 struct netdata_local *pldat;
1517
1518 if (device_may_wakeup(&pdev->dev))
1519 disable_irq_wake(ndev->irq);
1520
1521 if (ndev) {
1522 if (netif_running(ndev)) {
1523 pldat = netdev_priv(ndev);
1524
1525 /* Enable interface clock */
1526 clk_enable(pldat->clk);
1527
1528 /* Reset and initialize */
1529 __lpc_eth_reset(pldat);
1530 __lpc_eth_init(pldat);
1531
1532 netif_device_attach(ndev);
1533 }
1534 }
1535
1536 return 0;
1537 }
1538 #endif
1539
1540 #ifdef CONFIG_OF
/* Device tree match table for OF-based probing */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
1546 #endif
1547
/* Platform driver glue; uses legacy (non-dev_pm_ops) suspend/resume */
static struct platform_driver lpc_eth_driver = {
	.probe = lpc_eth_drv_probe,
	.remove = lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend = lpc_eth_drv_suspend,
	.resume = lpc_eth_drv_resume,
#endif
	.driver = {
		.name = MODNAME,
		.of_match_table = of_match_ptr(lpc_eth_match),
	},
};
1560
/* Standard module registration; no extra init/exit work is needed */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");