/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
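
/* Worked example (illustrative numbers, not taken from this file): with
 * 256 descriptors in total and four priority queues of 32 BDs each, the
 * default queue 16 would own the remaining 256 - 4 * 32 = 128 descriptors.
 */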

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
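
/* Layout sketch (inferred from the accessors further down): each T/RDMA
 * block starts with TOTAL_DESC descriptors of DMA_DESC_SIZE bytes each,
 * followed by the per-ring register sets (DMA_RING_SIZE bytes apiece) and,
 * DMA_RINGS_SIZE past the descriptors, the common DMA registers.
 */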
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These macros are defined to deal with the register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by the driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				   priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
			     priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors.
 * We merge the common fields and just prefix with T/D the registers
 * that have a different meaning depending on the direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
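
/* For example, TDMA_CONS_INDEX and RDMA_PROD_INDEX share the same enum
 * value: the same register slot holds the consumer index on a Tx ring
 * and the producer index on an Rx ring.
 */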

/* GENET v4 supports 40-bit pointer addressing. The LO and HI word parts
 * are, for obvious reasons, contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			   (DMA_RING_SIZE * ring) +
			   genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
		     (DMA_RING_SIZE * ring) +
		     genet_dma_ring_regs[r]);
}

static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}

static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
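
	/* The timeout register holds 8.192us hardware ticks, so the value
	 * reported here is the programmed tick count converted back to
	 * microseconds and may differ slightly from what was requested.
	 */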

	return 0;
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us; our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits).
	 */
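	/* Worked example of that conversion: asking for rx_coalesce_usecs = 50
	 * programs DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks below, i.e. an
	 * effective timeout of 7 * 8.192us, roughly 57.3us.
	 */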
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		bcmgenet_rdma_ring_writel(priv, i,
					  ec->rx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);

		reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
		reg &= ~DMA_TIMEOUT_MASK;
		reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
		bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
	}

	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  ec->rx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
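
/* Each entry thus records a printable name plus the size and offset of the
 * corresponding field, so the stats code can copy values generically with
 * offsetof()/sizeof() instead of naming every field.
 */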

/* There is a 0xC gap between the end of the RX stats and the beginning of
 * the TX stats, and then between the end of the TX stats and the beginning
 * of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}

static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}

/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	/* Return ret rather than 0 so a WoL configuration failure is not
	 * silently discarded.
	 */
	return ret;
}

static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
			 EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_phy_power_set(priv->dev, true);
}

/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}

static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
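
	/* Wraparound example (assuming the usual 16-bit index mask): a
	 * hardware c_index of 2 with a stale ring->c_index of 65534 yields
	 * (65535 + 1) - 65534 + 2 = 4 buffers completed since last reclaim.
	 */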

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}

/* Transmit a single SKB (either the head of a fragment list or a
 * single SKB); the caller must hold the ring lock.
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	return 0;
}

/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();
	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	return 0;
}

/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);
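
		/* Illustration (hypothetical frame, not a value from this
		 * driver): for TCP over IPv4 with the transport header 34
		 * bytes into the frame and csum_offset = 16, this asks the
		 * hardware to start checksumming at byte 34 and write the
		 * result at byte 50, both relative to the data that follows
		 * the 64B status block.
		 */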

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
1424 | ||
1425 | static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) | |
1426 | { | |
1427 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
1428 | struct bcmgenet_tx_ring *ring = NULL; | |
b2cde2cc | 1429 | struct netdev_queue *txq; |
1c1008c7 FF |
1430 | unsigned long flags = 0; |
1431 | int nr_frags, index; | |
1432 | u16 dma_desc_flags; | |
1433 | int ret; | |
1434 | int i; | |
1435 | ||
1436 | index = skb_get_queue_mapping(skb); | |
1437 | /* Mapping strategy: | |
1438 | * queue_mapping = 0, unclassified, packet xmited through ring16 | |
1439 | * queue_mapping = 1, goes to ring 0. (highest priority queue | |
1440 | * queue_mapping = 2, goes to ring 1. | |
1441 | * queue_mapping = 3, goes to ring 2. | |
1442 | * queue_mapping = 4, goes to ring 3. | |
1443 | */ | |
1444 | if (index == 0) | |
1445 | index = DESC_INDEX; | |
1446 | else | |
1447 | index -= 1; | |
1448 | ||
1c1008c7 FF |
1449 | nr_frags = skb_shinfo(skb)->nr_frags; |
1450 | ring = &priv->tx_rings[index]; | |
b2cde2cc | 1451 | txq = netdev_get_tx_queue(dev, ring->queue); |
1c1008c7 FF |
1452 | |
1453 | spin_lock_irqsave(&ring->lock, flags); | |
1454 | if (ring->free_bds <= nr_frags + 1) { | |
b2cde2cc | 1455 | netif_tx_stop_queue(txq); |
1c1008c7 | 1456 | netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", |
c91b7f66 | 1457 | __func__, index, ring->queue); |
1c1008c7 FF |
1458 | ret = NETDEV_TX_BUSY; |
1459 | goto out; | |
1460 | } | |
1461 | ||
474ea9ca FF |
1462 | if (skb_padto(skb, ETH_ZLEN)) { |
1463 | ret = NETDEV_TX_OK; | |
1464 | goto out; | |
1465 | } | |
1466 | ||
1c1008c7 FF |
1467 | /* set the SKB transmit checksum */ |
1468 | if (priv->desc_64b_en) { | |
bc23333b PG |
1469 | skb = bcmgenet_put_tx_csum(dev, skb); |
1470 | if (!skb) { | |
1c1008c7 FF |
1471 | ret = NETDEV_TX_OK; |
1472 | goto out; | |
1473 | } | |
1474 | } | |
1475 | ||
1476 | dma_desc_flags = DMA_SOP; | |
1477 | if (nr_frags == 0) | |
1478 | dma_desc_flags |= DMA_EOP; | |
1479 | ||
1480 | /* Transmit single SKB or head of fragment list */ | |
1481 | ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); | |
1482 | if (ret) { | |
1483 | ret = NETDEV_TX_OK; | |
1484 | goto out; | |
1485 | } | |
1486 | ||
1487 | /* xmit fragment */ | |
1488 | for (i = 0; i < nr_frags; i++) { | |
1489 | ret = bcmgenet_xmit_frag(dev, | |
c91b7f66 FF |
1490 | &skb_shinfo(skb)->frags[i], |
1491 | (i == nr_frags - 1) ? DMA_EOP : 0, | |
1492 | ring); | |
1c1008c7 FF |
1493 | if (ret) { |
1494 | ret = NETDEV_TX_OK; | |
1495 | goto out; | |
1496 | } | |
1497 | } | |
1498 | ||
d03825fb FF |
1499 | skb_tx_timestamp(skb); |
1500 | ||
ae67bf01 FF |
1501 | /* Decrement total BD count and advance our write pointer */ |
1502 | ring->free_bds -= nr_frags + 1; | |
1503 | ring->prod_index += nr_frags + 1; | |
1504 | ring->prod_index &= DMA_P_INDEX_MASK; | |
1505 | ||
4092e6ac | 1506 | if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) |
b2cde2cc | 1507 | netif_tx_stop_queue(txq); |
1c1008c7 | 1508 | |
ddd0ca5d FF |
1509 | if (!skb->xmit_more || netif_xmit_stopped(txq)) |
1510 | /* Packets are ready, update producer index */ | |
1511 | bcmgenet_tdma_ring_writel(priv, ring->index, | |
1512 | ring->prod_index, TDMA_PROD_INDEX); | |
1c1008c7 FF |
1513 | out: |
1514 | spin_unlock_irqrestore(&ring->lock, flags); | |
1515 | ||
1516 | return ret; | |
1517 | } | |
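/* Editor's sketch, not part of the driver: the queue_mapping -> ring
 * translation that bcmgenet_xmit() performs inline, factored out as a
 * hypothetical helper (the name bcmgenet_skb_ring_index is invented here;
 * DESC_INDEX and skb_get_queue_mapping() come from the surrounding code).
 */
static inline unsigned int bcmgenet_skb_ring_index(struct sk_buff *skb)
{
	unsigned int index = skb_get_queue_mapping(skb);

	/* queue 0 carries unclassified traffic and lands on default
	 * ring 16 (DESC_INDEX); queues 1..4 map to priority rings 0..3
	 */
	return (index == 0) ? DESC_INDEX : index - 1;
}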
1518 | ||
d6707bec PG |
1519 | static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, |
1520 | struct enet_cb *cb) | |
1c1008c7 FF |
1521 | { |
1522 | struct device *kdev = &priv->pdev->dev; | |
1523 | struct sk_buff *skb; | |
d6707bec | 1524 | struct sk_buff *rx_skb; |
1c1008c7 | 1525 | dma_addr_t mapping; |
1c1008c7 | 1526 | |
d6707bec | 1527 | /* Allocate a new Rx skb */ |
c91b7f66 | 1528 | skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT); |
d6707bec PG |
1529 | if (!skb) { |
1530 | priv->mib.alloc_rx_buff_failed++; | |
1531 | netif_err(priv, rx_err, priv->dev, | |
1532 | "%s: Rx skb allocation failed\n", __func__); | |
1533 | return NULL; | |
1534 | } | |
1c1008c7 | 1535 | |
d6707bec PG |
1536 | /* DMA-map the new Rx skb */ |
1537 | mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, | |
1538 | DMA_FROM_DEVICE); | |
1539 | if (dma_mapping_error(kdev, mapping)) { | |
44c8bc3c | 1540 | priv->mib.rx_dma_failed++; |
d6707bec | 1541 | dev_kfree_skb_any(skb); |
1c1008c7 | 1542 | netif_err(priv, rx_err, priv->dev, |
d6707bec PG |
1543 | "%s: Rx skb DMA mapping failed\n", __func__); |
1544 | return NULL; | |
1c1008c7 FF |
1545 | } |
1546 | ||
d6707bec PG |
1547 | /* Grab the current Rx skb from the ring and DMA-unmap it */ |
1548 | rx_skb = cb->skb; | |
1549 | if (likely(rx_skb)) | |
1550 | dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), | |
1551 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1552 | ||
1553 | /* Put the new Rx skb on the ring */ | |
1554 | cb->skb = skb; | |
1c1008c7 | 1555 | dma_unmap_addr_set(cb, dma_addr, mapping); |
8ac467e8 | 1556 | dmadesc_set_addr(priv, cb->bd_addr, mapping); |
1c1008c7 | 1557 | |
d6707bec PG |
1558 | /* Return the current Rx skb to caller */ |
1559 | return rx_skb; | |
1c1008c7 FF |
1560 | } |
1561 | ||
1562 | /* bcmgenet_desc_rx - descriptor-based Rx processing.
1563 | * This can be called from the bottom half or from the NAPI polling method.
1564 | */ | |
4055eaef | 1565 | static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, |
1c1008c7 FF |
1566 | unsigned int budget) |
1567 | { | |
4055eaef | 1568 | struct bcmgenet_priv *priv = ring->priv; |
1c1008c7 FF |
1569 | struct net_device *dev = priv->dev; |
1570 | struct enet_cb *cb; | |
1571 | struct sk_buff *skb; | |
1572 | u32 dma_length_status; | |
1573 | unsigned long dma_flag; | |
d6707bec | 1574 | int len; |
1c1008c7 FF |
1575 | unsigned int rxpktprocessed = 0, rxpkttoprocess; |
1576 | unsigned int p_index; | |
d26ea6cc | 1577 | unsigned int discards; |
1c1008c7 FF |
1578 | unsigned int chksum_ok = 0; |
1579 | ||
4055eaef | 1580 | p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); |
d26ea6cc PG |
1581 | |
1582 | discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) & | |
1583 | DMA_P_INDEX_DISCARD_CNT_MASK; | |
1584 | if (discards > ring->old_discards) { | |
1585 | discards = discards - ring->old_discards; | |
1586 | dev->stats.rx_missed_errors += discards; | |
1587 | dev->stats.rx_errors += discards; | |
1588 | ring->old_discards += discards; | |
1589 | ||
1590 | /* Clear HW register when we reach 75% of maximum 0xFFFF */ | |
1591 | if (ring->old_discards >= 0xC000) { | |
1592 | ring->old_discards = 0; | |
4055eaef | 1593 | bcmgenet_rdma_ring_writel(priv, ring->index, 0, |
d26ea6cc PG |
1594 | RDMA_PROD_INDEX); |
1595 | } | |
1596 | } | |
1597 | ||
1c1008c7 FF |
1598 | p_index &= DMA_P_INDEX_MASK; |
1599 | ||
8ac467e8 PG |
1600 | if (likely(p_index >= ring->c_index)) |
1601 | rxpkttoprocess = p_index - ring->c_index; | |
1c1008c7 | 1602 | else |
8ac467e8 PG |
1603 | rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index + |
1604 | p_index; | |
1c1008c7 FF |
1605 | |
1606 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1607 | "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); |
1c1008c7 FF |
1608 | |
1609 | while ((rxpktprocessed < rxpkttoprocess) && | |
c91b7f66 | 1610 | (rxpktprocessed < budget)) { |
8ac467e8 | 1611 | cb = &priv->rx_cbs[ring->read_ptr]; |
d6707bec | 1612 | skb = bcmgenet_rx_refill(priv, cb); |
b629be5c | 1613 | |
b629be5c FF |
1614 | if (unlikely(!skb)) { |
1615 | dev->stats.rx_dropped++; | |
d6707bec | 1616 | goto next; |
b629be5c FF |
1617 | } |
1618 | ||
1c1008c7 | 1619 | if (!priv->desc_64b_en) { |
c91b7f66 | 1620 | dma_length_status = |
8ac467e8 | 1621 | dmadesc_get_length_status(priv, cb->bd_addr); |
1c1008c7 FF |
1622 | } else { |
1623 | struct status_64 *status; | |
164d4f20 | 1624 | |
1c1008c7 FF |
1625 | status = (struct status_64 *)skb->data; |
1626 | dma_length_status = status->length_status; | |
1627 | } | |
1628 | ||
1629 | /* DMA flags and length are still valid no matter how | |
1630 | * we got the Receive Status Vector (64B RSB or register) | |
1631 | */ | |
1632 | dma_flag = dma_length_status & 0xffff; | |
1633 | len = dma_length_status >> DMA_BUFLENGTH_SHIFT; | |
1634 | ||
1635 | netif_dbg(priv, rx_status, dev, | |
c91b7f66 | 1636 | "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", |
8ac467e8 PG |
1637 | __func__, p_index, ring->c_index, |
1638 | ring->read_ptr, dma_length_status); | |
1c1008c7 | 1639 | |
1c1008c7 FF |
1640 | if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { |
1641 | netif_err(priv, rx_status, dev, | |
c91b7f66 | 1642 | "dropping fragmented packet!\n"); |
1c1008c7 | 1643 | dev->stats.rx_errors++; |
d6707bec PG |
1644 | dev_kfree_skb_any(skb); |
1645 | goto next; | |
1c1008c7 | 1646 | } |
d6707bec | 1647 | |
1c1008c7 FF |
1648 | /* report errors */ |
1649 | if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | | |
1650 | DMA_RX_OV | | |
1651 | DMA_RX_NO | | |
1652 | DMA_RX_LG | | |
1653 | DMA_RX_RXER))) { | |
1654 | netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", | |
c91b7f66 | 1655 | (unsigned int)dma_flag); |
1c1008c7 FF |
1656 | if (dma_flag & DMA_RX_CRC_ERROR) |
1657 | dev->stats.rx_crc_errors++; | |
1658 | if (dma_flag & DMA_RX_OV) | |
1659 | dev->stats.rx_over_errors++; | |
1660 | if (dma_flag & DMA_RX_NO) | |
1661 | dev->stats.rx_frame_errors++; | |
1662 | if (dma_flag & DMA_RX_LG) | |
1663 | dev->stats.rx_length_errors++; | |
1c1008c7 | 1664 | dev->stats.rx_errors++; |
d6707bec PG |
1665 | dev_kfree_skb_any(skb); |
1666 | goto next; | |
1c1008c7 FF |
1667 | } /* error packet */ |
1668 | ||
1669 | chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && | |
c91b7f66 | 1670 | priv->desc_rxchk_en; |
1c1008c7 FF |
1671 | |
1672 | skb_put(skb, len); | |
1673 | if (priv->desc_64b_en) { | |
1674 | skb_pull(skb, 64); | |
1675 | len -= 64; | |
1676 | } | |
1677 | ||
1678 | if (likely(chksum_ok)) | |
1679 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1680 | ||
1681 | /* remove the 2 bytes the hardware added for IP alignment */
1682 | skb_pull(skb, 2); | |
1683 | len -= 2; | |
1684 | ||
1685 | if (priv->crc_fwd_en) { | |
1686 | skb_trim(skb, len - ETH_FCS_LEN); | |
1687 | len -= ETH_FCS_LEN; | |
1688 | } | |
1689 | ||
1690 | /* Finish setting up the received SKB and send it to the kernel */
1691 | skb->protocol = eth_type_trans(skb, priv->dev); | |
1692 | dev->stats.rx_packets++; | |
1693 | dev->stats.rx_bytes += len; | |
1694 | if (dma_flag & DMA_RX_MULT) | |
1695 | dev->stats.multicast++; | |
1696 | ||
1697 | /* Notify kernel */ | |
4055eaef | 1698 | napi_gro_receive(&ring->napi, skb); |
1c1008c7 FF |
1699 | netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); |
1700 | ||
d6707bec | 1701 | next: |
cf377d88 | 1702 | rxpktprocessed++; |
8ac467e8 PG |
1703 | if (likely(ring->read_ptr < ring->end_ptr)) |
1704 | ring->read_ptr++; | |
1705 | else | |
1706 | ring->read_ptr = ring->cb_ptr; | |
1707 | ||
1708 | ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK; | |
4055eaef | 1709 | bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); |
1c1008c7 FF |
1710 | } |
1711 | ||
1712 | return rxpktprocessed; | |
1713 | } | |
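/* Editor's note, a sketch rather than driver code: p_index and c_index both
 * wrap modulo (DMA_C_INDEX_MASK + 1), so the two-branch rxpkttoprocess
 * computation in bcmgenet_desc_rx() collapses to one masked subtraction.
 * The helper name is hypothetical.
 */
static inline unsigned int ring_pkts_pending(unsigned int p_index,
					     unsigned int c_index)
{
	/* e.g. p_index = 0x0002, c_index = 0xfffe:
	 * (0x0002 - 0xfffe) & 0xffff = 4 packets pending across the wrap
	 */
	return (p_index - c_index) & DMA_C_INDEX_MASK;
}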
1714 | ||
3ab11339 PG |
1715 | /* Rx NAPI polling method */ |
1716 | static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) | |
1717 | { | |
4055eaef PG |
1718 | struct bcmgenet_rx_ring *ring = container_of(napi, |
1719 | struct bcmgenet_rx_ring, napi); | |
3ab11339 PG |
1720 | unsigned int work_done; |
1721 | ||
4055eaef | 1722 | work_done = bcmgenet_desc_rx(ring, budget); |
3ab11339 PG |
1723 | |
1724 | if (work_done < budget) { | |
1725 | napi_complete(napi); | |
4055eaef | 1726 | ring->int_enable(ring); |
3ab11339 PG |
1727 | } |
1728 | ||
1729 | return work_done; | |
1730 | } | |
1731 | ||
1c1008c7 | 1732 | /* Assign skbs to Rx DMA descriptors. */
8ac467e8 PG |
1733 | static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, |
1734 | struct bcmgenet_rx_ring *ring) | |
1c1008c7 FF |
1735 | { |
1736 | struct enet_cb *cb; | |
d6707bec | 1737 | struct sk_buff *skb; |
1c1008c7 FF |
1738 | int i; |
1739 | ||
8ac467e8 | 1740 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 FF |
1741 | |
1742 | /* loop here for each buffer needing assignment */
8ac467e8 PG |
1743 | for (i = 0; i < ring->size; i++) { |
1744 | cb = ring->cbs + i; | |
d6707bec PG |
1745 | skb = bcmgenet_rx_refill(priv, cb); |
1746 | if (skb) | |
1747 | dev_kfree_skb_any(skb); | |
1748 | if (!cb->skb) | |
1749 | return -ENOMEM; | |
1c1008c7 FF |
1750 | } |
1751 | ||
d6707bec | 1752 | return 0; |
1c1008c7 FF |
1753 | } |
1754 | ||
1755 | static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |
1756 | { | |
1757 | struct enet_cb *cb; | |
1758 | int i; | |
1759 | ||
1760 | for (i = 0; i < priv->num_rx_bds; i++) { | |
1761 | cb = &priv->rx_cbs[i]; | |
1762 | ||
1763 | if (dma_unmap_addr(cb, dma_addr)) { | |
1764 | dma_unmap_single(&priv->dev->dev, | |
c91b7f66 FF |
1765 | dma_unmap_addr(cb, dma_addr), |
1766 | priv->rx_buf_len, DMA_FROM_DEVICE); | |
1c1008c7 FF |
1767 | dma_unmap_addr_set(cb, dma_addr, 0); |
1768 | } | |
1769 | ||
1770 | if (cb->skb) | |
1771 | bcmgenet_free_cb(cb); | |
1772 | } | |
1773 | } | |
1774 | ||
c91b7f66 | 1775 | static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) |
e29585b8 FF |
1776 | { |
1777 | u32 reg; | |
1778 | ||
1779 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1780 | if (enable) | |
1781 | reg |= mask; | |
1782 | else | |
1783 | reg &= ~mask; | |
1784 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
1785 | ||
1786 | /* UniMAC stops on a packet boundary, so wait long enough for a
1787 | * full-size packet to be processed
1788 | */ | |
1789 | if (enable == 0) | |
1790 | usleep_range(1000, 2000); | |
1791 | } | |
1792 | ||
1c1008c7 FF |
1793 | static int reset_umac(struct bcmgenet_priv *priv) |
1794 | { | |
1795 | struct device *kdev = &priv->pdev->dev; | |
1796 | unsigned int timeout = 0; | |
1797 | u32 reg; | |
1798 | ||
1799 | /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ | |
1800 | bcmgenet_rbuf_ctrl_set(priv, 0); | |
1801 | udelay(10); | |
1802 | ||
1803 | /* disable MAC while updating its registers */ | |
1804 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1805 | ||
1806 | /* issue soft reset, wait for it to complete */ | |
1807 | bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); | |
1808 | while (timeout++ < 1000) { | |
1809 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
1810 | if (!(reg & CMD_SW_RESET)) | |
1811 | return 0; | |
1812 | ||
1813 | udelay(1); | |
1814 | } | |
1815 | ||
1816 | if (timeout >= 1000) {	/* post-increment leaves timeout at 1001 on expiry */
1817 | dev_err(kdev, | |
7fc527f9 | 1818 | "timeout waiting for MAC to come out of reset\n"); |
1c1008c7 FF |
1819 | return -ETIMEDOUT; |
1820 | } | |
1821 | ||
1822 | return 0; | |
1823 | } | |
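/* Editor's sketch, assuming the same register accessors: the reset wait in
 * reset_umac() written so that success and expiry are decided in a single
 * place, avoiding the post-increment counter test entirely.  The helper
 * name is invented for illustration.
 */
static int umac_wait_sw_reset_clear(struct bcmgenet_priv *priv)
{
	unsigned int timeout = 1000;	/* ~1 ms in 1 us steps */

	while (timeout--) {
		if (!(bcmgenet_umac_readl(priv, UMAC_CMD) & CMD_SW_RESET))
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;	/* caller logs the failure */
}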
1824 | ||
909ff5ef FF |
1825 | static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) |
1826 | { | |
1827 | /* Mask all interrupts. */
1828 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1829 | bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1830 | bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1831 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); | |
1832 | bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); | |
1833 | bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); | |
1834 | } | |
1835 | ||
1c1008c7 FF |
1836 | static int init_umac(struct bcmgenet_priv *priv) |
1837 | { | |
1838 | struct device *kdev = &priv->pdev->dev; | |
1839 | int ret; | |
b2e97eca PG |
1840 | u32 reg; |
1841 | u32 int0_enable = 0; | |
1842 | u32 int1_enable = 0; | |
1843 | int i; | |
1c1008c7 FF |
1844 | |
1845 | dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); | |
1846 | ||
1847 | ret = reset_umac(priv); | |
1848 | if (ret) | |
1849 | return ret; | |
1850 | ||
1851 | bcmgenet_umac_writel(priv, 0, UMAC_CMD); | |
1852 | /* clear tx/rx counter */ | |
1853 | bcmgenet_umac_writel(priv, | |
c91b7f66 FF |
1854 | MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, |
1855 | UMAC_MIB_CTRL); | |
1c1008c7 FF |
1856 | bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); |
1857 | ||
1858 | bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); | |
1859 | ||
1860 | /* init rx registers, enable ip header optimization */ | |
1861 | reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); | |
1862 | reg |= RBUF_ALIGN_2B; | |
1863 | bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); | |
1864 | ||
1865 | if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) | |
1866 | bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); | |
1867 | ||
909ff5ef | 1868 | bcmgenet_intr_disable(priv); |
1c1008c7 | 1869 | |
b2e97eca | 1870 | /* Enable Rx default queue 16 interrupts */ |
ee7d8c20 | 1871 | int0_enable |= UMAC_IRQ_RXDMA_DONE; |
1c1008c7 | 1872 | |
b2e97eca | 1873 | /* Enable Tx default queue 16 interrupts */ |
ee7d8c20 | 1874 | int0_enable |= UMAC_IRQ_TXDMA_DONE; |
1c1008c7 | 1875 | |
7fc527f9 | 1876 | /* Monitor cable plug/unplug events for the internal PHY */
c624f891 | 1877 | if (priv->internal_phy) { |
e122966d | 1878 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
8900ea57 | 1879 | } else if (priv->ext_phy) { |
e122966d | 1880 | int0_enable |= UMAC_IRQ_LINK_EVENT; |
8900ea57 | 1881 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
8d88c6eb PG |
1882 | if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) |
1883 | int0_enable |= UMAC_IRQ_LINK_EVENT; | |
1884 | ||
1c1008c7 FF |
1885 | reg = bcmgenet_bp_mc_get(priv); |
1886 | reg |= BIT(priv->hw_params->bp_in_en_shift); | |
1887 | ||
1888 | /* bp_mask: back pressure mask */ | |
1889 | if (netif_is_multiqueue(priv->dev)) | |
1890 | reg |= priv->hw_params->bp_in_mask; | |
1891 | else | |
1892 | reg &= ~priv->hw_params->bp_in_mask; | |
1893 | bcmgenet_bp_mc_set(priv, reg); | |
1894 | } | |
1895 | ||
1896 | /* Enable MDIO interrupts on GENET v3+ */ | |
1897 | if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) | |
b2e97eca | 1898 | int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
1c1008c7 | 1899 | |
4055eaef PG |
1900 | /* Enable Rx priority queue interrupts */ |
1901 | for (i = 0; i < priv->hw_params->rx_queues; ++i) | |
1902 | int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); | |
1903 | ||
b2e97eca PG |
1904 | /* Enable Tx priority queue interrupts */ |
1905 | for (i = 0; i < priv->hw_params->tx_queues; ++i) | |
1906 | int1_enable |= (1 << i); | |
1c1008c7 | 1907 | |
b2e97eca PG |
1908 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); |
1909 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
4092e6ac | 1910 | |
1c1008c7 FF |
1911 | /* Enable rx/tx engine. */
1912 | dev_dbg(kdev, "done init umac\n"); | |
1913 | ||
1914 | return 0; | |
1915 | } | |
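/* Editor's note (worked example, queue counts assumed): on a part with
 * 4 Tx and 4 Rx priority queues, init_umac() leaves the masks as
 *
 *   int0_enable = UMAC_IRQ_RXDMA_DONE | UMAC_IRQ_TXDMA_DONE
 *                 (plus link and/or MDIO bits depending on the PHY)
 *   int1_enable = 0x0000000f                          Tx queues 0-3
 *               | (0xf << UMAC_IRQ1_RX_INTR_SHIFT)    Rx queues 0-3
 *
 * so default-queue events arrive on INTRL2_0 and per-priority-queue events
 * on INTRL2_1, which is why bcmgenet_isr0()/bcmgenet_isr1() below split the
 * work that way.
 */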
1916 | ||
4f8b2d7d | 1917 | /* Initialize a Tx ring along with corresponding hardware registers */ |
1c1008c7 FF |
1918 | static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, |
1919 | unsigned int index, unsigned int size, | |
4f8b2d7d | 1920 | unsigned int start_ptr, unsigned int end_ptr) |
1c1008c7 FF |
1921 | { |
1922 | struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; | |
1923 | u32 words_per_bd = WORDS_PER_BD(priv); | |
1924 | u32 flow_period_val = 0; | |
1c1008c7 FF |
1925 | |
1926 | spin_lock_init(&ring->lock); | |
4092e6ac | 1927 | ring->priv = priv; |
1c1008c7 FF |
1928 | ring->index = index; |
1929 | if (index == DESC_INDEX) { | |
1930 | ring->queue = 0; | |
1931 | ring->int_enable = bcmgenet_tx_ring16_int_enable; | |
1932 | ring->int_disable = bcmgenet_tx_ring16_int_disable; | |
1933 | } else { | |
1934 | ring->queue = index + 1; | |
1935 | ring->int_enable = bcmgenet_tx_ring_int_enable; | |
1936 | ring->int_disable = bcmgenet_tx_ring_int_disable; | |
1937 | } | |
4f8b2d7d | 1938 | ring->cbs = priv->tx_cbs + start_ptr; |
1c1008c7 | 1939 | ring->size = size; |
66d06757 | 1940 | ring->clean_ptr = start_ptr; |
1c1008c7 FF |
1941 | ring->c_index = 0; |
1942 | ring->free_bds = size; | |
4f8b2d7d PG |
1943 | ring->write_ptr = start_ptr; |
1944 | ring->cb_ptr = start_ptr; | |
1c1008c7 FF |
1945 | ring->end_ptr = end_ptr - 1; |
1946 | ring->prod_index = 0; | |
1947 | ||
1948 | /* Set flow period for ring != 16 */ | |
1949 | if (index != DESC_INDEX) | |
1950 | flow_period_val = ENET_MAX_MTU_SIZE << 16; | |
1951 | ||
1952 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); | |
1953 | bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); | |
1954 | bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); | |
1955 | /* Disable rate control for now */ | |
1956 | bcmgenet_tdma_ring_writel(priv, index, flow_period_val, | |
c91b7f66 | 1957 | TDMA_FLOW_PERIOD); |
1c1008c7 | 1958 | bcmgenet_tdma_ring_writel(priv, index, |
c91b7f66 FF |
1959 | ((size << DMA_RING_SIZE_SHIFT) | |
1960 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 1961 | |
1c1008c7 | 1962 | /* Set start and end address, read and write pointers */ |
4f8b2d7d | 1963 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1964 | DMA_START_ADDR); |
4f8b2d7d | 1965 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1966 | TDMA_READ_PTR); |
4f8b2d7d | 1967 | bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd, |
c91b7f66 | 1968 | TDMA_WRITE_PTR); |
1c1008c7 | 1969 | bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, |
c91b7f66 | 1970 | DMA_END_ADDR); |
1c1008c7 FF |
1971 | } |
1972 | ||
1973 | /* Initialize an RDMA ring */
1974 | static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, | |
8ac467e8 PG |
1975 | unsigned int index, unsigned int size, |
1976 | unsigned int start_ptr, unsigned int end_ptr) | |
1c1008c7 | 1977 | { |
8ac467e8 | 1978 | struct bcmgenet_rx_ring *ring = &priv->rx_rings[index]; |
1c1008c7 FF |
1979 | u32 words_per_bd = WORDS_PER_BD(priv); |
1980 | int ret; | |
1981 | ||
4055eaef | 1982 | ring->priv = priv; |
8ac467e8 | 1983 | ring->index = index; |
4055eaef PG |
1984 | if (index == DESC_INDEX) { |
1985 | ring->int_enable = bcmgenet_rx_ring16_int_enable; | |
1986 | ring->int_disable = bcmgenet_rx_ring16_int_disable; | |
1987 | } else { | |
1988 | ring->int_enable = bcmgenet_rx_ring_int_enable; | |
1989 | ring->int_disable = bcmgenet_rx_ring_int_disable; | |
1990 | } | |
8ac467e8 PG |
1991 | ring->cbs = priv->rx_cbs + start_ptr; |
1992 | ring->size = size; | |
1993 | ring->c_index = 0; | |
1994 | ring->read_ptr = start_ptr; | |
1995 | ring->cb_ptr = start_ptr; | |
1996 | ring->end_ptr = end_ptr - 1; | |
1c1008c7 | 1997 | |
8ac467e8 PG |
1998 | ret = bcmgenet_alloc_rx_buffers(priv, ring); |
1999 | if (ret) | |
1c1008c7 | 2000 | return ret; |
1c1008c7 | 2001 | |
1c1008c7 FF |
2002 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); |
2003 | bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); | |
6f5a272c | 2004 | bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); |
1c1008c7 | 2005 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2006 | ((size << DMA_RING_SIZE_SHIFT) | |
2007 | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); | |
1c1008c7 | 2008 | bcmgenet_rdma_ring_writel(priv, index, |
c91b7f66 FF |
2009 | (DMA_FC_THRESH_LO << |
2010 | DMA_XOFF_THRESHOLD_SHIFT) | | |
2011 | DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); | |
6f5a272c PG |
2012 | |
2013 | /* Set start and end address, read and write pointers */ | |
8ac467e8 PG |
2014 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, |
2015 | DMA_START_ADDR); | |
2016 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2017 | RDMA_READ_PTR); | |
2018 | bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd, | |
2019 | RDMA_WRITE_PTR); | |
2020 | bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, | |
6f5a272c | 2021 | DMA_END_ADDR); |
1c1008c7 FF |
2022 | |
2023 | return ret; | |
2024 | } | |
2025 | ||
e2aadb4a PG |
2026 | static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) |
2027 | { | |
2028 | unsigned int i; | |
2029 | struct bcmgenet_tx_ring *ring; | |
2030 | ||
2031 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2032 | ring = &priv->tx_rings[i]; | |
2033 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); | |
2034 | } | |
2035 | ||
2036 | ring = &priv->tx_rings[DESC_INDEX]; | |
2037 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); | |
2038 | } | |
2039 | ||
2040 | static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) | |
2041 | { | |
2042 | unsigned int i; | |
2043 | struct bcmgenet_tx_ring *ring; | |
2044 | ||
2045 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2046 | ring = &priv->tx_rings[i]; | |
2047 | napi_enable(&ring->napi); | |
2048 | } | |
2049 | ||
2050 | ring = &priv->tx_rings[DESC_INDEX]; | |
2051 | napi_enable(&ring->napi); | |
2052 | } | |
2053 | ||
2054 | static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) | |
2055 | { | |
2056 | unsigned int i; | |
2057 | struct bcmgenet_tx_ring *ring; | |
2058 | ||
2059 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2060 | ring = &priv->tx_rings[i]; | |
2061 | napi_disable(&ring->napi); | |
2062 | } | |
2063 | ||
2064 | ring = &priv->tx_rings[DESC_INDEX]; | |
2065 | napi_disable(&ring->napi); | |
2066 | } | |
2067 | ||
2068 | static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) | |
2069 | { | |
2070 | unsigned int i; | |
2071 | struct bcmgenet_tx_ring *ring; | |
2072 | ||
2073 | for (i = 0; i < priv->hw_params->tx_queues; ++i) { | |
2074 | ring = &priv->tx_rings[i]; | |
2075 | netif_napi_del(&ring->napi); | |
2076 | } | |
2077 | ||
2078 | ring = &priv->tx_rings[DESC_INDEX]; | |
2079 | netif_napi_del(&ring->napi); | |
2080 | } | |
2081 | ||
16c6d667 | 2082 | /* Initialize Tx queues |
1c1008c7 | 2083 | * |
16c6d667 | 2084 | * Queues 0-3 are priority-based, each one has 32 descriptors, |
1c1008c7 FF |
2085 | * with queue 0 being the highest priority queue. |
2086 | * | |
16c6d667 | 2087 | * Queue 16 is the default Tx queue with |
51a966a7 | 2088 | * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. |
1c1008c7 | 2089 | * |
16c6d667 PG |
2090 | * The transmit control block pool is then partitioned as follows: |
2091 | * - Tx queue 0 uses tx_cbs[0..31] | |
2092 | * - Tx queue 1 uses tx_cbs[32..63] | |
2093 | * - Tx queue 2 uses tx_cbs[64..95] | |
2094 | * - Tx queue 3 uses tx_cbs[96..127] | |
2095 | * - Tx queue 16 uses tx_cbs[128..255] | |
1c1008c7 | 2096 | */ |
16c6d667 | 2097 | static void bcmgenet_init_tx_queues(struct net_device *dev) |
1c1008c7 FF |
2098 | { |
2099 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
16c6d667 PG |
2100 | u32 i, dma_enable; |
2101 | u32 dma_ctrl, ring_cfg; | |
37742166 | 2102 | u32 dma_priority[3] = {0, 0, 0}; |
1c1008c7 | 2103 | |
1c1008c7 FF |
2104 | dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); |
2105 | dma_enable = dma_ctrl & DMA_EN; | |
2106 | dma_ctrl &= ~DMA_EN; | |
2107 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2108 | ||
16c6d667 PG |
2109 | dma_ctrl = 0; |
2110 | ring_cfg = 0; | |
2111 | ||
1c1008c7 FF |
2112 | /* Enable strict priority arbiter mode */ |
2113 | bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); | |
2114 | ||
16c6d667 | 2115 | /* Initialize Tx priority queues */ |
1c1008c7 | 2116 | for (i = 0; i < priv->hw_params->tx_queues; i++) { |
51a966a7 PG |
2117 | bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, |
2118 | i * priv->hw_params->tx_bds_per_q, | |
2119 | (i + 1) * priv->hw_params->tx_bds_per_q); | |
16c6d667 PG |
2120 | ring_cfg |= (1 << i); |
2121 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2122 | dma_priority[DMA_PRIO_REG_INDEX(i)] |= |
2123 | ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); | |
1c1008c7 FF |
2124 | } |
2125 | ||
16c6d667 | 2126 | /* Initialize Tx default queue 16 */ |
51a966a7 | 2127 | bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, |
16c6d667 | 2128 | priv->hw_params->tx_queues * |
51a966a7 | 2129 | priv->hw_params->tx_bds_per_q, |
16c6d667 PG |
2130 | TOTAL_DESC); |
2131 | ring_cfg |= (1 << DESC_INDEX); | |
2132 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
37742166 PG |
2133 | dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= |
2134 | ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << | |
2135 | DMA_PRIO_REG_SHIFT(DESC_INDEX)); | |
16c6d667 PG |
2136 | |
2137 | /* Set Tx queue priorities */ | |
37742166 PG |
2138 | bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); |
2139 | bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); | |
2140 | bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); | |
2141 | ||
e2aadb4a PG |
2142 | /* Initialize Tx NAPI */ |
2143 | bcmgenet_init_tx_napi(priv); | |
2144 | ||
16c6d667 PG |
2145 | /* Enable Tx queues */ |
2146 | bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
1c1008c7 | 2147 | |
16c6d667 | 2148 | /* Enable Tx DMA */ |
1c1008c7 | 2149 | if (dma_enable) |
16c6d667 PG |
2150 | dma_ctrl |= DMA_EN; |
2151 | bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); | |
1c1008c7 FF |
2152 | } |
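/* Editor's note, spelling out the descriptor partition used above with the
 * tx_queues = 4, tx_bds_per_q = 32 values the block comment assumes:
 *
 *   priority ring i (0..3): start = i * 32, end = (i + 1) * 32
 *   default ring 16:        start = 4 * 32 = 128, end = TOTAL_DESC = 256
 *
 * hence GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors and the
 * tx_cbs[] ranges listed before bcmgenet_init_tx_queues().
 */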
2153 | ||
3ab11339 PG |
2154 | static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) |
2155 | { | |
4055eaef PG |
2156 | unsigned int i; |
2157 | struct bcmgenet_rx_ring *ring; | |
2158 | ||
2159 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2160 | ring = &priv->rx_rings[i]; | |
2161 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
2162 | } | |
2163 | ||
2164 | ring = &priv->rx_rings[DESC_INDEX]; | |
2165 | netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); | |
3ab11339 PG |
2166 | } |
2167 | ||
2168 | static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) | |
2169 | { | |
4055eaef PG |
2170 | unsigned int i; |
2171 | struct bcmgenet_rx_ring *ring; | |
2172 | ||
2173 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2174 | ring = &priv->rx_rings[i]; | |
2175 | napi_enable(&ring->napi); | |
2176 | } | |
2177 | ||
2178 | ring = &priv->rx_rings[DESC_INDEX]; | |
2179 | napi_enable(&ring->napi); | |
3ab11339 PG |
2180 | } |
2181 | ||
2182 | static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) | |
2183 | { | |
4055eaef PG |
2184 | unsigned int i; |
2185 | struct bcmgenet_rx_ring *ring; | |
2186 | ||
2187 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2188 | ring = &priv->rx_rings[i]; | |
2189 | napi_disable(&ring->napi); | |
2190 | } | |
2191 | ||
2192 | ring = &priv->rx_rings[DESC_INDEX]; | |
2193 | napi_disable(&ring->napi); | |
3ab11339 PG |
2194 | } |
2195 | ||
2196 | static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) | |
2197 | { | |
4055eaef PG |
2198 | unsigned int i; |
2199 | struct bcmgenet_rx_ring *ring; | |
2200 | ||
2201 | for (i = 0; i < priv->hw_params->rx_queues; ++i) { | |
2202 | ring = &priv->rx_rings[i]; | |
2203 | netif_napi_del(&ring->napi); | |
2204 | } | |
2205 | ||
2206 | ring = &priv->rx_rings[DESC_INDEX]; | |
2207 | netif_napi_del(&ring->napi); | |
3ab11339 PG |
2208 | } |
2209 | ||
8ac467e8 PG |
2210 | /* Initialize Rx queues |
2211 | * | |
2212 | * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be | |
2213 | * used to direct traffic to these queues. | |
2214 | * | |
2215 | * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. | |
2216 | */ | |
2217 | static int bcmgenet_init_rx_queues(struct net_device *dev) | |
2218 | { | |
2219 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2220 | u32 i; | |
2221 | u32 dma_enable; | |
2222 | u32 dma_ctrl; | |
2223 | u32 ring_cfg; | |
2224 | int ret; | |
2225 | ||
2226 | dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2227 | dma_enable = dma_ctrl & DMA_EN; | |
2228 | dma_ctrl &= ~DMA_EN; | |
2229 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2230 | ||
2231 | dma_ctrl = 0; | |
2232 | ring_cfg = 0; | |
2233 | ||
2234 | /* Initialize Rx priority queues */ | |
2235 | for (i = 0; i < priv->hw_params->rx_queues; i++) { | |
2236 | ret = bcmgenet_init_rx_ring(priv, i, | |
2237 | priv->hw_params->rx_bds_per_q, | |
2238 | i * priv->hw_params->rx_bds_per_q, | |
2239 | (i + 1) * | |
2240 | priv->hw_params->rx_bds_per_q); | |
2241 | if (ret) | |
2242 | return ret; | |
2243 | ||
2244 | ring_cfg |= (1 << i); | |
2245 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2246 | } | |
2247 | ||
2248 | /* Initialize Rx default queue 16 */ | |
2249 | ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, | |
2250 | priv->hw_params->rx_queues * | |
2251 | priv->hw_params->rx_bds_per_q, | |
2252 | TOTAL_DESC); | |
2253 | if (ret) | |
2254 | return ret; | |
2255 | ||
2256 | ring_cfg |= (1 << DESC_INDEX); | |
2257 | dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); | |
2258 | ||
3ab11339 PG |
2259 | /* Initialize Rx NAPI */ |
2260 | bcmgenet_init_rx_napi(priv); | |
2261 | ||
8ac467e8 PG |
2262 | /* Enable rings */ |
2263 | bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); | |
2264 | ||
2265 | /* Configure ring as descriptor ring and re-enable DMA if it was enabled */
2266 | if (dma_enable) | |
2267 | dma_ctrl |= DMA_EN; | |
2268 | bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); | |
2269 | ||
2270 | return 0; | |
2271 | } | |
2272 | ||
4a0c081e FF |
2273 | static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) |
2274 | { | |
2275 | int ret = 0; | |
2276 | int timeout = 0; | |
2277 | u32 reg; | |
b6df7d61 JS |
2278 | u32 dma_ctrl; |
2279 | int i; | |
4a0c081e FF |
2280 | |
2281 | /* Disable TDMA to stop more frames being added to the TX DMA */
2282 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2283 | reg &= ~DMA_EN; | |
2284 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2285 | ||
2286 | /* Check TDMA status register to confirm TDMA is disabled */ | |
2287 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2288 | reg = bcmgenet_tdma_readl(priv, DMA_STATUS); | |
2289 | if (reg & DMA_DISABLED) | |
2290 | break; | |
2291 | ||
2292 | udelay(1); | |
2293 | } | |
2294 | ||
2295 | if (timeout == DMA_TIMEOUT_VAL) { | |
2296 | netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); | |
2297 | ret = -ETIMEDOUT; | |
2298 | } | |
2299 | ||
2300 | /* Wait 10ms for packets to drain from both the Tx and Rx DMA */
2301 | usleep_range(10000, 20000); | |
2302 | ||
2303 | /* Disable RDMA */ | |
2304 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2305 | reg &= ~DMA_EN; | |
2306 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2307 | ||
2308 | timeout = 0; | |
2309 | /* Check RDMA status register to confirm RDMA is disabled */ | |
2310 | while (timeout++ < DMA_TIMEOUT_VAL) { | |
2311 | reg = bcmgenet_rdma_readl(priv, DMA_STATUS); | |
2312 | if (reg & DMA_DISABLED) | |
2313 | break; | |
2314 | ||
2315 | udelay(1); | |
2316 | } | |
2317 | ||
2318 | if (timeout == DMA_TIMEOUT_VAL) { | |
2319 | netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); | |
2320 | ret = -ETIMEDOUT; | |
2321 | } | |
2322 | ||
b6df7d61 JS |
2323 | dma_ctrl = 0; |
2324 | for (i = 0; i < priv->hw_params->rx_queues; i++) | |
2325 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2326 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2327 | reg &= ~dma_ctrl; | |
2328 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2329 | ||
2330 | dma_ctrl = 0; | |
2331 | for (i = 0; i < priv->hw_params->tx_queues; i++) | |
2332 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | |
2333 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2334 | reg &= ~dma_ctrl; | |
2335 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2336 | ||
4a0c081e FF |
2337 | return ret; |
2338 | } | |
2339 | ||
9abab96d | 2340 | static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) |
1c1008c7 FF |
2341 | { |
2342 | int i; | |
2343 | ||
9abab96d PG |
2344 | bcmgenet_fini_rx_napi(priv); |
2345 | bcmgenet_fini_tx_napi(priv); | |
2346 | ||
1c1008c7 | 2347 | /* disable DMA */ |
4a0c081e | 2348 | bcmgenet_dma_teardown(priv); |
1c1008c7 FF |
2349 | |
2350 | for (i = 0; i < priv->num_tx_bds; i++) { | |
2351 | if (priv->tx_cbs[i].skb != NULL) { | |
2352 | dev_kfree_skb(priv->tx_cbs[i].skb); | |
2353 | priv->tx_cbs[i].skb = NULL; | |
2354 | } | |
2355 | } | |
2356 | ||
2357 | bcmgenet_free_rx_buffers(priv); | |
2358 | kfree(priv->rx_cbs); | |
2359 | kfree(priv->tx_cbs); | |
2360 | } | |
2361 | ||
2362 | /* bcmgenet_init_dma: initialize Tx/Rx ring structures and DMA control registers */
2363 | static int bcmgenet_init_dma(struct bcmgenet_priv *priv) | |
2364 | { | |
2365 | int ret; | |
014012a4 PG |
2366 | unsigned int i; |
2367 | struct enet_cb *cb; | |
1c1008c7 | 2368 | |
6f5a272c | 2369 | netif_dbg(priv, hw, priv->dev, "%s\n", __func__); |
1c1008c7 | 2370 | |
6f5a272c PG |
2371 | /* Initialize common Rx ring structures */ |
2372 | priv->rx_bds = priv->base + priv->hw_params->rdma_offset; | |
2373 | priv->num_rx_bds = TOTAL_DESC; | |
2374 | priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb), | |
2375 | GFP_KERNEL); | |
2376 | if (!priv->rx_cbs) | |
2377 | return -ENOMEM; | |
2378 | ||
2379 | for (i = 0; i < priv->num_rx_bds; i++) { | |
2380 | cb = priv->rx_cbs + i; | |
2381 | cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE; | |
2382 | } | |
2383 | ||
7fc527f9 | 2384 | /* Initialize common TX ring structures */ |
1c1008c7 FF |
2385 | priv->tx_bds = priv->base + priv->hw_params->tdma_offset; |
2386 | priv->num_tx_bds = TOTAL_DESC; | |
c489be08 | 2387 | priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), |
c91b7f66 | 2388 | GFP_KERNEL); |
1c1008c7 | 2389 | if (!priv->tx_cbs) { |
ebbd96fb | 2390 | kfree(priv->rx_cbs); |
1c1008c7 FF |
2391 | return -ENOMEM; |
2392 | } | |
2393 | ||
014012a4 PG |
2394 | for (i = 0; i < priv->num_tx_bds; i++) { |
2395 | cb = priv->tx_cbs + i; | |
2396 | cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE; | |
2397 | } | |
2398 | ||
ebbd96fb PG |
2399 | /* Init RDMA */
2400 | bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2401 | ||
2402 | /* Initialize Rx queues */ | |
2403 | ret = bcmgenet_init_rx_queues(priv->dev); | |
2404 | if (ret) { | |
2405 | netdev_err(priv->dev, "failed to initialize Rx queues\n"); | |
2406 | bcmgenet_free_rx_buffers(priv); | |
2407 | kfree(priv->rx_cbs); | |
2408 | kfree(priv->tx_cbs); | |
2409 | return ret; | |
2410 | } | |
2411 | ||
2412 | /* Init TDMA */
2413 | bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); | |
2414 | ||
16c6d667 PG |
2415 | /* Initialize Tx queues */ |
2416 | bcmgenet_init_tx_queues(priv->dev); | |
1c1008c7 FF |
2417 | |
2418 | return 0; | |
2419 | } | |
2420 | ||
1c1008c7 FF |
2421 | /* Interrupt bottom half */ |
2422 | static void bcmgenet_irq_task(struct work_struct *work) | |
2423 | { | |
2424 | struct bcmgenet_priv *priv = container_of( | |
2425 | work, struct bcmgenet_priv, bcmgenet_irq_work); | |
2426 | ||
2427 | netif_dbg(priv, intr, priv->dev, "%s\n", __func__); | |
2428 | ||
8fdb0e0f FF |
2429 | if (priv->irq0_stat & UMAC_IRQ_MPD_R) { |
2430 | priv->irq0_stat &= ~UMAC_IRQ_MPD_R; | |
2431 | netif_dbg(priv, wol, priv->dev, | |
2432 | "magic packet detected, waking up\n"); | |
2433 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
2434 | } | |
2435 | ||
1c1008c7 FF |
2436 | /* Link UP/DOWN event */ |
2437 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
e122966d | 2438 | (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) { |
80d8e96d | 2439 | phy_mac_interrupt(priv->phydev, |
451e1ca2 | 2440 | !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); |
e122966d | 2441 | priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; |
1c1008c7 FF |
2442 | } |
2443 | } | |
2444 | ||
4055eaef | 2445 | /* bcmgenet_isr1: handle Rx and Tx priority queues */ |
1c1008c7 FF |
2446 | static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) |
2447 | { | |
2448 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2449 | struct bcmgenet_rx_ring *rx_ring; |
2450 | struct bcmgenet_tx_ring *tx_ring; | |
1c1008c7 FF |
2451 | unsigned int index; |
2452 | ||
2453 | /* Save irq status for bottom-half processing. */ | |
2454 | priv->irq1_stat = | |
2455 | bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & | |
4092e6ac | 2456 | ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); |
4055eaef | 2457 | |
7fc527f9 | 2458 | /* clear interrupts */ |
1c1008c7 FF |
2459 | bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); |
2460 | ||
2461 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2462 | "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); |
4092e6ac | 2463 | |
4055eaef PG |
2464 | /* Check Rx priority queue interrupts */ |
2465 | for (index = 0; index < priv->hw_params->rx_queues; index++) { | |
2466 | if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) | |
2467 | continue; | |
2468 | ||
2469 | rx_ring = &priv->rx_rings[index]; | |
2470 | ||
2471 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2472 | rx_ring->int_disable(rx_ring); | |
2473 | __napi_schedule(&rx_ring->napi); | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | /* Check Tx priority queue interrupts */ | |
4092e6ac JS |
2478 | for (index = 0; index < priv->hw_params->tx_queues; index++) { |
2479 | if (!(priv->irq1_stat & BIT(index))) | |
2480 | continue; | |
2481 | ||
4055eaef | 2482 | tx_ring = &priv->tx_rings[index]; |
4092e6ac | 2483 | |
4055eaef PG |
2484 | if (likely(napi_schedule_prep(&tx_ring->napi))) { |
2485 | tx_ring->int_disable(tx_ring); | |
2486 | __napi_schedule(&tx_ring->napi); | |
1c1008c7 FF |
2487 | } |
2488 | } | |
4092e6ac | 2489 | |
1c1008c7 FF |
2490 | return IRQ_HANDLED; |
2491 | } | |
2492 | ||
4055eaef | 2493 | /* bcmgenet_isr0: handle the Rx and Tx default queues plus link, HFB, MPD and MDIO events */
1c1008c7 FF |
2494 | static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) |
2495 | { | |
2496 | struct bcmgenet_priv *priv = dev_id; | |
4055eaef PG |
2497 | struct bcmgenet_rx_ring *rx_ring; |
2498 | struct bcmgenet_tx_ring *tx_ring; | |
1c1008c7 FF |
2499 | |
2500 | /* Save irq status for bottom-half processing. */ | |
2501 | priv->irq0_stat = | |
2502 | bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & | |
2503 | ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
4055eaef | 2504 | |
7fc527f9 | 2505 | /* clear interrupts */ |
1c1008c7 FF |
2506 | bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); |
2507 | ||
2508 | netif_dbg(priv, intr, priv->dev, | |
c91b7f66 | 2509 | "IRQ=0x%x\n", priv->irq0_stat); |
1c1008c7 | 2510 | |
ee7d8c20 | 2511 | if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) { |
4055eaef PG |
2512 | rx_ring = &priv->rx_rings[DESC_INDEX]; |
2513 | ||
2514 | if (likely(napi_schedule_prep(&rx_ring->napi))) { | |
2515 | rx_ring->int_disable(rx_ring); | |
2516 | __napi_schedule(&rx_ring->napi); | |
1c1008c7 FF |
2517 | } |
2518 | } | |
4092e6ac | 2519 | |
ee7d8c20 | 2520 | if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) { |
4055eaef PG |
2521 | tx_ring = &priv->tx_rings[DESC_INDEX]; |
2522 | ||
2523 | if (likely(napi_schedule_prep(&tx_ring->napi))) { | |
2524 | tx_ring->int_disable(tx_ring); | |
2525 | __napi_schedule(&tx_ring->napi); | |
4092e6ac | 2526 | } |
1c1008c7 | 2527 | } |
4055eaef | 2528 | |
1c1008c7 FF |
2529 | if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | |
2530 | UMAC_IRQ_PHY_DET_F | | |
e122966d | 2531 | UMAC_IRQ_LINK_EVENT | |
1c1008c7 FF |
2532 | UMAC_IRQ_HFB_SM | |
2533 | UMAC_IRQ_HFB_MM | | |
2534 | UMAC_IRQ_MPD_R)) { | |
2535 | /* all other interrupts of interest are handled in the bottom half */
2536 | schedule_work(&priv->bcmgenet_irq_work); | |
2537 | } | |
2538 | ||
2539 | if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && | |
c91b7f66 | 2540 | priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { |
1c1008c7 FF |
2541 | priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); |
2542 | wake_up(&priv->wq); | |
2543 | } | |
2544 | ||
2545 | return IRQ_HANDLED; | |
2546 | } | |
2547 | ||
8562056f FF |
2548 | static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id) |
2549 | { | |
2550 | struct bcmgenet_priv *priv = dev_id; | |
2551 | ||
2552 | pm_wakeup_event(&priv->pdev->dev, 0); | |
2553 | ||
2554 | return IRQ_HANDLED; | |
2555 | } | |
2556 | ||
4d2e8882 FF |
2557 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2558 | static void bcmgenet_poll_controller(struct net_device *dev) | |
2559 | { | |
2560 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2561 | ||
2562 | /* Invoke the main RX/TX interrupt handler */ | |
2563 | disable_irq(priv->irq0); | |
2564 | bcmgenet_isr0(priv->irq0, priv); | |
2565 | enable_irq(priv->irq0); | |
2566 | ||
2567 | /* And the interrupt handler for RX/TX priority queues */ | |
2568 | disable_irq(priv->irq1); | |
2569 | bcmgenet_isr1(priv->irq1, priv); | |
2570 | enable_irq(priv->irq1); | |
2571 | } | |
2572 | #endif | |
2573 | ||
1c1008c7 FF |
2574 | static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) |
2575 | { | |
2576 | u32 reg; | |
2577 | ||
2578 | reg = bcmgenet_rbuf_ctrl_get(priv); | |
2579 | reg |= BIT(1); | |
2580 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2581 | udelay(10); | |
2582 | ||
2583 | reg &= ~BIT(1); | |
2584 | bcmgenet_rbuf_ctrl_set(priv, reg); | |
2585 | udelay(10); | |
2586 | } | |
2587 | ||
2588 | static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, | |
c91b7f66 | 2589 | unsigned char *addr) |
1c1008c7 FF |
2590 | { |
2591 | bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | | |
2592 | (addr[2] << 8) | addr[3], UMAC_MAC0); | |
2593 | bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); | |
2594 | } | |
2595 | ||
1c1008c7 FF |
2596 | /* Returns a reusable dma control register value */ |
2597 | static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) | |
2598 | { | |
2599 | u32 reg; | |
2600 | u32 dma_ctrl; | |
2601 | ||
2602 | /* disable DMA */ | |
2603 | dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; | |
2604 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2605 | reg &= ~dma_ctrl; | |
2606 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2607 | ||
2608 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2609 | reg &= ~dma_ctrl; | |
2610 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2611 | ||
2612 | bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); | |
2613 | udelay(10); | |
2614 | bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); | |
2615 | ||
2616 | return dma_ctrl; | |
2617 | } | |
2618 | ||
2619 | static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) | |
2620 | { | |
2621 | u32 reg; | |
2622 | ||
2623 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | |
2624 | reg |= dma_ctrl; | |
2625 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | |
2626 | ||
2627 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | |
2628 | reg |= dma_ctrl; | |
2629 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | |
2630 | } | |
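/* Editor's note: bcmgenet_dma_disable() returns the ring-enable bits it
 * cleared so the caller can hand the same value back to
 * bcmgenet_enable_dma() once the rings are reprogrammed:
 *
 *	dma_ctrl = bcmgenet_dma_disable(priv);
 *	ret = bcmgenet_init_dma(priv);
 *	...
 *	bcmgenet_enable_dma(priv, dma_ctrl);
 *
 * which is exactly the sequence bcmgenet_open() uses below.
 */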
2631 | ||
0034de41 PG |
2632 | static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv, |
2633 | u32 f_index) | |
2634 | { | |
2635 | u32 offset; | |
2636 | u32 reg; | |
2637 | ||
2638 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2639 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2640 | return !!(reg & (1 << (f_index % 32))); | |
2641 | } | |
2642 | ||
2643 | static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) | |
2644 | { | |
2645 | u32 offset; | |
2646 | u32 reg; | |
2647 | ||
2648 | offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); | |
2649 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2650 | reg |= (1 << (f_index % 32)); | |
2651 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2652 | } | |
2653 | ||
2654 | static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, | |
2655 | u32 f_index, u32 rx_queue) | |
2656 | { | |
2657 | u32 offset; | |
2658 | u32 reg; | |
2659 | ||
2660 | offset = f_index / 8; | |
2661 | reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); | |
2662 | reg &= ~(0xF << (4 * (f_index % 8))); | |
2663 | reg |= ((rx_queue & 0xF) << (4 * (f_index % 8))); | |
2664 | bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset); | |
2665 | } | |
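/* Editor's note (worked example): DMA_INDEX2RING_* packs one 4-bit Rx queue
 * number per filter, eight filters per 32-bit register.  Mapping filter 10
 * to Rx queue 3, for instance, selects DMA_INDEX2RING_1 (10 / 8 == 1) and
 * writes 0x3 into bits 11:8 (shift 4 * (10 % 8) == 8).
 */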
2666 | ||
2667 | static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, | |
2668 | u32 f_index, u32 f_length) | |
2669 | { | |
2670 | u32 offset; | |
2671 | u32 reg; | |
2672 | ||
2673 | offset = HFB_FLT_LEN_V3PLUS + | |
2674 | ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * | |
2675 | sizeof(u32); | |
2676 | reg = bcmgenet_hfb_reg_readl(priv, offset); | |
2677 | reg &= ~(0xFF << (8 * (f_index % 4))); | |
2678 | reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); | |
2679 | bcmgenet_hfb_reg_writel(priv, reg, offset); | |
2680 | } | |
2681 | ||
2682 | static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv) | |
2683 | { | |
2684 | u32 f_index; | |
2685 | ||
2686 | for (f_index = 0; f_index < priv->hw_params->hfb_filter_cnt; f_index++) | |
2687 | if (!bcmgenet_hfb_is_filter_enabled(priv, f_index)) | |
2688 | return f_index; | |
2689 | ||
2690 | return -ENOMEM; | |
2691 | } | |
2692 | ||
2693 | /* bcmgenet_hfb_add_filter | |
2694 | * | |
2695 | * Add new filter to Hardware Filter Block to match and direct Rx traffic to | |
2696 | * desired Rx queue. | |
2697 | * | |
2698 | * f_data is an array of unsigned 32-bit integers where each 32-bit integer | |
2699 | * provides filter data for 2 bytes (4 nibbles) of Rx frame: | |
2700 | * | |
2701 | * bits 31:20 - unused | |
2702 | * bit 19 - nibble 0 match enable | |
2703 | * bit 18 - nibble 1 match enable | |
2704 | * bit 17 - nibble 2 match enable | |
2705 | * bit 16 - nibble 3 match enable | |
2706 | * bits 15:12 - nibble 0 data | |
2707 | * bits 11:8 - nibble 1 data | |
2708 | * bits 7:4 - nibble 2 data | |
2709 | * bits 3:0 - nibble 3 data | |
2710 | * | |
2711 | * Example: | |
2712 | * In order to match: | |
2713 | * - Ethernet frame type = 0x0800 (IP) | |
2714 | * - IP version field = 4 | |
2715 | * - IP protocol field = 0x11 (UDP) | |
2716 | * | |
2717 | * The following filter is needed: | |
2718 | * u32 hfb_filter_ipv4_udp[] = { | |
2719 | * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000, | |
2720 | * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000, | |
2721 | * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011, | |
2722 | * }; | |
2723 | * | |
2724 | * To add the filter to HFB and direct the traffic to Rx queue 0, call: | |
2725 | * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp, | |
2726 | * ARRAY_SIZE(hfb_filter_ipv4_udp), 0); | |
2727 | */ | |
2728 | int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data, | |
2729 | u32 f_length, u32 rx_queue) | |
2730 | { | |
2731 | int f_index; | |
2732 | u32 i; | |
2733 | ||
2734 | f_index = bcmgenet_hfb_find_unused_filter(priv); | |
2735 | if (f_index < 0) | |
2736 | return -ENOMEM; | |
2737 | ||
2738 | if (f_length > priv->hw_params->hfb_filter_size) | |
2739 | return -EINVAL; | |
2740 | ||
2741 | for (i = 0; i < f_length; i++) | |
2742 | bcmgenet_hfb_writel(priv, f_data[i], | |
2743 | (f_index * priv->hw_params->hfb_filter_size + i) * | |
2744 | sizeof(u32)); | |
2745 | ||
2746 | bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); | |
2747 | bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); | |
2748 | bcmgenet_hfb_enable_filter(priv, f_index); | |
2749 | bcmgenet_hfb_reg_writel(priv, 0x1, HFB_CTRL); | |
2750 | ||
2751 | return 0; | |
2752 | } | |
2753 | ||
2754 | /* bcmgenet_hfb_clear | |
2755 | * | |
2756 | * Clear Hardware Filter Block and disable all filtering. | |
2757 | */ | |
2758 | static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) | |
2759 | { | |
2760 | u32 i; | |
2761 | ||
2762 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); | |
2763 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); | |
2764 | bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); | |
2765 | ||
2766 | for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) | |
2767 | bcmgenet_rdma_writel(priv, 0x0, i); | |
2768 | ||
2769 | for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) | |
2770 | bcmgenet_hfb_reg_writel(priv, 0x0, | |
2771 | HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); | |
2772 | ||
2773 | for (i = 0; i < priv->hw_params->hfb_filter_cnt * | |
2774 | priv->hw_params->hfb_filter_size; i++) | |
2775 | bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); | |
2776 | } | |
2777 | ||
2778 | static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) | |
2779 | { | |
2780 | if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) | |
2781 | return; | |
2782 | ||
2783 | bcmgenet_hfb_clear(priv); | |
2784 | } | |
2785 | ||
909ff5ef FF |
2786 | static void bcmgenet_netif_start(struct net_device *dev) |
2787 | { | |
2788 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2789 | ||
2790 | /* Start the network engine */ | |
3ab11339 | 2791 | bcmgenet_enable_rx_napi(priv); |
e2aadb4a | 2792 | bcmgenet_enable_tx_napi(priv); |
909ff5ef FF |
2793 | |
2794 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); | |
2795 | ||
909ff5ef FF |
2796 | netif_tx_start_all_queues(dev); |
2797 | ||
2798 | phy_start(priv->phydev); | |
2799 | } | |
2800 | ||
1c1008c7 FF |
2801 | static int bcmgenet_open(struct net_device *dev) |
2802 | { | |
2803 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2804 | unsigned long dma_ctrl; | |
2805 | u32 reg; | |
2806 | int ret; | |
2807 | ||
2808 | netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); | |
2809 | ||
2810 | /* Turn on the clock */ | |
7d5d3075 | 2811 | clk_prepare_enable(priv->clk); |
1c1008c7 | 2812 | |
a642c4f7 FF |
2813 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
2814 | * brought out of reset, as absolutely no UniMAC activity is allowed
2815 | */ | |
c624f891 | 2816 | if (priv->internal_phy) |
a642c4f7 FF |
2817 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
2818 | ||
1c1008c7 FF |
2819 | /* take MAC out of reset */ |
2820 | bcmgenet_umac_reset(priv); | |
2821 | ||
2822 | ret = init_umac(priv); | |
2823 | if (ret) | |
2824 | goto err_clk_disable; | |
2825 | ||
2826 | /* disable ethernet MAC while updating its registers */ | |
e29585b8 | 2827 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
1c1008c7 | 2828 | |
909ff5ef FF |
2829 | /* Make sure we reflect the value of CMD_CRC_FWD */
2830 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); | |
2831 | priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); | |
2832 | ||
1c1008c7 FF |
2833 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
2834 | ||
c624f891 | 2835 | if (priv->internal_phy) { |
1c1008c7 FF |
2836 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
2837 | reg |= EXT_ENERGY_DET_MASK; | |
2838 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
2839 | } | |
2840 | ||
2841 | /* Disable RX/TX DMA and flush TX queues */ | |
2842 | dma_ctrl = bcmgenet_dma_disable(priv); | |
2843 | ||
2844 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
2845 | ret = bcmgenet_init_dma(priv); | |
2846 | if (ret) { | |
2847 | netdev_err(dev, "failed to initialize DMA\n"); | |
fac25940 | 2848 | goto err_clk_disable; |
1c1008c7 FF |
2849 | } |
2850 | ||
2851 | /* Always enable ring 16 - descriptor ring */ | |
2852 | bcmgenet_enable_dma(priv, dma_ctrl); | |
2853 | ||
0034de41 PG |
2854 | /* HFB init */ |
2855 | bcmgenet_hfb_init(priv); | |
2856 | ||
1c1008c7 | 2857 | ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, |
c91b7f66 | 2858 | dev->name, priv); |
1c1008c7 FF |
2859 | if (ret < 0) { |
2860 | netdev_err(dev, "can't request IRQ %d\n", priv->irq0); | |
2861 | goto err_fini_dma; | |
2862 | } | |
2863 | ||
2864 | ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, | |
c91b7f66 | 2865 | dev->name, priv); |
1c1008c7 FF |
2866 | if (ret < 0) { |
2867 | netdev_err(dev, "can't request IRQ %d\n", priv->irq1); | |
2868 | goto err_irq0; | |
2869 | } | |
2870 | ||
6cc8e6d4 FF |
2871 | ret = bcmgenet_mii_probe(dev); |
2872 | if (ret) { | |
2873 | netdev_err(dev, "failed to connect to PHY\n"); | |
2874 | goto err_irq1; | |
2875 | } | |
c96e731c | 2876 | |
909ff5ef | 2877 | bcmgenet_netif_start(dev); |
1c1008c7 FF |
2878 | |
2879 | return 0; | |
2880 | ||
6cc8e6d4 FF |
2881 | err_irq1: |
2882 | free_irq(priv->irq1, priv); | |
1c1008c7 | 2883 | err_irq0: |
978ffac4 | 2884 | free_irq(priv->irq0, priv); |
1c1008c7 FF |
2885 | err_fini_dma: |
2886 | bcmgenet_fini_dma(priv); | |
2887 | err_clk_disable: | |
7d5d3075 | 2888 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
2889 | return ret; |
2890 | } | |
2891 | ||
909ff5ef FF |
2892 | static void bcmgenet_netif_stop(struct net_device *dev) |
2893 | { | |
2894 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2895 | ||
2896 | netif_tx_stop_all_queues(dev); | |
909ff5ef | 2897 | phy_stop(priv->phydev); |
909ff5ef | 2898 | bcmgenet_intr_disable(priv); |
3ab11339 | 2899 | bcmgenet_disable_rx_napi(priv); |
e2aadb4a | 2900 | bcmgenet_disable_tx_napi(priv); |
909ff5ef FF |
2901 | |
2902 | /* Wait for pending work items to complete. Since interrupts are | |
2903 | * disabled no new work will be scheduled. | |
2904 | */ | |
2905 | cancel_work_sync(&priv->bcmgenet_irq_work); | |
cc013fb4 | 2906 | |
cc013fb4 | 2907 | priv->old_link = -1; |
5ad6e6c5 | 2908 | priv->old_speed = -1; |
cc013fb4 | 2909 | priv->old_duplex = -1; |
5ad6e6c5 | 2910 | priv->old_pause = -1; |
909ff5ef FF |
2911 | } |
2912 | ||
1c1008c7 FF |
2913 | static int bcmgenet_close(struct net_device *dev) |
2914 | { | |
2915 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
2916 | int ret; | |
1c1008c7 FF |
2917 | |
2918 | netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); | |
2919 | ||
909ff5ef | 2920 | bcmgenet_netif_stop(dev); |
1c1008c7 | 2921 | |
c96e731c FF |
2922 | /* Really kill the PHY state machine and disconnect from it */ |
2923 | phy_disconnect(priv->phydev); | |
2924 | ||
1c1008c7 | 2925 | /* Disable MAC receive */ |
e29585b8 | 2926 | umac_enable_set(priv, CMD_RX_EN, false); |
1c1008c7 | 2927 | |
1c1008c7 FF |
2928 | ret = bcmgenet_dma_teardown(priv); |
2929 | if (ret) | |
2930 | return ret; | |
2931 | ||
2932 | /* Disable MAC transmit. TX DMA must be disabled before this */
e29585b8 | 2933 | umac_enable_set(priv, CMD_TX_EN, false); |
1c1008c7 | 2934 | |
1c1008c7 FF |
2935 | /* tx reclaim */ |
2936 | bcmgenet_tx_reclaim_all(dev); | |
2937 | bcmgenet_fini_dma(priv); | |
2938 | ||
2939 | free_irq(priv->irq0, priv); | |
2940 | free_irq(priv->irq1, priv); | |
2941 | ||
c624f891 | 2942 | if (priv->internal_phy) |
ca8cf341 | 2943 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
1c1008c7 | 2944 | |
7d5d3075 | 2945 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 2946 | |
ca8cf341 | 2947 | return ret; |
1c1008c7 FF |
2948 | } |
2949 | ||
13ea6578 FF |
2950 | static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) |
2951 | { | |
2952 | struct bcmgenet_priv *priv = ring->priv; | |
2953 | u32 p_index, c_index, intsts, intmsk; | |
2954 | struct netdev_queue *txq; | |
2955 | unsigned int free_bds; | |
2956 | unsigned long flags; | |
2957 | bool txq_stopped; | |
2958 | ||
2959 | if (!netif_msg_tx_err(priv)) | |
2960 | return; | |
2961 | ||
2962 | txq = netdev_get_tx_queue(priv->dev, ring->queue); | |
2963 | ||
2964 | spin_lock_irqsave(&ring->lock, flags); | |
2965 | if (ring->index == DESC_INDEX) { | |
2966 | intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); | |
2967 | intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; | |
2968 | } else { | |
2969 | intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); | |
2970 | intmsk = 1 << ring->index; | |
2971 | } | |
2972 | c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); | |
2973 | p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); | |
2974 | txq_stopped = netif_tx_queue_stopped(txq); | |
2975 | free_bds = ring->free_bds; | |
2976 | spin_unlock_irqrestore(&ring->lock, flags); | |
2977 | ||
2978 | netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" | |
2979 | "TX queue status: %s, interrupts: %s\n" | |
2980 | "(sw)free_bds: %d (sw)size: %d\n" | |
2981 | "(sw)p_index: %d (hw)p_index: %d\n" | |
2982 | "(sw)c_index: %d (hw)c_index: %d\n" | |
2983 | "(sw)clean_p: %d (sw)write_p: %d\n" | |
2984 | "(sw)cb_ptr: %d (sw)end_ptr: %d\n", | |
2985 | ring->index, ring->queue, | |
2986 | txq_stopped ? "stopped" : "active", | |
2987 | intsts & intmsk ? "enabled" : "disabled", | |
2988 | free_bds, ring->size, | |
2989 | ring->prod_index, p_index & DMA_P_INDEX_MASK, | |
2990 | ring->c_index, c_index & DMA_C_INDEX_MASK, | |
2991 | ring->clean_ptr, ring->write_ptr, | |
2992 | ring->cb_ptr, ring->end_ptr); | |
2993 | } | |
2994 | ||
1c1008c7 FF |
2995 | static void bcmgenet_timeout(struct net_device *dev) |
2996 | { | |
2997 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
13ea6578 FF |
2998 | u32 int0_enable = 0; |
2999 | u32 int1_enable = 0; | |
3000 | unsigned int q; | |
1c1008c7 FF |
3001 | |
3002 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | |
3003 | ||
13ea6578 FF |
3004 | for (q = 0; q < priv->hw_params->tx_queues; q++) |
3005 | bcmgenet_dump_tx_queue(&priv->tx_rings[q]); | |
3006 | bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); | |
3007 | ||
3008 | bcmgenet_tx_reclaim_all(dev); | |
3009 | ||
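| /* Each priority TX queue has its own completion bit in INTRL2_1, | |
| * while ring 16 uses TXDMA_DONE in INTRL2_0; build both masks so | |
| * the unmask below covers every TX ring. | |
| */ | |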
3010 | for (q = 0; q < priv->hw_params->tx_queues; q++) | |
3011 | int1_enable |= (1 << q); | |
3012 | ||
3013 | int0_enable = UMAC_IRQ_TXDMA_DONE; | |
3014 | ||
3015 | /* Re-enable TX interrupts if disabled */ | |
3016 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | |
3017 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | |
3018 | ||
1c1008c7 FF |
3019 | dev->trans_start = jiffies; |
3020 | ||
3021 | dev->stats.tx_errors++; | |
3022 | ||
3023 | netif_tx_wake_all_queues(dev); | |
3024 | } | |
3025 | ||
3026 | #define MAX_MC_COUNT 16 | |
3027 | ||
3028 | static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv, | |
3029 | unsigned char *addr, | |
3030 | int *i, | |
3031 | int *mc) | |
3032 | { | |
3033 | u32 reg; | |
3034 | ||
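| /* Each MDF entry spans two 32-bit registers: the first word takes | |
| * the top 16 bits of the MAC address and the second the low 32 | |
| * bits, so (illustrative) 00:10:18:aa:bb:cc is written as | |
| * 0x00000010 followed by 0x18aabbcc. The entry's match-enable bit | |
| * in UMAC_MDF_CTRL is counted down from MAX_MC_COUNT. | |
| */ | |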
c91b7f66 FF |
3035 | bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1], |
3036 | UMAC_MDF_ADDR + (*i * 4)); | |
3037 | bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 | | |
3038 | addr[4] << 8 | addr[5], | |
3039 | UMAC_MDF_ADDR + ((*i + 1) * 4)); | |
1c1008c7 FF |
3040 | reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL); |
3041 | reg |= (1 << (MAX_MC_COUNT - *mc)); | |
3042 | bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL); | |
3043 | *i += 2; | |
3044 | (*mc)++; | |
3045 | } | |
3046 | ||
3047 | static void bcmgenet_set_rx_mode(struct net_device *dev) | |
3048 | { | |
3049 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3050 | struct netdev_hw_addr *ha; | |
3051 | int i, mc; | |
3052 | u32 reg; | |
3053 | ||
3054 | netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags); | |
3055 | ||
7fc527f9 | 3056 | /* Promiscuous mode */ |
1c1008c7 FF |
3057 | reg = bcmgenet_umac_readl(priv, UMAC_CMD); |
3058 | if (dev->flags & IFF_PROMISC) { | |
3059 | reg |= CMD_PROMISC; | |
3060 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3061 | bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); | |
3062 | return; | |
3063 | } else { | |
3064 | reg &= ~CMD_PROMISC; | |
3065 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); | |
3066 | } | |
3067 | ||
3068 | /* UniMAC doesn't support ALLMULTI */ | |
3069 | if (dev->flags & IFF_ALLMULTI) { | |
3070 | netdev_warn(dev, "ALLMULTI is not supported\n"); | |
3071 | return; | |
3072 | } | |
3073 | ||
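| /* The MDF provides MAX_MC_COUNT entries in total; broadcast and | |
| * the device address consume the first two, and the remainder is | |
| * shared by the unicast and multicast lists. If either list would | |
| * overflow the table, programming simply stops here rather than | |
| * falling back to promiscuous mode. | |
| */ | |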
3074 | /* update MDF filter */ | |
3075 | i = 0; | |
3076 | mc = 0; | |
3077 | /* Broadcast */ | |
3078 | bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc); | |
3079 | /* Our own address */ | |
3080 | bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc); | |
3081 | /* Unicast list */ | |
3082 | if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc)) | |
3083 | return; | |
3084 | ||
3085 | if (!netdev_uc_empty(dev)) | |
3086 | netdev_for_each_uc_addr(ha, dev) | |
3087 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3088 | /* Multicast */ | |
3089 | if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc)) | |
3090 | return; | |
3091 | ||
3092 | netdev_for_each_mc_addr(ha, dev) | |
3093 | bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc); | |
3094 | } | |
3095 | ||
3096 | /* Set the hardware MAC address. */ | |
3097 | static int bcmgenet_set_mac_addr(struct net_device *dev, void *p) | |
3098 | { | |
3099 | struct sockaddr *addr = p; | |
3100 | ||
3101 | /* Setting the MAC address at the hardware level is not possible | |
3102 | * without disabling the UniMAC RX/TX enable bits. | |
3103 | */ | |
3104 | if (netif_running(dev)) | |
3105 | return -EBUSY; | |
3106 | ||
3107 | ether_addr_copy(dev->dev_addr, addr->sa_data); | |
3108 | ||
3109 | return 0; | |
3110 | } | |
3111 | ||
1c1008c7 FF |
3112 | static const struct net_device_ops bcmgenet_netdev_ops = { |
3113 | .ndo_open = bcmgenet_open, | |
3114 | .ndo_stop = bcmgenet_close, | |
3115 | .ndo_start_xmit = bcmgenet_xmit, | |
1c1008c7 FF |
3116 | .ndo_tx_timeout = bcmgenet_timeout, |
3117 | .ndo_set_rx_mode = bcmgenet_set_rx_mode, | |
3118 | .ndo_set_mac_address = bcmgenet_set_mac_addr, | |
3119 | .ndo_do_ioctl = bcmgenet_ioctl, | |
3120 | .ndo_set_features = bcmgenet_set_features, | |
4d2e8882 FF |
3121 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3122 | .ndo_poll_controller = bcmgenet_poll_controller, | |
3123 | #endif | |
1c1008c7 FF |
3124 | }; |
3125 | ||
3126 | /* Array of GENET hardware parameters/characteristics */ | |
3127 | static struct bcmgenet_hw_params bcmgenet_hw_params[] = { | |
3128 | [GENET_V1] = { | |
3129 | .tx_queues = 0, | |
51a966a7 | 3130 | .tx_bds_per_q = 0, |
1c1008c7 | 3131 | .rx_queues = 0, |
3feafa02 | 3132 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3133 | .bp_in_en_shift = 16, |
3134 | .bp_in_mask = 0xffff, | |
3135 | .hfb_filter_cnt = 16, | |
3136 | .qtag_mask = 0x1F, | |
3137 | .hfb_offset = 0x1000, | |
3138 | .rdma_offset = 0x2000, | |
3139 | .tdma_offset = 0x3000, | |
3140 | .words_per_bd = 2, | |
3141 | }, | |
3142 | [GENET_V2] = { | |
3143 | .tx_queues = 4, | |
51a966a7 | 3144 | .tx_bds_per_q = 32, |
7e906e02 | 3145 | .rx_queues = 0, |
3feafa02 | 3146 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3147 | .bp_in_en_shift = 16, |
3148 | .bp_in_mask = 0xffff, | |
3149 | .hfb_filter_cnt = 16, | |
3150 | .qtag_mask = 0x1F, | |
3151 | .tbuf_offset = 0x0600, | |
3152 | .hfb_offset = 0x1000, | |
3153 | .hfb_reg_offset = 0x2000, | |
3154 | .rdma_offset = 0x3000, | |
3155 | .tdma_offset = 0x4000, | |
3156 | .words_per_bd = 2, | |
3157 | .flags = GENET_HAS_EXT, | |
3158 | }, | |
3159 | [GENET_V3] = { | |
3160 | .tx_queues = 4, | |
51a966a7 | 3161 | .tx_bds_per_q = 32, |
7e906e02 | 3162 | .rx_queues = 0, |
3feafa02 | 3163 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3164 | .bp_in_en_shift = 17, |
3165 | .bp_in_mask = 0x1ffff, | |
3166 | .hfb_filter_cnt = 48, | |
0034de41 | 3167 | .hfb_filter_size = 128, |
1c1008c7 FF |
3168 | .qtag_mask = 0x3F, |
3169 | .tbuf_offset = 0x0600, | |
3170 | .hfb_offset = 0x8000, | |
3171 | .hfb_reg_offset = 0xfc00, | |
3172 | .rdma_offset = 0x10000, | |
3173 | .tdma_offset = 0x11000, | |
3174 | .words_per_bd = 2, | |
8d88c6eb PG |
3175 | .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | |
3176 | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3177 | }, |
3178 | [GENET_V4] = { | |
3179 | .tx_queues = 4, | |
51a966a7 | 3180 | .tx_bds_per_q = 32, |
7e906e02 | 3181 | .rx_queues = 0, |
3feafa02 | 3182 | .rx_bds_per_q = 0, |
1c1008c7 FF |
3183 | .bp_in_en_shift = 17, |
3184 | .bp_in_mask = 0x1ffff, | |
3185 | .hfb_filter_cnt = 48, | |
0034de41 | 3186 | .hfb_filter_size = 128, |
1c1008c7 FF |
3187 | .qtag_mask = 0x3F, |
3188 | .tbuf_offset = 0x0600, | |
3189 | .hfb_offset = 0x8000, | |
3190 | .hfb_reg_offset = 0xfc00, | |
3191 | .rdma_offset = 0x2000, | |
3192 | .tdma_offset = 0x4000, | |
3193 | .words_per_bd = 3, | |
8d88c6eb PG |
3194 | .flags = GENET_HAS_40BITS | GENET_HAS_EXT | |
3195 | GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, | |
1c1008c7 FF |
3196 | }, |
3197 | }; | |
3198 | ||
3199 | /* Infer hardware parameters from the detected GENET version */ | |
3200 | static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) | |
3201 | { | |
3202 | struct bcmgenet_hw_params *params; | |
3203 | u32 reg; | |
3204 | u8 major; | |
b04a2f5b | 3205 | u16 gphy_rev; |
1c1008c7 FF |
3206 | |
3207 | if (GENET_IS_V4(priv)) { | |
3208 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3209 | genet_dma_ring_regs = genet_dma_ring_regs_v4; | |
3210 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3211 | priv->version = GENET_V4; | |
3212 | } else if (GENET_IS_V3(priv)) { | |
3213 | bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; | |
3214 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3215 | priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; | |
3216 | priv->version = GENET_V3; | |
3217 | } else if (GENET_IS_V2(priv)) { | |
3218 | bcmgenet_dma_regs = bcmgenet_dma_regs_v2; | |
3219 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3220 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3221 | priv->version = GENET_V2; | |
3222 | } else if (GENET_IS_V1(priv)) { | |
3223 | bcmgenet_dma_regs = bcmgenet_dma_regs_v1; | |
3224 | genet_dma_ring_regs = genet_dma_ring_regs_v123; | |
3225 | priv->dma_rx_chk_bit = DMA_RX_CHK_V12; | |
3226 | priv->version = GENET_V1; | |
3227 | } | |
3228 | ||
3229 | /* enum genet_version starts at 1 */ | |
3230 | priv->hw_params = &bcmgenet_hw_params[priv->version]; | |
3231 | params = priv->hw_params; | |
3232 | ||
3233 | /* Read GENET HW version */ | |
3234 | reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); | |
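| /* The hardware encodes some versions differently from enum | |
| * genet_version: a major of 5 is treated as V4 and a major of 0 | |
| * as V1, so remap before the consistency check below. | |
| */ | |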
3235 | major = (reg >> 24) & 0x0f; | |
3236 | if (major == 5) | |
3237 | major = 4; | |
3238 | else if (major == 0) | |
3239 | major = 1; | |
3240 | if (major != priv->version) { | |
3241 | dev_err(&priv->pdev->dev, | |
3242 | "GENET version mismatch, got: %d, configured for: %d\n", | |
3243 | major, priv->version); | |
3244 | } | |
3245 | ||
3246 | /* Print the GENET core version */ | |
3247 | dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, | |
c91b7f66 | 3248 | major, (reg >> 16) & 0x0f, reg & 0xffff); |
1c1008c7 | 3249 | |
487320c5 FF |
3250 | /* Store the integrated PHY revision for the MDIO probing function |
3251 | * to pass this information to the PHY driver. The PHY driver expects | |
3252 | * to find the PHY major revision in bits 15:8 while the GENET register | |
3253 | * stores that information in bits 7:0; account for that. | |
b04a2f5b FF |
3254 | * |
3255 | * On newer chips, starting with PHY revision G0, a new scheme is | |
3256 | * deployed similar to the Starfighter 2 switch with GPHY major | |
3257 | * revision in bits 15:8 and patch level in bits 7:0. Major revision 0 | |
3258 | * is reserved, as is the special value 0x01ff, so we use a small | |
3259 | * heuristic to check for the new GPHY revision scheme and re-arrange | |
3260 | * things so the GPHY driver is happy. | |
487320c5 | 3261 | */ |
b04a2f5b FF |
3262 | gphy_rev = reg & 0xffff; |
3263 | ||
3264 | /* This is the good old scheme, just the GPHY major, no minor or patch */ | |
3265 | if ((gphy_rev & 0xf0) != 0) | |
3266 | priv->gphy_rev = gphy_rev << 8; | |
3267 | ||
3268 | /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */ | |
3269 | else if ((gphy_rev & 0xff00) != 0) | |
3270 | priv->gphy_rev = gphy_rev; | |
3271 | ||
3272 | /* These values are reserved and require special treatment */ | |
3273 | else if (gphy_rev == 0 || gphy_rev == 0x01ff) { | |
3274 | pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev); | |
3275 | return; | |
3276 | } | |
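| /* Illustrative (hypothetical) values: an old-scheme register of | |
| * 0x00a0 becomes gphy_rev 0xa000 (major moved into bits 15:8), | |
| * while a new-scheme value such as 0x1000 (rev G0) is passed | |
| * through unchanged. | |
| */ | |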
487320c5 | 3277 | |
1c1008c7 FF |
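| /* Only GENET_V4 sets GENET_HAS_40BITS; its 3 words per BD leave | |
| * room for the upper physical address bits, so warn when other | |
| * versions run with 64-bit physical addresses. | |
| */ | |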
3278 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
3279 | if (!(params->flags & GENET_HAS_40BITS)) | |
3280 | pr_warn("GENET does not support 40-bit PA\n"); | |
3281 | #endif | |
3282 | ||
3283 | pr_debug("Configuration for version: %d\n" | |
3feafa02 | 3284 | "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n" |
1c1008c7 FF |
3285 | "BP << en: %2d, BP msk: 0x%05x\n" |
3286 | "HFB count: %2d, QTAQ msk: 0x%05x\n" | |
3287 | "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" | |
3288 | "RDMA: 0x%05x, TDMA: 0x%05x\n" | |
3289 | "Words/BD: %d\n", | |
3290 | priv->version, | |
51a966a7 | 3291 | params->tx_queues, params->tx_bds_per_q, |
3feafa02 | 3292 | params->rx_queues, params->rx_bds_per_q, |
1c1008c7 FF |
3293 | params->bp_in_en_shift, params->bp_in_mask, |
3294 | params->hfb_filter_cnt, params->qtag_mask, | |
3295 | params->tbuf_offset, params->hfb_offset, | |
3296 | params->hfb_reg_offset, | |
3297 | params->rdma_offset, params->tdma_offset, | |
3298 | params->words_per_bd); | |
3299 | } | |
3300 | ||
3301 | static const struct of_device_id bcmgenet_match[] = { | |
3302 | { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, | |
3303 | { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, | |
3304 | { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, | |
3305 | { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, | |
3306 | { }, | |
3307 | }; | |
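| /* The driver binds to a device tree node along these lines (the | |
| * node name, unit address and resource values are hypothetical; | |
| * only the compatible strings above are authoritative): | |
| * | |
| * ethernet@f0b60000 { | |
| * compatible = "brcm,genet-v4"; | |
| * reg = <0xf0b60000 0x10000>; | |
| * interrupts = <0x0 0x10 0x0>, <0x0 0x11 0x0>; | |
| * }; | |
| */ | |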
3308 | ||
3309 | static int bcmgenet_probe(struct platform_device *pdev) | |
3310 | { | |
b0ba512e | 3311 | struct bcmgenet_platform_data *pd = pdev->dev.platform_data; |
1c1008c7 | 3312 | struct device_node *dn = pdev->dev.of_node; |
b0ba512e | 3313 | const struct of_device_id *of_id = NULL; |
1c1008c7 FF |
3314 | struct bcmgenet_priv *priv; |
3315 | struct net_device *dev; | |
3316 | const void *macaddr; | |
3317 | struct resource *r; | |
3318 | int err = -EIO; | |
3319 | ||
3feafeed PG |
3320 | /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */ |
3321 | dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, | |
3322 | GENET_MAX_MQ_CNT + 1); | |
1c1008c7 FF |
3323 | if (!dev) { |
3324 | dev_err(&pdev->dev, "can't allocate net device\n"); | |
3325 | return -ENOMEM; | |
3326 | } | |
3327 | ||
b0ba512e PG |
3328 | if (dn) { |
3329 | of_id = of_match_node(bcmgenet_match, dn); | |
3330 | if (!of_id) { | |
3331 | err = -EINVAL; | |
| goto err; /* free the netdev allocated above */ | |
| } | |
3332 | } | |
1c1008c7 FF |
3333 | |
3334 | priv = netdev_priv(dev); | |
3335 | priv->irq0 = platform_get_irq(pdev, 0); | |
3336 | priv->irq1 = platform_get_irq(pdev, 1); | |
8562056f | 3337 | priv->wol_irq = platform_get_irq(pdev, 2); |
1c1008c7 FF |
3338 | if (priv->irq0 <= 0 || priv->irq1 <= 0) { | |
3339 | dev_err(&pdev->dev, "can't find IRQs\n"); | |
3340 | err = -EINVAL; | |
3341 | goto err; | |
3342 | } | |
3343 | ||
b0ba512e PG |
3344 | if (dn) { |
3345 | macaddr = of_get_mac_address(dn); | |
3346 | if (!macaddr) { | |
3347 | dev_err(&pdev->dev, "can't find MAC address\n"); | |
3348 | err = -EINVAL; | |
3349 | goto err; | |
3350 | } | |
3351 | } else { | |
3352 | macaddr = pd->mac_address; | |
1c1008c7 FF |
3353 | } |
3354 | ||
3355 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
5343a10d FE |
3356 | priv->base = devm_ioremap_resource(&pdev->dev, r); |
3357 | if (IS_ERR(priv->base)) { | |
3358 | err = PTR_ERR(priv->base); | |
1c1008c7 FF |
3359 | goto err; |
3360 | } | |
3361 | ||
3362 | SET_NETDEV_DEV(dev, &pdev->dev); | |
3363 | dev_set_drvdata(&pdev->dev, dev); | |
3364 | ether_addr_copy(dev->dev_addr, macaddr); | |
3365 | dev->watchdog_timeo = 2 * HZ; | |
7ad24ea4 | 3366 | dev->ethtool_ops = &bcmgenet_ethtool_ops; |
1c1008c7 | 3367 | dev->netdev_ops = &bcmgenet_netdev_ops; |
1c1008c7 FF |
3368 | |
3369 | priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); | |
3370 | ||
3371 | /* Set hardware features */ | |
3372 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | | |
3373 | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; | |
3374 | ||
8562056f FF |
3375 | /* Request the WOL interrupt and advertise suspend if available */ |
3376 | priv->wol_irq_disabled = true; | |
3377 | err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, | |
3378 | dev->name, priv); | |
3379 | if (!err) | |
3380 | device_set_wakeup_capable(&pdev->dev, 1); | |
3381 | ||
1c1008c7 FF |
3382 | /* Set the needed headroom to account for any features that | |
3383 | * may be enabled or disabled at runtime | |
3384 | */ | |
3385 | dev->needed_headroom += 64; | |
3386 | ||
3387 | netdev_boot_setup_check(dev); | |
3388 | ||
3389 | priv->dev = dev; | |
3390 | priv->pdev = pdev; | |
b0ba512e PG |
3391 | if (of_id) |
3392 | priv->version = (enum bcmgenet_version)of_id->data; | |
3393 | else | |
3394 | priv->version = pd->genet_version; | |
1c1008c7 | 3395 | |
e4a60a93 | 3396 | priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); |
7d5d3075 | 3397 | if (IS_ERR(priv->clk)) { |
e4a60a93 | 3398 | dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); |
7d5d3075 FF |
3399 | priv->clk = NULL; |
3400 | } | |
e4a60a93 | 3401 | |
7d5d3075 | 3402 | clk_prepare_enable(priv->clk); |
e4a60a93 | 3403 | |
1c1008c7 FF |
3404 | bcmgenet_set_hw_params(priv); |
3405 | ||
1c1008c7 FF |
3406 | /* Mii wait queue */ |
3407 | init_waitqueue_head(&priv->wq); | |
3408 | /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ | |
3409 | priv->rx_buf_len = RX_BUF_LENGTH; | |
3410 | INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); | |
3411 | ||
1c1008c7 | 3412 | priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); |
7d5d3075 | 3413 | if (IS_ERR(priv->clk_wol)) { |
1c1008c7 | 3414 | dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); |
7d5d3075 FF |
3415 | priv->clk_wol = NULL; |
3416 | } | |
1c1008c7 | 3417 | |
6ef398ea FF |
3418 | priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee"); |
3419 | if (IS_ERR(priv->clk_eee)) { | |
3420 | dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n"); | |
3421 | priv->clk_eee = NULL; | |
3422 | } | |
3423 | ||
1c1008c7 FF |
3424 | err = reset_umac(priv); |
3425 | if (err) | |
3426 | goto err_clk_disable; | |
3427 | ||
3428 | err = bcmgenet_mii_init(dev); | |
3429 | if (err) | |
3430 | goto err_clk_disable; | |
3431 | ||
3432 | /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware | |
3433 | * queues; it only has the ring 16 descriptor-based TX queue) | |
3434 | */ | |
3435 | netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); | |
3436 | netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); | |
3437 | ||
219575eb FF |
3438 | /* libphy will determine the link state */ |
3439 | netif_carrier_off(dev); | |
3440 | ||
1c1008c7 | 3441 | /* Turn off the main clock, WOL clock is handled separately */ |
7d5d3075 | 3442 | clk_disable_unprepare(priv->clk); |
1c1008c7 | 3443 | |
0f50ce96 FF |
3444 | err = register_netdev(dev); |
3445 | if (err) | |
3446 | goto err; | |
3447 | ||
1c1008c7 FF |
3448 | return err; |
3449 | ||
3450 | err_clk_disable: | |
7d5d3075 | 3451 | clk_disable_unprepare(priv->clk); |
1c1008c7 FF |
3452 | err: |
3453 | free_netdev(dev); | |
3454 | return err; | |
3455 | } | |
3456 | ||
3457 | static int bcmgenet_remove(struct platform_device *pdev) | |
3458 | { | |
3459 | struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); | |
3460 | ||
3461 | dev_set_drvdata(&pdev->dev, NULL); | |
3462 | unregister_netdev(priv->dev); | |
3463 | bcmgenet_mii_exit(priv->dev); | |
3464 | free_netdev(priv->dev); | |
3465 | ||
3466 | return 0; | |
3467 | } | |
3468 | ||
b6e978e5 FF |
3469 | #ifdef CONFIG_PM_SLEEP |
3470 | static int bcmgenet_suspend(struct device *d) | |
3471 | { | |
3472 | struct net_device *dev = dev_get_drvdata(d); | |
3473 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3474 | int ret; | |
3475 | ||
3476 | if (!netif_running(dev)) | |
3477 | return 0; | |
3478 | ||
3479 | bcmgenet_netif_stop(dev); | |
3480 | ||
cc013fb4 FF |
3481 | phy_suspend(priv->phydev); |
3482 | ||
b6e978e5 FF |
3483 | netif_device_detach(dev); |
3484 | ||
3485 | /* Disable MAC receive */ | |
3486 | umac_enable_set(priv, CMD_RX_EN, false); | |
3487 | ||
3488 | ret = bcmgenet_dma_teardown(priv); | |
3489 | if (ret) | |
3490 | return ret; | |
3491 | ||
3492 | /* Disable MAC transmit. TX DMA must be disabled before this */ | |
3493 | umac_enable_set(priv, CMD_TX_EN, false); | |
3494 | ||
3495 | /* tx reclaim */ | |
3496 | bcmgenet_tx_reclaim_all(dev); | |
3497 | bcmgenet_fini_dma(priv); | |
3498 | ||
8c90db72 FF |
3499 | /* Prepare the device for Wake-on-LAN and switch to the slow clock */ |
3500 | if (device_may_wakeup(d) && priv->wolopts) { | |
ca8cf341 | 3501 | ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); |
8c90db72 | 3502 | clk_prepare_enable(priv->clk_wol); |
c624f891 | 3503 | } else if (priv->internal_phy) { |
a6f31f5e | 3504 | ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); |
8c90db72 FF |
3505 | } |
3506 | ||
b6e978e5 FF |
3507 | /* Turn off the clocks */ |
3508 | clk_disable_unprepare(priv->clk); | |
3509 | ||
ca8cf341 | 3510 | return ret; |
b6e978e5 FF |
3511 | } |
3512 | ||
3513 | static int bcmgenet_resume(struct device *d) | |
3514 | { | |
3515 | struct net_device *dev = dev_get_drvdata(d); | |
3516 | struct bcmgenet_priv *priv = netdev_priv(dev); | |
3517 | unsigned long dma_ctrl; | |
3518 | int ret; | |
3519 | u32 reg; | |
3520 | ||
3521 | if (!netif_running(dev)) | |
3522 | return 0; | |
3523 | ||
3524 | /* Turn on the clock */ | |
3525 | ret = clk_prepare_enable(priv->clk); | |
3526 | if (ret) | |
3527 | return ret; | |
3528 | ||
a6f31f5e FF |
3529 | /* If this is an internal GPHY, power it back on now, before UniMAC is |
3530 | * brought out of reset, as absolutely no UniMAC activity is allowed | |
3531 | */ | |
c624f891 | 3532 | if (priv->internal_phy) |
a6f31f5e FF |
3533 | bcmgenet_power_up(priv, GENET_POWER_PASSIVE); |
3534 | ||
b6e978e5 FF |
3535 | bcmgenet_umac_reset(priv); |
3536 | ||
3537 | ret = init_umac(priv); | |
3538 | if (ret) | |
3539 | goto out_clk_disable; | |
3540 | ||
0a29b3da TK |
3541 | /* From WOL-enabled suspend, switch to regular clock */ |
3542 | if (priv->wolopts) | |
3543 | clk_disable_unprepare(priv->clk_wol); | |
3544 | ||
3545 | phy_init_hw(priv->phydev); | |
3546 | /* Speed settings must be restored */ | |
28b45910 | 3547 | bcmgenet_mii_config(priv->dev); |
8c90db72 | 3548 | |
b6e978e5 FF |
3549 | /* disable ethernet MAC while updating its registers */ |
3550 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | |
3551 | ||
3552 | bcmgenet_set_hw_addr(priv, dev->dev_addr); | |
3553 | ||
c624f891 | 3554 | if (priv->internal_phy) { |
b6e978e5 FF |
3555 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
3556 | reg |= EXT_ENERGY_DET_MASK; | |
3557 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | |
3558 | } | |
3559 | ||
98bb7399 FF |
3560 | if (priv->wolopts) |
3561 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); | |
3562 | ||
b6e978e5 FF |
3563 | /* Disable RX/TX DMA and flush TX queues */ |
3564 | dma_ctrl = bcmgenet_dma_disable(priv); | |
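| /* dma_ctrl holds the DMA enable bits that bcmgenet_dma_disable() | |
| * cleared, so bcmgenet_enable_dma() below can restore exactly that | |
| * state once the rings have been reinitialized. | |
| */ | |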
3565 | ||
3566 | /* Reinitialize TDMA and RDMA and SW housekeeping */ | |
3567 | ret = bcmgenet_init_dma(priv); | |
3568 | if (ret) { | |
3569 | netdev_err(dev, "failed to initialize DMA\n"); | |
3570 | goto out_clk_disable; | |
3571 | } | |
3572 | ||
3573 | /* Always enable ring 16, the descriptor-based default ring */ | |
3574 | bcmgenet_enable_dma(priv, dma_ctrl); | |
3575 | ||
3576 | netif_device_attach(dev); | |
3577 | ||
cc013fb4 FF |
3578 | phy_resume(priv->phydev); |
3579 | ||
6ef398ea FF |
3580 | if (priv->eee.eee_enabled) |
3581 | bcmgenet_eee_enable_set(dev, true); | |
3582 | ||
b6e978e5 FF |
3583 | bcmgenet_netif_start(dev); |
3584 | ||
3585 | return 0; | |
3586 | ||
3587 | out_clk_disable: | |
3588 | clk_disable_unprepare(priv->clk); | |
3589 | return ret; | |
3590 | } | |
3591 | #endif /* CONFIG_PM_SLEEP */ | |
3592 | ||
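| /* SIMPLE_DEV_PM_OPS wires the suspend/resume callbacks into the | |
| * system sleep hooks; with CONFIG_PM_SLEEP disabled it expands to | |
| * an empty ops table, matching the #ifdef guard above. | |
| */ | |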
3593 | static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); | |
3594 | ||
1c1008c7 FF |
3595 | static struct platform_driver bcmgenet_driver = { |
3596 | .probe = bcmgenet_probe, | |
3597 | .remove = bcmgenet_remove, | |
3598 | .driver = { | |
3599 | .name = "bcmgenet", | |
1c1008c7 | 3600 | .of_match_table = bcmgenet_match, |
b6e978e5 | 3601 | .pm = &bcmgenet_pm_ops, |
1c1008c7 FF |
3602 | }, |
3603 | }; | |
3604 | module_platform_driver(bcmgenet_driver); | |
3605 | ||
3606 | MODULE_AUTHOR("Broadcom Corporation"); | |
3607 | MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); | |
3608 | MODULE_ALIAS("platform:bcmgenet"); | |
3609 | MODULE_LICENSE("GPL"); |