/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SGMII_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 /* note: neta says it's 0x000000FF */

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

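/* With a 128-descriptor ring, for instance, last_desc is 127 and
 * MVNETA_QUEUE_NEXT_DESC() simply wraps index 127 back to 0.
 */
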
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 16
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64

/* The two-byte Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2

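/* In other words, the 2-byte Marvell header plus the 14-byte Ethernet
 * header add up to 16 bytes, which is what puts the following IP
 * header on a 4-byte boundary relative to the buffer start.
 */
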
#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
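
/* Worked example: for the default Ethernet MTU of 1500,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32) =
 * ALIGN(1524, 32) = 1536 bytes, and MVNETA_RX_BUF_SIZE() then adds
 * NET_SKB_PAD of headroom on top of that.
 */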

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_port {
	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Napi weight */
	int weight;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_pcpu_stats *stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; this layout
 * is therefore dictated by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserved1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters; the counters are
	 * clear-on-read, so reading each one resets it to zero.
	 */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor without both
 * its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

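/* The max-RX-size field counts 2-byte units and excludes the 2-byte
 * Marvell header, so a 1536-byte limit, for example, is written as
 * (1536 - 2) / 2 = 767.
 */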

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the
	 * caller processes TX descriptors in quanta of less than 256.
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (sent) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

770static void mvneta_port_down(struct mvneta_port *pp)
771{
772 u32 val;
773 int count;
774
775 /* Stop Rx port activity. Check port Rx activity. */
776 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
777
778 /* Issue stop command for active channels only */
779 if (val != 0)
780 mvreg_write(pp, MVNETA_RXQ_CMD,
781 val << MVNETA_RXQ_DISABLE_SHIFT);
782
783 /* Wait for all Rx activity to terminate. */
784 count = 0;
785 do {
786 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
787 netdev_warn(pp->dev,
788 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
789 val);
790 break;
791 }
792 mdelay(1);
793
794 val = mvreg_read(pp, MVNETA_RXQ_CMD);
795 } while (val & 0xff);
796
797 /* Stop Tx port activity. Check port Tx activity. Issue stop
6a20c175
TP
798 * command for active channels only
799 */
c5aff182
TP
800 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
801
802 if (val != 0)
803 mvreg_write(pp, MVNETA_TXQ_CMD,
804 (val << MVNETA_TXQ_DISABLE_SHIFT));
805
806 /* Wait for all Tx activity to terminate. */
807 count = 0;
808 do {
809 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
810 netdev_warn(pp->dev,
811 "TIMEOUT for TX stopped status=0x%08x\n",
812 val);
813 break;
814 }
815 mdelay(1);
816
817 /* Check TX Command reg that all Txqs are stopped */
818 val = mvreg_read(pp, MVNETA_TXQ_CMD);
819
820 } while (val & 0xff);
821
822 /* Double check to verify that TX FIFO is empty */
823 count = 0;
824 do {
825 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
826 netdev_warn(pp->dev,
827 "TX FIFO empty timeout status=0x08%x\n",
828 val);
829 break;
830 }
831 mdelay(1);
832
833 val = mvreg_read(pp, MVNETA_PORT_STATUS);
834 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
835 (val & MVNETA_TX_IN_PRGRS));
836
837 udelay(200);
838}
839
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
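
/* With queue 0, for example, each entry byte becomes 0x01 (accept bit
 * set, RX queue 0), so val = 0x01010101 and all 16 entries of the
 * unicast table accept frames to RX queue 0.
 */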

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

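/* The time-coalescing register counts core-clock cycles, hence the
 * cycles-per-usec scaling above. Assuming a 250 MHz core clock, for
 * example, a 100 usec delay would be written as 250 * 100 = 25000.
 */
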
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}

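/* Worked example: for TCP over IPv4 in a typical untagged frame,
 * l3_offs is 14 (the Ethernet header) and ip_hdr_len is ip4h->ihl = 5
 * (in 32-bit words), giving command = 14 | (5 << 8) |
 * MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */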

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

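/* For example, if <cause> is 0x5 (TXQs 0 and 2 pending), fls() returns
 * 3 and TXQ 2 is handled first; the caller then clears that bit and
 * calls again for TXQ 0.
 */
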
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pp->frag_size);
	else
		return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		mvneta_frag_free(pp, data);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		u32 rx_status;
		int rx_bytes, err;

		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&pp->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
		if (!skb)
			goto err_drop_frame;

		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;

			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_id);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different from the generic crc8 algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

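/* The reduction constant 0x107 corresponds to the polynomial
 * x^8 + x^2 + x + 1; only the low 8 bits of the result are used,
 * as the index into the 256-entry Other Multicast table below.
 */
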
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

1780/* This method controls the network device Other MAC multicast support.
1781 * The Other Multicast Table is used for multicast of another type.
1782 * A CRC-8 is used as an index to the Other Multicast Table entries
1783 * in the DA-Filter table.
1784 * The method gets the CRC-8 value from the calling routine and
1785 * sets the Other Multicast Table appropriate entry according to the
1786 * specified CRC-8 .
1787 */
1788static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1789 unsigned char crc8,
1790 int queue)
1791{
1792 unsigned int omc_table_reg;
1793 unsigned int tbl_offset;
1794 unsigned int reg_offset;
1795
1796 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1797 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1798
1799 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1800
1801 if (queue == -1) {
1802 /* Clear accepts frame bit at specified Other DA table entry */
1803 omc_table_reg &= ~(0xff << (8 * reg_offset));
1804 } else {
1805 omc_table_reg &= ~(0xff << (8 * reg_offset));
1806 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1807 }
1808
1809 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1810}
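/* Indexing example: unlike the special-table helper above, tbl_offset is
 * already a byte offset here. For crc8 = 0x53 = 83, tbl_offset =
 * (83 / 4) * 4 = 80 bytes past MVNETA_DA_FILT_OTH_MCAST, and
 * reg_offset = 83 % 4 = 3, i.e. bits 31:24 of that register.
 */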
1811
1812/* The network device supports multicast using two tables:
1813 * 1) Special Multicast Table for MAC addresses of the form
1814 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1815 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1816 * Table entries in the DA-Filter table.
1817 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1818 * is used as an index to the Other Multicast Table entries in the
1819 * DA-Filter table.
1820 */
1821static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1822 int queue)
1823{
1824 unsigned char crc_result = 0;
1825
1826 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1827 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1828 return 0;
1829 }
1830
1831 crc_result = mvneta_addr_crc(p_addr);
1832 if (queue == -1) {
1833 if (pp->mcast_count[crc_result] == 0) {
1834 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1835 crc_result);
1836 return -EINVAL;
1837 }
1838
1839 pp->mcast_count[crc_result]--;
1840 if (pp->mcast_count[crc_result] != 0) {
1841 netdev_info(pp->dev,
1842 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1843 pp->mcast_count[crc_result], crc_result);
1844 return -EINVAL;
1845 }
1846 } else
1847 pp->mcast_count[crc_result]++;
1848
1849 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1850
1851 return 0;
1852}
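/* Dispatch example: 01:00:5e:00:00:01 (the IPv4 all-hosts group) matches
 * the 5-byte special prefix, so it is filed in the special table under
 * its last byte, 0x01. 33:33:00:00:00:01 (an IPv6 multicast MAC) does
 * not, so it is hashed by mvneta_addr_crc() into the other table, with
 * pp->mcast_count[] reference-counting hash collisions on delete.
 */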
1853
1854/* Configure the filtering mode of the Ethernet port */
1855static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1856 int is_promisc)
1857{
1858 u32 port_cfg_reg, val;
1859
1860 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1861
1862 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1863
1864 /* Set / Clear UPM bit in port configuration register */
1865 if (is_promisc) {
1866 /* Accept all Unicast addresses */
1867 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1868 val |= MVNETA_FORCE_UNI;
1869 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1870 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1871 } else {
1872 /* Reject all Unicast addresses */
1873 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1874 val &= ~MVNETA_FORCE_UNI;
1875 }
1876
1877 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1878 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1879}
1880
1881/* register unicast and multicast addresses */
1882static void mvneta_set_rx_mode(struct net_device *dev)
1883{
1884 struct mvneta_port *pp = netdev_priv(dev);
1885 struct netdev_hw_addr *ha;
1886
1887 if (dev->flags & IFF_PROMISC) {
1888 /* Accept all: Multicast + Unicast */
1889 mvneta_rx_unicast_promisc_set(pp, 1);
1890 mvneta_set_ucast_table(pp, rxq_def);
1891 mvneta_set_special_mcast_table(pp, rxq_def);
1892 mvneta_set_other_mcast_table(pp, rxq_def);
1893 } else {
1894 /* Accept single Unicast */
1895 mvneta_rx_unicast_promisc_set(pp, 0);
1896 mvneta_set_ucast_table(pp, -1);
1897 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1898
1899 if (dev->flags & IFF_ALLMULTI) {
1900 /* Accept all multicast */
1901 mvneta_set_special_mcast_table(pp, rxq_def);
1902 mvneta_set_other_mcast_table(pp, rxq_def);
1903 } else {
1904 /* Accept only initialized multicast */
1905 mvneta_set_special_mcast_table(pp, -1);
1906 mvneta_set_other_mcast_table(pp, -1);
1907
1908 if (!netdev_mc_empty(dev)) {
1909 netdev_for_each_mc_addr(ha, dev) {
1910 mvneta_mcast_addr_set(pp, ha->addr,
1911 rxq_def);
1912 }
1913 }
1914 }
1915 }
1916}
1917
1918/* Interrupt handling - the callback for request_irq() */
1919static irqreturn_t mvneta_isr(int irq, void *dev_id)
1920{
1921 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1922
1923 /* Mask all interrupts */
1924 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1925
1926 napi_schedule(&pp->napi);
1927
1928 return IRQ_HANDLED;
1929}
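/* Note: this is the usual NAPI hand-off. The hard IRQ handler only masks
 * further port interrupts and schedules the softirq; mvneta_poll() below
 * does all the work and re-enables interrupts once the budget is not
 * exhausted.
 */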
1930
1931/* NAPI handler
1932 * Bits 0 - 7 of the causeRxTx register indicate that packets were
1933 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1934 * Bits 8 - 15 of the causeRxTx register indicate that packets were
1935 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1936 * Each CPU has its own causeRxTx register
1937 */
1938static int mvneta_poll(struct napi_struct *napi, int budget)
1939{
1940 int rx_done = 0;
1941 u32 cause_rx_tx;
1942 unsigned long flags;
1943 struct mvneta_port *pp = netdev_priv(napi->dev);
1944
1945 if (!netif_running(pp->dev)) {
1946 napi_complete(napi);
1947 return rx_done;
1948 }
1949
1950 /* Read cause register */
1951 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1952		(MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1953
1954 /* Release Tx descriptors */
1955 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
1956		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
1957		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
1958	}
1959
1960	/* For the case where the last mvneta_poll did not process all
1961 * RX packets
1962 */
1963 cause_rx_tx |= pp->cause_rx_tx;
1964 if (rxq_number > 1) {
1965		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
1966 int count;
1967 struct mvneta_rx_queue *rxq;
1968 /* get rx queue number from cause_rx_tx */
1969 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1970 if (!rxq)
1971 break;
1972
1973 /* process the packet in that rx queue */
1974 count = mvneta_rx(pp, budget, rxq);
1975 rx_done += count;
1976 budget -= count;
1977 if (budget > 0) {
1978			/* clear the rx bit of the
1979			 * corresponding queue in the causeRxTx
1980			 * register, so that the next iteration
1981			 * will find the next rx queue where
1982			 * packets were received
1983			 */
1984 cause_rx_tx &= ~((1 << rxq->id) << 8);
1985 }
1986 }
1987 } else {
1988 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1989 budget -= rx_done;
1990 }
1991
1992 if (budget > 0) {
1993 cause_rx_tx = 0;
1994 napi_complete(napi);
1995 local_irq_save(flags);
1996 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1997			    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
1998 local_irq_restore(flags);
1999 }
2000
2001 pp->cause_rx_tx = cause_rx_tx;
2002 return rx_done;
2003}
2004
2005/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2006static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2007 int num)
2008{
2009 int i;
2010
2011 for (i = 0; i < num; i++) {
2012		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2013 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2014 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2015 __func__, rxq->id, i, num);
2016 break;
2017 }
2018 }
2019
2020 /* Add this number of RX descriptors as non occupied (ready to
2021 * get packets)
2022 */
2023 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2024
2025 return i;
2026}
2027
2028/* Free all packets pending transmit from all TXQs and reset TX port */
2029static void mvneta_tx_reset(struct mvneta_port *pp)
2030{
2031 int queue;
2032
2033 /* free the skb's in the hal tx ring */
2034 for (queue = 0; queue < txq_number; queue++)
2035 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2036
2037 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2038 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2039}
2040
2041static void mvneta_rx_reset(struct mvneta_port *pp)
2042{
2043 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2044 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2045}
2046
2047/* Rx/Tx queue initialization/cleanup methods */
2048
2049/* Create a specified RX queue */
2050static int mvneta_rxq_init(struct mvneta_port *pp,
2051 struct mvneta_rx_queue *rxq)
2052
2053{
2054 rxq->size = pp->rx_ring_size;
2055
2056 /* Allocate memory for RX descriptors */
2057 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2058 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2059 &rxq->descs_phys, GFP_KERNEL);
2060	if (rxq->descs == NULL)
2061		return -ENOMEM;
2062
2063 BUG_ON(rxq->descs !=
2064 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2065
2066 rxq->last_desc = rxq->size - 1;
2067
2068 /* Set Rx descriptors queue starting address */
2069 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2070 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2071
2072 /* Set Offset */
2073 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2074
2075 /* Set coalescing pkts and time */
2076 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2077 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2078
2079 /* Fill RXQ with buffers from RX pool */
2080 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2081 mvneta_rxq_bm_disable(pp, rxq);
2082 mvneta_rxq_fill(pp, rxq, rxq->size);
2083
2084 return 0;
2085}
2086
2087/* Cleanup Rx queue */
2088static void mvneta_rxq_deinit(struct mvneta_port *pp,
2089 struct mvneta_rx_queue *rxq)
2090{
2091 mvneta_rxq_drop_pkts(pp, rxq);
2092
2093 if (rxq->descs)
2094 dma_free_coherent(pp->dev->dev.parent,
2095 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2096 rxq->descs,
2097 rxq->descs_phys);
2098
2099 rxq->descs = NULL;
2100 rxq->last_desc = 0;
2101 rxq->next_desc_to_proc = 0;
2102 rxq->descs_phys = 0;
2103}
2104
2105/* Create and initialize a tx queue */
2106static int mvneta_txq_init(struct mvneta_port *pp,
2107 struct mvneta_tx_queue *txq)
2108{
2109 txq->size = pp->tx_ring_size;
2110
2111 /* Allocate memory for TX descriptors */
2112 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2113 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2114 &txq->descs_phys, GFP_KERNEL);
2115	if (txq->descs == NULL)
2116		return -ENOMEM;
2117
2118 /* Make sure descriptor address is cache line size aligned */
2119 BUG_ON(txq->descs !=
2120 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2121
2122 txq->last_desc = txq->size - 1;
2123
2124 /* Set maximum bandwidth for enabled TXQs */
2125 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2126 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2127
2128 /* Set Tx descriptors queue starting address */
2129 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2130 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2131
2132 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2133 if (txq->tx_skb == NULL) {
2134 dma_free_coherent(pp->dev->dev.parent,
2135 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2136 txq->descs, txq->descs_phys);
2137 return -ENOMEM;
2138 }
2139 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2140
2141 return 0;
2142}
2143
2144/* Free the resources allocated by mvneta_txq_init() */
2145static void mvneta_txq_deinit(struct mvneta_port *pp,
2146 struct mvneta_tx_queue *txq)
2147{
2148 kfree(txq->tx_skb);
2149
2150 if (txq->descs)
2151 dma_free_coherent(pp->dev->dev.parent,
2152 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2153 txq->descs, txq->descs_phys);
2154
2155 txq->descs = NULL;
2156 txq->last_desc = 0;
2157 txq->next_desc_to_proc = 0;
2158 txq->descs_phys = 0;
2159
2160 /* Set minimum bandwidth for disabled TXQs */
2161 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2162 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2163
2164 /* Set Tx descriptors queue starting address and size */
2165 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2166 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2167}
2168
2169/* Cleanup all Tx queues */
2170static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2171{
2172 int queue;
2173
2174 for (queue = 0; queue < txq_number; queue++)
2175 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2176}
2177
2178/* Cleanup all Rx queues */
2179static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2180{
2181 int queue;
2182
2183 for (queue = 0; queue < rxq_number; queue++)
2184 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2185}
2186
2187
2188/* Init all Rx queues */
2189static int mvneta_setup_rxqs(struct mvneta_port *pp)
2190{
2191 int queue;
2192
2193 for (queue = 0; queue < rxq_number; queue++) {
2194 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2195 if (err) {
2196 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2197 __func__, queue);
2198 mvneta_cleanup_rxqs(pp);
2199 return err;
2200 }
2201 }
2202
2203 return 0;
2204}
2205
2206/* Init all tx queues */
2207static int mvneta_setup_txqs(struct mvneta_port *pp)
2208{
2209 int queue;
2210
2211 for (queue = 0; queue < txq_number; queue++) {
2212 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2213 if (err) {
2214 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2215 __func__, queue);
2216 mvneta_cleanup_txqs(pp);
2217 return err;
2218 }
2219 }
2220
2221 return 0;
2222}
2223
2224static void mvneta_start_dev(struct mvneta_port *pp)
2225{
2226 mvneta_max_rx_size_set(pp, pp->pkt_size);
2227 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2228
2229 /* start the Rx/Tx activity */
2230 mvneta_port_enable(pp);
2231
2232 /* Enable polling on the port */
2233 napi_enable(&pp->napi);
2234
2235 /* Unmask interrupts */
2236 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2237		    MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
2238
2239 phy_start(pp->phy_dev);
2240 netif_tx_start_all_queues(pp->dev);
2241}
2242
2243static void mvneta_stop_dev(struct mvneta_port *pp)
2244{
2245 phy_stop(pp->phy_dev);
2246
2247 napi_disable(&pp->napi);
2248
2249 netif_carrier_off(pp->dev);
2250
2251 mvneta_port_down(pp);
2252 netif_tx_stop_all_queues(pp->dev);
2253
2254 /* Stop the port activity */
2255 mvneta_port_disable(pp);
2256
2257 /* Clear all ethernet port interrupts */
2258 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2259 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2260
2261 /* Mask all ethernet port interrupts */
2262 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2263 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2264 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2265
2266 mvneta_tx_reset(pp);
2267 mvneta_rx_reset(pp);
2268}
2269
2270/* Return the validated (possibly adjusted) MTU, or a negative errno */
2271static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2272{
2273 if (mtu < 68) {
2274 netdev_err(dev, "cannot change mtu to less than 68\n");
2275 return -EINVAL;
2276 }
2277
2278	/* 9676 == 9700 - 20, rounded down so MVNETA_RX_PKT_SIZE() stays a multiple of 8 */
2279 if (mtu > 9676) {
2280		netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2281 mtu = 9676;
2282 }
2283
2284 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2285 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2286 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2287 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2288 }
2289
2290 return mtu;
2291}
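/* Worked example, assuming MVNETA_RX_PKT_SIZE(mtu) adds the 20 bytes of
 * header/FCS overhead implied by the "9700 - 20" comment above:
 * mtu = 1500 gives 1520, a multiple of 8, so it passes unchanged;
 * mtu = 9680 would give 9700, which is not 8-byte aligned, hence the
 * cap at 9676 (9676 + 20 = 9696 = 8 * 1212).
 */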
2292
2293/* Change the device mtu */
2294static int mvneta_change_mtu(struct net_device *dev, int mtu)
2295{
2296 struct mvneta_port *pp = netdev_priv(dev);
2297 int ret;
2298
2299 mtu = mvneta_check_mtu_valid(dev, mtu);
2300 if (mtu < 0)
2301 return -EINVAL;
2302
2303 dev->mtu = mtu;
2304
2305 if (!netif_running(dev))
2306 return 0;
2307
2308	/* The interface is running, so we have to force a
2309 * reallocation of the RXQs
2310 */
2311 mvneta_stop_dev(pp);
2312
2313 mvneta_cleanup_txqs(pp);
2314 mvneta_cleanup_rxqs(pp);
2315
2316 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2317	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2318 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2319
2320 ret = mvneta_setup_rxqs(pp);
2321 if (ret) {
2322 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2323 return ret;
2324 }
2325
2326 mvneta_setup_txqs(pp);
2327
2328 mvneta_start_dev(pp);
2329 mvneta_port_up(pp);
2330
2331 return 0;
2332}
2333
2334/* Get mac address */
2335static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2336{
2337 u32 mac_addr_l, mac_addr_h;
2338
2339 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2340 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2341 addr[0] = (mac_addr_h >> 24) & 0xFF;
2342 addr[1] = (mac_addr_h >> 16) & 0xFF;
2343 addr[2] = (mac_addr_h >> 8) & 0xFF;
2344 addr[3] = mac_addr_h & 0xFF;
2345 addr[4] = (mac_addr_l >> 8) & 0xFF;
2346 addr[5] = mac_addr_l & 0xFF;
2347}
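/* Layout example, read off the shifts above: for 00:11:22:33:44:55,
 * MVNETA_MAC_ADDR_HIGH holds 0x00112233 and the low 16 bits of
 * MVNETA_MAC_ADDR_LOW hold 0x4455.
 */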
2348
2349/* Handle setting mac address */
2350static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2351{
2352 struct mvneta_port *pp = netdev_priv(dev);
2353 u8 *mac = addr + 2;
2354 int i;
2355
2356 if (netif_running(dev))
2357 return -EBUSY;
2358
2359 /* Remove previous address table entry */
2360 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2361
2362 /* Set new addr in hw */
2363 mvneta_mac_addr_set(pp, mac, rxq_def);
2364
2365 /* Set addr in the device */
2366 for (i = 0; i < ETH_ALEN; i++)
2367 dev->dev_addr[i] = mac[i];
2368
2369 return 0;
2370}
2371
2372static void mvneta_adjust_link(struct net_device *ndev)
2373{
2374 struct mvneta_port *pp = netdev_priv(ndev);
2375 struct phy_device *phydev = pp->phy_dev;
2376 int status_change = 0;
2377
2378 if (phydev->link) {
2379 if ((pp->speed != phydev->speed) ||
2380 (pp->duplex != phydev->duplex)) {
2381 u32 val;
2382
2383 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2384 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2385 MVNETA_GMAC_CONFIG_GMII_SPEED |
2386 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2387 MVNETA_GMAC_AN_SPEED_EN |
2388 MVNETA_GMAC_AN_DUPLEX_EN);
2389
2390 if (phydev->duplex)
2391 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2392
2393 if (phydev->speed == SPEED_1000)
2394 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2395 else
2396 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2397
2398 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2399
2400 pp->duplex = phydev->duplex;
2401 pp->speed = phydev->speed;
2402 }
2403 }
2404
2405 if (phydev->link != pp->link) {
2406 if (!phydev->link) {
2407 pp->duplex = -1;
2408 pp->speed = 0;
2409 }
2410
2411 pp->link = phydev->link;
2412 status_change = 1;
2413 }
2414
2415 if (status_change) {
2416 if (phydev->link) {
2417 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2418 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2419 MVNETA_GMAC_FORCE_LINK_DOWN);
2420 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2421 mvneta_port_up(pp);
2422 netdev_info(pp->dev, "link up\n");
2423 } else {
2424 mvneta_port_down(pp);
2425 netdev_info(pp->dev, "link down\n");
2426 }
2427 }
2428}
2429
2430static int mvneta_mdio_probe(struct mvneta_port *pp)
2431{
2432 struct phy_device *phy_dev;
2433
2434 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2435 pp->phy_interface);
2436 if (!phy_dev) {
2437 netdev_err(pp->dev, "could not find the PHY\n");
2438 return -ENODEV;
2439 }
2440
2441 phy_dev->supported &= PHY_GBIT_FEATURES;
2442 phy_dev->advertising = phy_dev->supported;
2443
2444 pp->phy_dev = phy_dev;
2445 pp->link = 0;
2446 pp->duplex = 0;
2447 pp->speed = 0;
2448
2449 return 0;
2450}
2451
2452static void mvneta_mdio_remove(struct mvneta_port *pp)
2453{
2454 phy_disconnect(pp->phy_dev);
2455 pp->phy_dev = NULL;
2456}
2457
2458static int mvneta_open(struct net_device *dev)
2459{
2460 struct mvneta_port *pp = netdev_priv(dev);
2461 int ret;
2462
2463 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2464
2465 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2466	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2467 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2468
2469 ret = mvneta_setup_rxqs(pp);
2470 if (ret)
2471 return ret;
2472
2473 ret = mvneta_setup_txqs(pp);
2474 if (ret)
2475 goto err_cleanup_rxqs;
2476
2477 /* Connect to port interrupt line */
2478 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2479 MVNETA_DRIVER_NAME, pp);
2480 if (ret) {
2481 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2482 goto err_cleanup_txqs;
2483 }
2484
2485	/* By default, the link is down */
2486 netif_carrier_off(pp->dev);
2487
2488 ret = mvneta_mdio_probe(pp);
2489 if (ret < 0) {
2490 netdev_err(dev, "cannot probe MDIO bus\n");
2491 goto err_free_irq;
2492 }
2493
2494 mvneta_start_dev(pp);
2495
2496 return 0;
2497
2498err_free_irq:
2499 free_irq(pp->dev->irq, pp);
2500err_cleanup_txqs:
2501 mvneta_cleanup_txqs(pp);
2502err_cleanup_rxqs:
2503 mvneta_cleanup_rxqs(pp);
2504 return ret;
2505}
2506
2507/* Stop the port, free port interrupt line */
2508static int mvneta_stop(struct net_device *dev)
2509{
2510 struct mvneta_port *pp = netdev_priv(dev);
2511
2512 mvneta_stop_dev(pp);
2513 mvneta_mdio_remove(pp);
2514 free_irq(dev->irq, pp);
2515 mvneta_cleanup_rxqs(pp);
2516 mvneta_cleanup_txqs(pp);
2517
2518 return 0;
2519}
2520
2521static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2522{
2523 struct mvneta_port *pp = netdev_priv(dev);
2524 int ret;
2525
2526 if (!pp->phy_dev)
2527 return -ENOTSUPP;
2528
2529 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2530 if (!ret)
2531 mvneta_adjust_link(dev);
2532
2533 return ret;
2534}
2535
2536/* Ethtool methods */
2537
2538/* Get settings (phy address, speed) for ethtools */
2539int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2540{
2541 struct mvneta_port *pp = netdev_priv(dev);
2542
2543 if (!pp->phy_dev)
2544 return -ENODEV;
2545
2546 return phy_ethtool_gset(pp->phy_dev, cmd);
2547}
2548
2549/* Set settings (phy address, speed) for ethtools */
2550int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2551{
2552 struct mvneta_port *pp = netdev_priv(dev);
2553
2554 if (!pp->phy_dev)
2555 return -ENODEV;
2556
2557 return phy_ethtool_sset(pp->phy_dev, cmd);
2558}
2559
2560/* Set interrupt coalescing for ethtools */
2561static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2562 struct ethtool_coalesce *c)
2563{
2564 struct mvneta_port *pp = netdev_priv(dev);
2565 int queue;
2566
2567 for (queue = 0; queue < rxq_number; queue++) {
2568 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2569 rxq->time_coal = c->rx_coalesce_usecs;
2570 rxq->pkts_coal = c->rx_max_coalesced_frames;
2571 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2572 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2573 }
2574
2575 for (queue = 0; queue < txq_number; queue++) {
2576 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2577 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2578 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2579 }
2580
2581 return 0;
2582}
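/* Usage sketch (hypothetical device name): these hooks back the standard
 * ethtool coalescing interface, e.g.
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 *
 * which applies rx-usecs/rx-frames to every RXQ and tx-frames to every
 * TXQ, since the driver keeps no per-queue user settings.
 */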
2583
2584/* get coalescing for ethtools */
2585static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2586 struct ethtool_coalesce *c)
2587{
2588 struct mvneta_port *pp = netdev_priv(dev);
2589
2590 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2591 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2592
2593 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2594 return 0;
2595}
2596
2597
2598static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2599 struct ethtool_drvinfo *drvinfo)
2600{
2601 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2602 sizeof(drvinfo->driver));
2603 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2604 sizeof(drvinfo->version));
2605 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2606 sizeof(drvinfo->bus_info));
2607}
2608
2609
2610static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2611 struct ethtool_ringparam *ring)
2612{
2613 struct mvneta_port *pp = netdev_priv(netdev);
2614
2615 ring->rx_max_pending = MVNETA_MAX_RXD;
2616 ring->tx_max_pending = MVNETA_MAX_TXD;
2617 ring->rx_pending = pp->rx_ring_size;
2618 ring->tx_pending = pp->tx_ring_size;
2619}
2620
2621static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2622 struct ethtool_ringparam *ring)
2623{
2624 struct mvneta_port *pp = netdev_priv(dev);
2625
2626 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2627 return -EINVAL;
2628 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2629 ring->rx_pending : MVNETA_MAX_RXD;
2630 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2631 ring->tx_pending : MVNETA_MAX_TXD;
2632
2633 if (netif_running(dev)) {
2634 mvneta_stop(dev);
2635 if (mvneta_open(dev)) {
2636 netdev_err(dev,
2637 "error on opening device after ring param change\n");
2638 return -ENOMEM;
2639 }
2640 }
2641
2642 return 0;
2643}
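/* Usage sketch (hypothetical device name and sizes):
 *
 *   ethtool -G eth0 rx 128 tx 256
 *
 * Values are clamped to MVNETA_MAX_RXD/MVNETA_MAX_TXD, and if the
 * interface is up it is closed and reopened so the descriptor rings can
 * be reallocated at the new size.
 */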
2644
2645static const struct net_device_ops mvneta_netdev_ops = {
2646 .ndo_open = mvneta_open,
2647 .ndo_stop = mvneta_stop,
2648 .ndo_start_xmit = mvneta_tx,
2649 .ndo_set_rx_mode = mvneta_set_rx_mode,
2650 .ndo_set_mac_address = mvneta_set_mac_addr,
2651 .ndo_change_mtu = mvneta_change_mtu,
2652	.ndo_get_stats64     = mvneta_get_stats64,
2653	.ndo_do_ioctl        = mvneta_ioctl,
2654};
2655
2656const struct ethtool_ops mvneta_eth_tool_ops = {
2657 .get_link = ethtool_op_get_link,
2658 .get_settings = mvneta_ethtool_get_settings,
2659 .set_settings = mvneta_ethtool_set_settings,
2660 .set_coalesce = mvneta_ethtool_set_coalesce,
2661 .get_coalesce = mvneta_ethtool_get_coalesce,
2662 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2663 .get_ringparam = mvneta_ethtool_get_ringparam,
2664 .set_ringparam = mvneta_ethtool_set_ringparam,
2665};
2666
2667/* Initialize hw */
2668static int mvneta_init(struct mvneta_port *pp, int phy_addr)
2669{
2670 int queue;
2671
2672 /* Disable port */
2673 mvneta_port_disable(pp);
2674
2675 /* Set port default values */
2676 mvneta_defaults_set(pp);
2677
2678 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2679 GFP_KERNEL);
2680 if (!pp->txqs)
2681 return -ENOMEM;
2682
2683 /* Initialize TX descriptor rings */
2684 for (queue = 0; queue < txq_number; queue++) {
2685 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2686 txq->id = queue;
2687 txq->size = pp->tx_ring_size;
2688 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2689 }
2690
2691 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2692 GFP_KERNEL);
2693 if (!pp->rxqs) {
2694 kfree(pp->txqs);
2695 return -ENOMEM;
2696 }
2697
2698 /* Create Rx descriptor rings */
2699 for (queue = 0; queue < rxq_number; queue++) {
2700 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2701 rxq->id = queue;
2702 rxq->size = pp->rx_ring_size;
2703 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2704 rxq->time_coal = MVNETA_RX_COAL_USEC;
2705 }
2706
2707 return 0;
2708}
2709
2710static void mvneta_deinit(struct mvneta_port *pp)
2711{
2712 kfree(pp->txqs);
2713 kfree(pp->rxqs);
2714}
2715
2716/* platform glue : initialize decoding windows */
2717static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2718				     const struct mbus_dram_target_info *dram)
2719{
2720 u32 win_enable;
2721 u32 win_protect;
2722 int i;
2723
2724 for (i = 0; i < 6; i++) {
2725 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2726 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2727
2728 if (i < 4)
2729 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2730 }
2731
2732 win_enable = 0x3f;
2733 win_protect = 0;
2734
2735 for (i = 0; i < dram->num_cs; i++) {
2736 const struct mbus_dram_window *cs = dram->cs + i;
2737 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2738 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2739
2740 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2741 (cs->size - 1) & 0xffff0000);
2742
2743 win_enable &= ~(1 << i);
2744 win_protect |= 3 << (2 * i);
2745 }
2746
2747 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2748}
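/* Programming example with illustrative values: for a single 1 GB DRAM
 * chip-select at base 0 with mbus_dram_target_id = 0 and mbus_attr = 0xe,
 * window 0 gets base 0x00000e00 and size 0x3fff0000, and win_enable ends
 * up as 0x3e (window 0 enabled, windows 1-5 still disabled).
 */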
2749
2750/* Power up the port */
2751static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2752{
2753 u32 val;
2754
2755 /* MAC Cause register should be cleared */
2756 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2757
2758 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2759 mvneta_port_sgmii_config(pp);
2760
2761 mvneta_gmac_rgmii_set(pp, 1);
2762
2763 /* Cancel Port Reset */
2764 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2765 val &= ~MVNETA_GMAC2_PORT_RESET;
2766 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2767
2768 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2769 MVNETA_GMAC2_PORT_RESET) != 0)
2770 continue;
2771}
2772
2773/* Device initialization routine */
2774static int mvneta_probe(struct platform_device *pdev)
2775{
2776 const struct mbus_dram_target_info *dram_target_info;
2777 struct device_node *dn = pdev->dev.of_node;
2778 struct device_node *phy_node;
2779	u32 phy_addr;
2780 struct mvneta_port *pp;
2781 struct net_device *dev;
2782 const char *dt_mac_addr;
2783 char hw_mac_addr[ETH_ALEN];
2784 const char *mac_from;
2785 int phy_mode;
2786 int err;
2787
2788	/* Our multiqueue support is not complete, so for now, only
2789 * allow the usage of the first RX queue
2790 */
2791 if (rxq_def != 0) {
2792 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2793 return -EINVAL;
2794 }
2795
2796	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
2797 if (!dev)
2798 return -ENOMEM;
2799
2800 dev->irq = irq_of_parse_and_map(dn, 0);
2801 if (dev->irq == 0) {
2802 err = -EINVAL;
2803 goto err_free_netdev;
2804 }
2805
2806 phy_node = of_parse_phandle(dn, "phy", 0);
2807 if (!phy_node) {
2808 dev_err(&pdev->dev, "no associated PHY\n");
2809 err = -ENODEV;
2810 goto err_free_irq;
2811 }
2812
2813 phy_mode = of_get_phy_mode(dn);
2814 if (phy_mode < 0) {
2815 dev_err(&pdev->dev, "incorrect phy-mode\n");
2816 err = -EINVAL;
2817 goto err_free_irq;
2818 }
2819
2820 dev->tx_queue_len = MVNETA_MAX_TXD;
2821 dev->watchdog_timeo = 5 * HZ;
2822 dev->netdev_ops = &mvneta_netdev_ops;
2823
2824 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2825
2826 pp = netdev_priv(dev);
2827
2828	pp->weight = MVNETA_RX_POLL_WEIGHT;
2829 pp->phy_node = phy_node;
2830 pp->phy_interface = phy_mode;
2831
2832 pp->clk = devm_clk_get(&pdev->dev, NULL);
2833 if (IS_ERR(pp->clk)) {
2834 err = PTR_ERR(pp->clk);
2835		goto err_free_irq;
2836 }
2837
2838 clk_prepare_enable(pp->clk);
2839
2840 pp->base = of_iomap(dn, 0);
2841 if (pp->base == NULL) {
2842 err = -ENOMEM;
2843 goto err_clk;
2844 }
2845
2846	/* Alloc per-cpu stats */
2847	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
2848	if (!pp->stats) {
2849 err = -ENOMEM;
2850 goto err_unmap;
2851 }
2852
2853	dt_mac_addr = of_get_mac_address(dn);
2854	if (dt_mac_addr) {
2855 mac_from = "device tree";
2856 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
2857 } else {
2858 mvneta_get_mac_addr(pp, hw_mac_addr);
2859 if (is_valid_ether_addr(hw_mac_addr)) {
2860 mac_from = "hardware";
2861 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
2862 } else {
2863 mac_from = "random";
2864 eth_hw_addr_random(dev);
2865 }
2866 }
2867
2868 pp->tx_ring_size = MVNETA_MAX_TXD;
2869 pp->rx_ring_size = MVNETA_MAX_RXD;
2870
2871 pp->dev = dev;
2872 SET_NETDEV_DEV(dev, &pdev->dev);
2873
2874 err = mvneta_init(pp, phy_addr);
2875 if (err < 0) {
2876 dev_err(&pdev->dev, "can't init eth hal\n");
2877		goto err_free_stats;
2878 }
2879 mvneta_port_power_up(pp, phy_mode);
2880
2881 dram_target_info = mv_mbus_dram_info();
2882 if (dram_target_info)
2883 mvneta_conf_mbus_windows(pp, dram_target_info);
2884
2885 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2886
2887	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2888 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2889 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2890 dev->priv_flags |= IFF_UNICAST_FLT;
2891
2892 err = register_netdev(dev);
2893 if (err < 0) {
2894 dev_err(&pdev->dev, "failed to register\n");
2895 goto err_deinit;
2896 }
2897
2898 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
2899 dev->dev_addr);
2900
2901 platform_set_drvdata(pdev, pp->dev);
2902
2903 return 0;
2904
2905err_deinit:
2906 mvneta_deinit(pp);
2907err_free_stats:
2908 free_percpu(pp->stats);
2909err_unmap:
2910 iounmap(pp->base);
2911err_clk:
2912 clk_disable_unprepare(pp->clk);
2913err_free_irq:
2914 irq_dispose_mapping(dev->irq);
2915err_free_netdev:
2916 free_netdev(dev);
2917 return err;
2918}
2919
2920/* Device removal routine */
2921static int mvneta_remove(struct platform_device *pdev)
2922{
2923 struct net_device *dev = platform_get_drvdata(pdev);
2924 struct mvneta_port *pp = netdev_priv(dev);
2925
2926 unregister_netdev(dev);
2927 mvneta_deinit(pp);
2928	clk_disable_unprepare(pp->clk);
2929	free_percpu(pp->stats);
2930 iounmap(pp->base);
2931 irq_dispose_mapping(dev->irq);
2932 free_netdev(dev);
2933
2934 return 0;
2935}
2936
2937static const struct of_device_id mvneta_match[] = {
2938 { .compatible = "marvell,armada-370-neta" },
2939 { }
2940};
2941MODULE_DEVICE_TABLE(of, mvneta_match);
2942
2943static struct platform_driver mvneta_driver = {
2944 .probe = mvneta_probe,
2945	.remove = mvneta_remove,
2946 .driver = {
2947 .name = MVNETA_DRIVER_NAME,
2948 .of_match_table = mvneta_match,
2949 },
2950};
2951
2952module_platform_driver(mvneta_driver);
2953
2954MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2955MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2956MODULE_LICENSE("GPL");
2957
2958module_param(rxq_number, int, S_IRUGO);
2959module_param(txq_number, int, S_IRUGO);
2960
2961module_param(rxq_def, int, S_IRUGO);
2962module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
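/* Load-time sketch (hypothetical values): all parameters above except
 * rx_copybreak are read-only once the module is loaded, e.g.
 *
 *   modprobe mvneta rxq_number=1 txq_number=1 rxq_def=0
 *
 * Note that mvneta_probe() above rejects any rxq_def other than 0.
 */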