1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic.h"
9
10#include <linux/slab.h>
11#include <net/ip.h>
12#include <linux/bitops.h>
13
14#define MASK(n) ((1ULL<<(n))-1)
15#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
16
17#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
18
19#define CRB_BLK(off) ((off >> 20) & 0x3f)
20#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
21#define CRB_WINDOW_2M (0x130060)
22#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
23#define CRB_INDIRECT_2M (0x1e0000UL)
24
25
26#ifndef readq
27static inline u64 readq(void __iomem *addr)
28{
29 return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
30}
31#endif
32
33#ifndef writeq
34static inline void writeq(u64 val, void __iomem *addr)
35{
36 writel(((u32) (val)), (addr));
37 writel(((u32) (val >> 32)), (addr + 4));
38}
39#endif
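/*
 * Fallbacks for platforms without native 64-bit MMIO accessors.  Note
 * that they are built from two 32-bit accesses and are therefore not
 * atomic; callers serialize 64-bit accesses via ahw->mem_lock.
 */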
40
41static const struct crb_128M_2M_block_map
42crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
43 {{{0, 0, 0, 0} } }, /* 0: PCI */
44 {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
45 {1, 0x0110000, 0x0120000, 0x130000},
46 {1, 0x0120000, 0x0122000, 0x124000},
47 {1, 0x0130000, 0x0132000, 0x126000},
48 {1, 0x0140000, 0x0142000, 0x128000},
49 {1, 0x0150000, 0x0152000, 0x12a000},
50 {1, 0x0160000, 0x0170000, 0x110000},
51 {1, 0x0170000, 0x0172000, 0x12e000},
52 {0, 0x0000000, 0x0000000, 0x000000},
53 {0, 0x0000000, 0x0000000, 0x000000},
54 {0, 0x0000000, 0x0000000, 0x000000},
55 {0, 0x0000000, 0x0000000, 0x000000},
56 {0, 0x0000000, 0x0000000, 0x000000},
57 {0, 0x0000000, 0x0000000, 0x000000},
58 {1, 0x01e0000, 0x01e0800, 0x122000},
59 {0, 0x0000000, 0x0000000, 0x000000} } },
60 {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
61 {{{0, 0, 0, 0} } }, /* 3: */
62 {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
63 {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
64 {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
65 {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
66 {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
67 {0, 0x0000000, 0x0000000, 0x000000},
68 {0, 0x0000000, 0x0000000, 0x000000},
69 {0, 0x0000000, 0x0000000, 0x000000},
70 {0, 0x0000000, 0x0000000, 0x000000},
71 {0, 0x0000000, 0x0000000, 0x000000},
72 {0, 0x0000000, 0x0000000, 0x000000},
73 {0, 0x0000000, 0x0000000, 0x000000},
74 {0, 0x0000000, 0x0000000, 0x000000},
75 {0, 0x0000000, 0x0000000, 0x000000},
76 {0, 0x0000000, 0x0000000, 0x000000},
77 {0, 0x0000000, 0x0000000, 0x000000},
78 {0, 0x0000000, 0x0000000, 0x000000},
79 {0, 0x0000000, 0x0000000, 0x000000},
80 {0, 0x0000000, 0x0000000, 0x000000},
81 {1, 0x08f0000, 0x08f2000, 0x172000} } },
82 {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
83 {0, 0x0000000, 0x0000000, 0x000000},
84 {0, 0x0000000, 0x0000000, 0x000000},
85 {0, 0x0000000, 0x0000000, 0x000000},
86 {0, 0x0000000, 0x0000000, 0x000000},
87 {0, 0x0000000, 0x0000000, 0x000000},
88 {0, 0x0000000, 0x0000000, 0x000000},
89 {0, 0x0000000, 0x0000000, 0x000000},
90 {0, 0x0000000, 0x0000000, 0x000000},
91 {0, 0x0000000, 0x0000000, 0x000000},
92 {0, 0x0000000, 0x0000000, 0x000000},
93 {0, 0x0000000, 0x0000000, 0x000000},
94 {0, 0x0000000, 0x0000000, 0x000000},
95 {0, 0x0000000, 0x0000000, 0x000000},
96 {0, 0x0000000, 0x0000000, 0x000000},
97 {1, 0x09f0000, 0x09f2000, 0x176000} } },
98 {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
99 {0, 0x0000000, 0x0000000, 0x000000},
100 {0, 0x0000000, 0x0000000, 0x000000},
101 {0, 0x0000000, 0x0000000, 0x000000},
102 {0, 0x0000000, 0x0000000, 0x000000},
103 {0, 0x0000000, 0x0000000, 0x000000},
104 {0, 0x0000000, 0x0000000, 0x000000},
105 {0, 0x0000000, 0x0000000, 0x000000},
106 {0, 0x0000000, 0x0000000, 0x000000},
107 {0, 0x0000000, 0x0000000, 0x000000},
108 {0, 0x0000000, 0x0000000, 0x000000},
109 {0, 0x0000000, 0x0000000, 0x000000},
110 {0, 0x0000000, 0x0000000, 0x000000},
111 {0, 0x0000000, 0x0000000, 0x000000},
112 {0, 0x0000000, 0x0000000, 0x000000},
113 {1, 0x0af0000, 0x0af2000, 0x17a000} } },
114 {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
115 {0, 0x0000000, 0x0000000, 0x000000},
116 {0, 0x0000000, 0x0000000, 0x000000},
117 {0, 0x0000000, 0x0000000, 0x000000},
118 {0, 0x0000000, 0x0000000, 0x000000},
119 {0, 0x0000000, 0x0000000, 0x000000},
120 {0, 0x0000000, 0x0000000, 0x000000},
121 {0, 0x0000000, 0x0000000, 0x000000},
122 {0, 0x0000000, 0x0000000, 0x000000},
123 {0, 0x0000000, 0x0000000, 0x000000},
124 {0, 0x0000000, 0x0000000, 0x000000},
125 {0, 0x0000000, 0x0000000, 0x000000},
126 {0, 0x0000000, 0x0000000, 0x000000},
127 {0, 0x0000000, 0x0000000, 0x000000},
128 {0, 0x0000000, 0x0000000, 0x000000},
129 {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
130 {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
131 {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
132 {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
133 {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
134 {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
135 {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
136 {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
137 {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
138 {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
139 {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
140 {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
141 {{{0, 0, 0, 0} } }, /* 23: */
142 {{{0, 0, 0, 0} } }, /* 24: */
143 {{{0, 0, 0, 0} } }, /* 25: */
144 {{{0, 0, 0, 0} } }, /* 26: */
145 {{{0, 0, 0, 0} } }, /* 27: */
146 {{{0, 0, 0, 0} } }, /* 28: */
147 {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
148 {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
149 {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
150 {{{0} } }, /* 32: PCI */
151 {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
152 {1, 0x2110000, 0x2120000, 0x130000},
153 {1, 0x2120000, 0x2122000, 0x124000},
154 {1, 0x2130000, 0x2132000, 0x126000},
155 {1, 0x2140000, 0x2142000, 0x128000},
156 {1, 0x2150000, 0x2152000, 0x12a000},
157 {1, 0x2160000, 0x2170000, 0x110000},
158 {1, 0x2170000, 0x2172000, 0x12e000},
159 {0, 0x0000000, 0x0000000, 0x000000},
160 {0, 0x0000000, 0x0000000, 0x000000},
161 {0, 0x0000000, 0x0000000, 0x000000},
162 {0, 0x0000000, 0x0000000, 0x000000},
163 {0, 0x0000000, 0x0000000, 0x000000},
164 {0, 0x0000000, 0x0000000, 0x000000},
165 {0, 0x0000000, 0x0000000, 0x000000},
166 {0, 0x0000000, 0x0000000, 0x000000} } },
167 {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
168 {{{0} } }, /* 35: */
169 {{{0} } }, /* 36: */
170 {{{0} } }, /* 37: */
171 {{{0} } }, /* 38: */
172 {{{0} } }, /* 39: */
173 {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
174 {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
175 {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
176 {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
177 {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
178 {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
179 {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
180 {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
181 {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
182 {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
183 {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
184 {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
185 {{{0} } }, /* 52: */
186 {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
187 {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
188 {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
189 {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
190 {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
191 {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
192 {{{0} } }, /* 59: I2C0 */
193 {{{0} } }, /* 60: I2C1 */
194 {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
195 {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
196 {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
197};
198
199/*
200 * top 12 bits of crb internal address (hub, agent)
201 */
202static const unsigned crb_hub_agt[64] = {
203 0,
204 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
205 QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
206 QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
207 0,
208 QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
209 QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
210 QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
211 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
212 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
213 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
214 QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
215 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
216 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
217 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
218 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
219 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
220 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
221 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
222 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
223 QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
224 QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
225 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
226 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
227 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
228 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
229 QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
230 0,
231 QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
232 QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
233 0,
234 QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
235 0,
236 QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
237 QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
238 0,
239 0,
240 0,
241 0,
242 0,
243 QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
244 0,
245 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
246 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
247 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
248 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
249 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
250 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
251 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
252 QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
253 QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
254 QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
255 0,
256 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
257 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
258 QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
259 QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
260 0,
261 QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
262 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
263 QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
264 0,
265 QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
266 0,
267};
268
269/* PCI Windowing for DDR regions. */
270
271#define QLCNIC_PCIE_SEM_TIMEOUT 10000
272
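/*
 * Acquire one of the PCIe hardware semaphores.  The lock register is
 * polled (sleeping 1ms between attempts) until it reads 1 or
 * QLCNIC_PCIE_SEM_TIMEOUT attempts have been made; on timeout the
 * current holder read from @id_reg is reported.  On success the
 * caller's port number is recorded in @id_reg, if one was given.
 */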
273int
274qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
275{
276 int done = 0, timeout = 0;
277
278 while (!done) {
279 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
280 if (done == 1)
281 break;
282 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
283 dev_err(&adapter->pdev->dev,
284 "Failed to acquire sem=%d lock; holdby=%d\n",
285 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
286 return -EIO;
287 }
288 msleep(1);
289 }
290
291 if (id_reg)
292 QLCWR32(adapter, id_reg, adapter->portnum);
293
294 return 0;
295}
296
297void
298qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
299{
300 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
301}
302
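/*
 * Copy @nr_desc pre-built command descriptors into the Tx ring under
 * the netdev Tx lock and advance the hardware producer.  Returns -EIO
 * if the firmware is not attached and -EBUSY if the ring does not
 * have enough free entries.
 */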
303static int
304qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
305 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
306{
307 u32 i, producer, consumer;
308 struct qlcnic_cmd_buffer *pbuf;
309 struct cmd_desc_type0 *cmd_desc;
310 struct qlcnic_host_tx_ring *tx_ring;
311
312 i = 0;
313
314 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
315 return -EIO;
316
317 tx_ring = adapter->tx_ring;
318 __netif_tx_lock_bh(tx_ring->txq);
319
320 producer = tx_ring->producer;
321 consumer = tx_ring->sw_consumer;
322
323 if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
324 netif_tx_stop_queue(tx_ring->txq);
325 smp_mb();
326 if (qlcnic_tx_avail(tx_ring) > nr_desc) {
327 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
328 netif_tx_wake_queue(tx_ring->txq);
329 } else {
330 adapter->stats.xmit_off++;
331 __netif_tx_unlock_bh(tx_ring->txq);
332 return -EBUSY;
333 }
334 }
335
336 do {
337 cmd_desc = &cmd_desc_arr[i];
338
339 pbuf = &tx_ring->cmd_buf_arr[producer];
340 pbuf->skb = NULL;
341 pbuf->frag_count = 0;
342
343 memcpy(&tx_ring->desc_head[producer],
344 &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
345
346 producer = get_next_index(producer, tx_ring->num_desc);
347 i++;
348
349 } while (i != nr_desc);
350
351 tx_ring->producer = producer;
352
353 qlcnic_update_cmd_producer(adapter, tx_ring);
354
355 __netif_tx_unlock_bh(tx_ring->txq);
356
357 return 0;
358}
359
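/*
 * Build a QLCNIC_MAC_EVENT request carrying a MAC address, an optional
 * VLAN id and an add/delete opcode, and hand it to the firmware
 * through the command descriptor ring.
 */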
360static int
361qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
362 __le16 vlan_id, unsigned op)
363{
364 struct qlcnic_nic_req req;
365 struct qlcnic_mac_req *mac_req;
366 struct qlcnic_vlan_req *vlan_req;
367 u64 word;
368
369 memset(&req, 0, sizeof(struct qlcnic_nic_req));
370 req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
371
372 word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
373 req.req_hdr = cpu_to_le64(word);
374
375 mac_req = (struct qlcnic_mac_req *)&req.words[0];
376 mac_req->op = op;
377 memcpy(mac_req->mac_addr, addr, 6);
378
379 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
380 vlan_req->vlan_id = vlan_id;
381
382 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
383}
384
385static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
386{
387 struct list_head *head;
388 struct qlcnic_mac_list_s *cur;
389
390 /* look up if already exists */
391 list_for_each(head, &adapter->mac_list) {
392 cur = list_entry(head, struct qlcnic_mac_list_s, list);
393 if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0)
394 return 0;
395 }
396
397 cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
398 if (cur == NULL) {
399 dev_err(&adapter->netdev->dev,
400 "failed to add mac address filter\n");
401 return -ENOMEM;
402 }
403 memcpy(cur->mac_addr, addr, ETH_ALEN);
404
405 if (qlcnic_sre_macaddr_change(adapter,
406 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
407 kfree(cur);
408 return -EIO;
409 }
410
411 list_add_tail(&cur->list, &adapter->mac_list);
412 return 0;
413}
414
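/*
 * Program the receive filtering mode for the port.  The port MAC and
 * the broadcast address are always installed as filters; promiscuous,
 * all-multicast or per-address multicast filtering is then selected
 * from the netdev flags and multicast list before being sent to the
 * firmware via qlcnic_nic_set_promisc().
 */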
415void qlcnic_set_multi(struct net_device *netdev)
416{
417 struct qlcnic_adapter *adapter = netdev_priv(netdev);
418 struct netdev_hw_addr *ha;
419 static const u8 bcast_addr[ETH_ALEN] = {
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
421 };
422 u32 mode = VPORT_MISS_MODE_DROP;
423
424 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
425 return;
426
427 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
428 qlcnic_nic_add_mac(adapter, bcast_addr);
429
430 if (netdev->flags & IFF_PROMISC) {
431 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
432 mode = VPORT_MISS_MODE_ACCEPT_ALL;
433 goto send_fw_cmd;
434 }
435
436 if ((netdev->flags & IFF_ALLMULTI) ||
437 (netdev_mc_count(netdev) > adapter->max_mc_count)) {
438 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
439 goto send_fw_cmd;
440 }
441
442 if (!netdev_mc_empty(netdev)) {
443 netdev_for_each_mc_addr(ha, netdev) {
444 qlcnic_nic_add_mac(adapter, ha->addr);
445 }
446 }
447
448send_fw_cmd:
449 if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
450 qlcnic_alloc_lb_filters_mem(adapter);
451 adapter->mac_learn = 1;
452 } else {
453 adapter->mac_learn = 0;
454 }
455
456 qlcnic_nic_set_promisc(adapter, mode);
457}
458
459int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
460{
461 struct qlcnic_nic_req req;
462 u64 word;
463
464 memset(&req, 0, sizeof(struct qlcnic_nic_req));
465
466 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
467
468 word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
469 ((u64)adapter->portnum << 16);
470 req.req_hdr = cpu_to_le64(word);
471
472 req.words[0] = cpu_to_le64(mode);
473
474 return qlcnic_send_cmd_descs(adapter,
475 (struct cmd_desc_type0 *)&req, 1);
476}
477
478void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
479{
480 struct qlcnic_mac_list_s *cur;
481 struct list_head *head = &adapter->mac_list;
482
483 while (!list_empty(head)) {
484 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
485 qlcnic_sre_macaddr_change(adapter,
486 cur->mac_addr, 0, QLCNIC_MAC_DEL);
487 list_del(&cur->list);
488 kfree(cur);
489 }
490}
491
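/*
 * Age out learned MAC filters: walk the filter hash and remove any
 * entry older than QLCNIC_FILTER_AGE seconds, deleting it from the
 * firmware and freeing it under mac_learn_lock.
 */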
492void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
493{
494 struct qlcnic_filter *tmp_fil;
495 struct hlist_node *tmp_hnode, *n;
496 struct hlist_head *head;
497 int i;
498
499 for (i = 0; i < adapter->fhash.fmax; i++) {
500 head = &(adapter->fhash.fhead[i]);
501
502 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
503 {
504 if (jiffies >
505 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
506 qlcnic_sre_macaddr_change(adapter,
507 tmp_fil->faddr, tmp_fil->vlan_id,
508 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
509 QLCNIC_MAC_DEL);
510 spin_lock_bh(&adapter->mac_learn_lock);
511 adapter->fhash.fnum--;
512 hlist_del(&tmp_fil->fnode);
513 spin_unlock_bh(&adapter->mac_learn_lock);
514 kfree(tmp_fil);
515 }
516 }
517 }
518}
519
520void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
521{
522 struct qlcnic_filter *tmp_fil;
523 struct hlist_node *tmp_hnode, *n;
524 struct hlist_head *head;
525 int i;
526
527 for (i = 0; i < adapter->fhash.fmax; i++) {
528 head = &(adapter->fhash.fhead[i]);
529
530 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
531 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
532 tmp_fil->vlan_id, tmp_fil->vlan_id ?
533 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
534 spin_lock_bh(&adapter->mac_learn_lock);
535 adapter->fhash.fnum--;
536 hlist_del(&tmp_fil->fnode);
537 spin_unlock_bh(&adapter->mac_learn_lock);
538 kfree(tmp_fil);
539 }
540 }
541}
542
543int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
544{
545 struct qlcnic_nic_req req;
546 int rv;
547
548 memset(&req, 0, sizeof(struct qlcnic_nic_req));
549
550 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
551 req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
552 ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
553
554 req.words[0] = cpu_to_le64(flag);
555
556 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
557 if (rv != 0)
558 dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
559 flag ? "Set" : "Reset");
560 return rv;
561}
562
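/*
 * Enter loopback: enable the firmware loopback mode, then switch the
 * port to accept-all so looped-back frames are not dropped by the
 * receive filters.  A 1s delay follows before returning.
 */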
563int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
564{
565 if (qlcnic_set_fw_loopback(adapter, mode))
566 return -EIO;
567
568 if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
569 qlcnic_set_fw_loopback(adapter, mode);
570 return -EIO;
571 }
572
573 msleep(1000);
574 return 0;
575}
576
577void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
578{
579 int mode = VPORT_MISS_MODE_DROP;
580 struct net_device *netdev = adapter->netdev;
581
582 qlcnic_set_fw_loopback(adapter, 0);
583
584 if (netdev->flags & IFF_PROMISC)
585 mode = VPORT_MISS_MODE_ACCEPT_ALL;
586 else if (netdev->flags & IFF_ALLMULTI)
587 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
588
589 qlcnic_nic_set_promisc(adapter, mode);
590 msleep(1000);
591}
592
593/*
594 * Send the interrupt coalescing parameter set by ethtool to the card.
595 */
596int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
597{
598 struct qlcnic_nic_req req;
599 int rv;
600
601 memset(&req, 0, sizeof(struct qlcnic_nic_req));
602
603 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
604
605 req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
606 ((u64) adapter->portnum << 16));
607
608 req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
609 req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
610 ((u64) adapter->ahw->coal.rx_time_us) << 16);
611 req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
612 ((u64) adapter->ahw->coal.type) << 32 |
613 ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
614 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
615 if (rv != 0)
616 dev_err(&adapter->netdev->dev,
617 "Could not send interrupt coalescing parameters\n");
618 return rv;
619}
620
621int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
622{
623 struct qlcnic_nic_req req;
624 u64 word;
625 int rv;
626
627 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
628 return 0;
629
630 memset(&req, 0, sizeof(struct qlcnic_nic_req));
631
632 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
633
634 word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
635 req.req_hdr = cpu_to_le64(word);
636
637 req.words[0] = cpu_to_le64(enable);
638
639 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
640 if (rv != 0)
641 dev_err(&adapter->netdev->dev,
642 "Could not send configure hw lro request\n");
643
af19b491
AKS
644 return rv;
645}
646
647int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
648{
649 struct qlcnic_nic_req req;
650 u64 word;
651 int rv;
652
653 if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
654 return 0;
655
656 memset(&req, 0, sizeof(struct qlcnic_nic_req));
657
658 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
659
660 word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
661 ((u64)adapter->portnum << 16);
662 req.req_hdr = cpu_to_le64(word);
663
664 req.words[0] = cpu_to_le64(enable);
665
666 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
667 if (rv != 0)
668 dev_err(&adapter->netdev->dev,
669 "Could not send configure bridge mode request\n");
670
671 adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
672
673 return rv;
674}
675
676
677#define RSS_HASHTYPE_IP_TCP 0x3
678
679int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
680{
681 struct qlcnic_nic_req req;
682 u64 word;
683 int i, rv;
684
685 static const u64 key[] = {
686 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
687 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
688 0x255b0ec26d5a56daULL
689 };
690
691 memset(&req, 0, sizeof(struct qlcnic_nic_req));
692 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
693
694 word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
695 req.req_hdr = cpu_to_le64(word);
696
697 /*
698 * RSS request:
699 * bits 3-0: hash_method
700 * 5-4: hash_type_ipv4
701 * 7-6: hash_type_ipv6
702 * 8: enable
703 * 9: use indirection table
704 * 47-10: reserved
705 * 63-48: indirection table mask
706 */
707 word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
708 ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
709 ((u64)(enable & 0x1) << 8) |
710 ((0x7ULL) << 48);
711 req.words[0] = cpu_to_le64(word);
712 for (i = 0; i < 5; i++)
713 req.words[i+1] = cpu_to_le64(key[i]);
714
715 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
716 if (rv != 0)
717 dev_err(&adapter->netdev->dev, "could not configure RSS\n");
718
719 return rv;
720}
721
722int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
723{
724 struct qlcnic_nic_req req;
725 struct qlcnic_ipaddr *ipa;
726 u64 word;
727 int rv;
728
729 memset(&req, 0, sizeof(struct qlcnic_nic_req));
730 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
731
732 word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
733 req.req_hdr = cpu_to_le64(word);
734
735 req.words[0] = cpu_to_le64(cmd);
736 ipa = (struct qlcnic_ipaddr *)&req.words[1];
737 ipa->ipv4 = ip;
738
739 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
740 if (rv != 0)
741 dev_err(&adapter->netdev->dev,
742 "could not notify %s IP 0x%x request\n",
743 (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
744
745 return rv;
746}
747
748int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
749{
750 struct qlcnic_nic_req req;
751 u64 word;
752 int rv;
753
754 memset(&req, 0, sizeof(struct qlcnic_nic_req));
755 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
756
757 word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
758 req.req_hdr = cpu_to_le64(word);
759 req.words[0] = cpu_to_le64(enable | (enable << 8));
760
761 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
762 if (rv != 0)
763 dev_err(&adapter->netdev->dev,
764 "could not configure link notification\n");
765
766 return rv;
767}
768
769int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
770{
771 struct qlcnic_nic_req req;
772 u64 word;
773 int rv;
774
775 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
776 return 0;
777
778 memset(&req, 0, sizeof(struct qlcnic_nic_req));
779 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
780
781 word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
782 ((u64)adapter->portnum << 16) |
783 ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
784
785 req.req_hdr = cpu_to_le64(word);
786
787 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
788 if (rv != 0)
789 dev_err(&adapter->netdev->dev,
790 "could not cleanup lro flows\n");
791
792 return rv;
793}
794
795/*
796 * qlcnic_change_mtu - Change the Maximum Transfer Unit
797 * @returns 0 on success, negative on failure
798 */
799
800int qlcnic_change_mtu(struct net_device *netdev, int mtu)
801{
802 struct qlcnic_adapter *adapter = netdev_priv(netdev);
803 int rc = 0;
804
805 if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
806 dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
807 " not supported\n", P3P_MIN_MTU, P3P_MAX_MTU);
808 return -EINVAL;
809 }
810
811 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
812
813 if (!rc)
814 netdev->mtu = mtu;
815
816 return rc;
817}
818
819
820u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
821{
822 struct qlcnic_adapter *adapter = netdev_priv(netdev);
823
824 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
825 u32 changed = features ^ netdev->features;
826 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
827 }
828
829 if (!(features & NETIF_F_RXCSUM))
830 features &= ~NETIF_F_LRO;
831
832 return features;
833}
834
835
836int qlcnic_set_features(struct net_device *netdev, u32 features)
837{
838 struct qlcnic_adapter *adapter = netdev_priv(netdev);
839 u32 changed = netdev->features ^ features;
840 int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
841
842 if (!(changed & NETIF_F_LRO))
843 return 0;
844
845 netdev->features = features ^ NETIF_F_LRO;
846
847 if (qlcnic_config_hw_lro(adapter, hw_lro))
848 return -EIO;
849
850 if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
851 return -EIO;
852
853 return 0;
854}
855
856/*
857 * Translate a CRB register offset into an address in the 2M BAR.
858 * In: 'off' is the offset from the base of the 128M pci map.
859 * Returns < 0 if 'off' is not valid,
860 * 1 if CRB window access is needed; '*addr' is set to the
861 * indirect-access address for that offset,
862 * 0 if no window access is needed; '*addr' is set to the direct
863 * 2M map address.
864 */
865static int
866qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
867 ulong off, void __iomem **addr)
868{
869 const struct crb_128M_2M_sub_block_map *m;
870
871 if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
872 return -EINVAL;
873
874 off -= QLCNIC_PCI_CRBSPACE;
875
876 /*
877 * Try direct map
878 */
879 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
880
881 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
882 *addr = adapter->ahw->pci_base0 + m->start_2M +
883 (off - m->start_128M);
884 return 0;
885 }
886
887 /*
888 * Not in direct map, use crb window
889 */
890 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
891 return 1;
892}
893
894/*
895 * In: 'off' is offset from CRB space in 128M pci map
896 * Out: 'off' is 2M pci map addr
897 * side effect: lock crb window
898 */
899static int
900qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
901{
902 u32 window;
903 void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
904
905 off -= QLCNIC_PCI_CRBSPACE;
906
907 window = CRB_HI(off);
908 if (window == 0) {
909 dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
910 return -EIO;
911 }
912
913 writel(window, addr);
914 if (readl(addr) != window) {
915 if (printk_ratelimit())
916 dev_warn(&adapter->pdev->dev,
917 "failed to set CRB window to %d off 0x%lx\n",
918 window, off);
919 return -EIO;
920 }
921 return 0;
922}
923
924int
925qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
926{
927 unsigned long flags;
928 int rv;
929 void __iomem *addr = NULL;
930
931 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
932
933 if (rv == 0) {
934 writel(data, addr);
935 return 0;
936 }
937
938 if (rv > 0) {
939 /* indirect access */
940 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
941 crb_win_lock(adapter);
942 rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
943 if (!rv)
944 writel(data, addr);
945 crb_win_unlock(adapter);
946 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
947 return rv;
948 }
949
950 dev_err(&adapter->pdev->dev,
951 "%s: invalid offset: 0x%016lx\n", __func__, off);
952 dump_stack();
953 return -EIO;
954}
955
956u32
957qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
958{
959 unsigned long flags;
960 int rv;
961 u32 data = -1;
962 void __iomem *addr = NULL;
963
964 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
965
966 if (rv == 0)
967 return readl(addr);
968
969 if (rv > 0) {
970 /* indirect access */
971 write_lock_irqsave(&adapter->ahw->crb_lock, flags);
972 crb_win_lock(adapter);
973 if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
974 data = readl(addr);
975 crb_win_unlock(adapter);
976 write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
977 return data;
978 }
979
980 dev_err(&adapter->pdev->dev,
981 "%s: invalid offset: 0x%016lx\n", __func__, off);
982 dump_stack();
983 return -1;
984}
985
986
987void __iomem *
988qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
989{
990 void __iomem *addr = NULL;
991
992 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
993
994 return addr;
995}
996
997
998static int
999qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
1000 u64 addr, u32 *start)
1001{
1002 u32 window;
1003
1004 window = OCM_WIN_P3P(addr);
1005
1006 writel(window, adapter->ahw->ocm_win_crb);
1007 /* read back to flush */
1008 readl(adapter->ahw->ocm_win_crb);
1009
1010 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1011 return 0;
1012}
1013
1014static int
1015qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1016 u64 *data, int op)
1017{
1018 void __iomem *addr;
1019 int ret;
1020 u32 start;
1021
1022 mutex_lock(&adapter->ahw->mem_lock);
1023
1024 ret = qlcnic_pci_set_window_2M(adapter, off, &start);
1025 if (ret != 0)
1026 goto unlock;
1027
1028 addr = adapter->ahw->pci_base0 + start;
1029
1030 if (op == 0) /* read */
1031 *data = readq(addr);
1032 else /* write */
1033 writeq(*data, addr);
1034
1035unlock:
1036 mutex_unlock(&adapter->ahw->mem_lock);
1037
1038 return ret;
1039}
1040
1041void
1042qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1043{
1044 void __iomem *addr = adapter->ahw->pci_base0 +
1045 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1046
1047 mutex_lock(&adapter->ahw->mem_lock);
1048 *data = readq(addr);
1049 mutex_unlock(&adapter->ahw->mem_lock);
1050}
1051
1052void
1053qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1054{
1055 void __iomem *addr = adapter->ahw->pci_base0 +
1056 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
1057
1058 mutex_lock(&adapter->ahw->mem_lock);
1059 writeq(data, addr);
1060 mutex_unlock(&adapter->ahw->mem_lock);
1061}
1062
1063#define MAX_CTL_CHECK 1000
1064
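/*
 * Indirect 64-bit memory write through the MIU/SIU test agent.  The
 * 16-byte aligned address is programmed into the agent, the existing
 * contents are read back, the addressed 8 bytes are replaced with the
 * new data, and a write transaction is started; TA_CTL_BUSY is polled
 * up to MAX_CTL_CHECK times for completion.
 */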
1065int
1066qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
1067 u64 off, u64 data)
1068{
1069 int i, j, ret;
1070 u32 temp, off8;
1071 void __iomem *mem_crb;
1072
1073 /* Only 64-bit aligned access */
1074 if (off & 7)
1075 return -EIO;
1076
1077 /* P3 onward, test agent base for MIU and SIU is same */
1078 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1079 QLCNIC_ADDR_QDR_NET_MAX)) {
1080 mem_crb = qlcnic_get_ioaddr(adapter,
1081 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1082 goto correct;
1083 }
1084
1085 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1086 mem_crb = qlcnic_get_ioaddr(adapter,
1087 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1088 goto correct;
1089 }
1090
1091 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1092 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
1093
1094 return -EIO;
1095
1096correct:
1097 off8 = off & ~0xf;
1098
1099 mutex_lock(&adapter->ahw->mem_lock);
1100
1101 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1102 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1103
1104 i = 0;
1105 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1106 writel((TA_CTL_START | TA_CTL_ENABLE),
1107 (mem_crb + TEST_AGT_CTRL));
1108
1109 for (j = 0; j < MAX_CTL_CHECK; j++) {
1110 temp = readl(mem_crb + TEST_AGT_CTRL);
1111 if ((temp & TA_CTL_BUSY) == 0)
1112 break;
1113 }
1114
1115 if (j >= MAX_CTL_CHECK) {
1116 ret = -EIO;
1117 goto done;
1118 }
1119
1120 i = (off & 0xf) ? 0 : 2;
1121 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1122 mem_crb + MIU_TEST_AGT_WRDATA(i));
1123 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1124 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1125 i = (off & 0xf) ? 2 : 0;
1126
1127 writel(data & 0xffffffff,
1128 mem_crb + MIU_TEST_AGT_WRDATA(i));
1129 writel((data >> 32) & 0xffffffff,
1130 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1131
1132 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1133 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1134 (mem_crb + TEST_AGT_CTRL));
1135
1136 for (j = 0; j < MAX_CTL_CHECK; j++) {
1137 temp = readl(mem_crb + TEST_AGT_CTRL);
1138 if ((temp & TA_CTL_BUSY) == 0)
1139 break;
1140 }
1141
1142 if (j >= MAX_CTL_CHECK) {
1143 if (printk_ratelimit())
1144 dev_err(&adapter->pdev->dev,
1145 "failed to write through agent\n");
1146 ret = -EIO;
1147 } else
1148 ret = 0;
1149
1150done:
1151 mutex_unlock(&adapter->ahw->mem_lock);
1152
1153 return ret;
1154}
1155
1156int
1157qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1158 u64 off, u64 *data)
1159{
1160 int j, ret;
1161 u32 temp, off8;
1162 u64 val;
1163 void __iomem *mem_crb;
1164
1165 /* Only 64-bit aligned access */
1166 if (off & 7)
1167 return -EIO;
1168
1169 /* P3 onward, test agent base for MIU and SIU is same */
1170 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1171 QLCNIC_ADDR_QDR_NET_MAX)) {
1172 mem_crb = qlcnic_get_ioaddr(adapter,
1173 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1174 goto correct;
1175 }
1176
1177 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1178 mem_crb = qlcnic_get_ioaddr(adapter,
1179 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1180 goto correct;
1181 }
1182
1183 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
1184 return qlcnic_pci_mem_access_direct(adapter,
1185 off, data, 0);
1186 }
1187
1188 return -EIO;
1189
1190correct:
1191 off8 = off & ~0xf;
1192
1193 mutex_lock(&adapter->ahw->mem_lock);
1194
1195 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1196 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1197 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1198 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1199
1200 for (j = 0; j < MAX_CTL_CHECK; j++) {
1201 temp = readl(mem_crb + TEST_AGT_CTRL);
1202 if ((temp & TA_CTL_BUSY) == 0)
1203 break;
1204 }
1205
1206 if (j >= MAX_CTL_CHECK) {
1207 if (printk_ratelimit())
1208 dev_err(&adapter->pdev->dev,
1209 "failed to read through agent\n");
1210 ret = -EIO;
1211 } else {
1212 off8 = MIU_TEST_AGT_RDDATA_LO;
1213 if (off & 0xf)
1214 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1215
1216 temp = readl(mem_crb + off8 + 4);
1217 val = (u64)temp << 32;
1218 val |= readl(mem_crb + off8);
1219 *data = val;
1220 ret = 0;
1221 }
1222
1223 mutex_unlock(&adapter->ahw->mem_lock);
1224
1225 return ret;
1226}
1227
1228int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1229{
1230 int offset, board_type, magic;
1231 struct pci_dev *pdev = adapter->pdev;
1232
1233 offset = QLCNIC_FW_MAGIC_OFFSET;
1234 if (qlcnic_rom_fast_read(adapter, offset, &magic))
1235 return -EIO;
1236
1237 if (magic != QLCNIC_BDINFO_MAGIC) {
1238 dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
1239 magic);
1240 return -EIO;
1241 }
1242
1243 offset = QLCNIC_BRDTYPE_OFFSET;
1244 if (qlcnic_rom_fast_read(adapter, offset, &board_type))
1245 return -EIO;
1246
1247 adapter->ahw->board_type = board_type;
1248
1249 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1250 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1251 if ((gpio & 0x8000) == 0)
1252 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1253 }
1254
1255 switch (board_type) {
1256 case QLCNIC_BRDTYPE_P3P_HMEZ:
1257 case QLCNIC_BRDTYPE_P3P_XG_LOM:
1258 case QLCNIC_BRDTYPE_P3P_10G_CX4:
1259 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1260 case QLCNIC_BRDTYPE_P3P_IMEZ:
1261 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1262 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1263 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1264 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1265 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1266 adapter->ahw->port_type = QLCNIC_XGBE;
1267 break;
1268 case QLCNIC_BRDTYPE_P3P_REF_QG:
1269 case QLCNIC_BRDTYPE_P3P_4_GB:
1270 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1271 adapter->ahw->port_type = QLCNIC_GBE;
1272 break;
1273 case QLCNIC_BRDTYPE_P3P_10G_TP:
1274 adapter->ahw->port_type = (adapter->portnum < 2) ?
1275 QLCNIC_XGBE : QLCNIC_GBE;
1276 break;
1277 default:
1278 dev_err(&pdev->dev, "unknown board type %x\n", board_type);
1279 adapter->ahw->port_type = QLCNIC_XGBE;
1280 break;
1281 }
1282
1283 return 0;
1284}
1285
1286int
1287qlcnic_wol_supported(struct qlcnic_adapter *adapter)
1288{
1289 u32 wol_cfg;
1290
1291 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
1292 if (wol_cfg & (1UL << adapter->portnum)) {
1293 wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
1294 if (wol_cfg & (1 << adapter->portnum))
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
1301int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1302{
1303 struct qlcnic_nic_req req;
1304 int rv;
1305 u64 word;
1306
1307 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1308 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1309
1310 word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
1311 req.req_hdr = cpu_to_le64(word);
1312
1313 req.words[0] = cpu_to_le64((u64)rate << 32);
1314 req.words[1] = cpu_to_le64(state);
1315
1316 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1317 if (rv)
1318 dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
1319
1320 return rv;
1321}
1322
1323/* FW dump related functions */
1324static u32
1325qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1326 u32 *buffer)
1327{
1328 int i;
1329 u32 addr, data;
1330 struct __crb *crb = &entry->region.crb;
1331 void __iomem *base = adapter->ahw->pci_base0;
1332
1333 addr = crb->addr;
1334
1335 for (i = 0; i < crb->no_ops; i++) {
1336 QLCNIC_RD_DUMP_REG(addr, base, &data);
1337 *buffer++ = cpu_to_le32(addr);
1338 *buffer++ = cpu_to_le32(data);
1339 addr += crb->stride;
1340 }
1341 return crb->no_ops * 2 * sizeof(u32);
1342}
1343
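/*
 * Execute a control entry of the dump template.  Each bit set in the
 * entry's opcode selects one operation (write, read-modify-write, poll
 * CRB, or save/restore of template state), applied no_ops times with
 * the configured address stride.
 */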
1344static u32
1345qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1346 struct qlcnic_dump_entry *entry, u32 *buffer)
1347{
1348 int i, k, timeout = 0;
1349 void __iomem *base = adapter->ahw->pci_base0;
1350 u32 addr, data;
1351 u8 opcode, no_ops;
1352 struct __ctrl *ctr = &entry->region.ctrl;
1353 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1354
1355 addr = ctr->addr;
1356 no_ops = ctr->no_ops;
1357
1358 for (i = 0; i < no_ops; i++) {
1359 k = 0;
1360 opcode = 0;
1361 for (k = 0; k < 8; k++) {
1362 if (!(ctr->opcode & (1 << k)))
1363 continue;
1364 switch (1 << k) {
1365 case QLCNIC_DUMP_WCRB:
1366 QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1367 break;
1368 case QLCNIC_DUMP_RWCRB:
1369 QLCNIC_RD_DUMP_REG(addr, base, &data);
1370 QLCNIC_WR_DUMP_REG(addr, base, data);
1371 break;
1372 case QLCNIC_DUMP_ANDCRB:
1373 QLCNIC_RD_DUMP_REG(addr, base, &data);
1374 QLCNIC_WR_DUMP_REG(addr, base,
1375 (data & ctr->val2));
1376 break;
1377 case QLCNIC_DUMP_ORCRB:
1378 QLCNIC_RD_DUMP_REG(addr, base, &data);
1379 QLCNIC_WR_DUMP_REG(addr, base,
1380 (data | ctr->val3));
1381 break;
1382 case QLCNIC_DUMP_POLLCRB:
1383 while (timeout <= ctr->timeout) {
1384 QLCNIC_RD_DUMP_REG(addr, base, &data);
1385 if ((data & ctr->val2) == ctr->val1)
1386 break;
1387 msleep(1);
1388 timeout++;
1389 }
1390 if (timeout > ctr->timeout) {
1391 dev_info(&adapter->pdev->dev,
1392 "Timed out, aborting poll CRB\n");
1393 return -EINVAL;
1394 }
1395 break;
1396 case QLCNIC_DUMP_RD_SAVE:
1397 if (ctr->index_a)
1398 addr = t_hdr->saved_state[ctr->index_a];
1399 QLCNIC_RD_DUMP_REG(addr, base, &data);
1400 t_hdr->saved_state[ctr->index_v] = data;
1401 break;
1402 case QLCNIC_DUMP_WRT_SAVED:
1403 if (ctr->index_v)
1404 data = t_hdr->saved_state[ctr->index_v];
1405 else
1406 data = ctr->val1;
1407 if (ctr->index_a)
1408 addr = t_hdr->saved_state[ctr->index_a];
1409 QLCNIC_WR_DUMP_REG(addr, base, data);
1410 break;
1411 case QLCNIC_DUMP_MOD_SAVE_ST:
1412 data = t_hdr->saved_state[ctr->index_v];
1413 data <<= ctr->shl_val;
1414 data >>= ctr->shr_val;
1415 if (ctr->val2)
1416 data &= ctr->val2;
1417 data |= ctr->val3;
1418 data += ctr->val1;
1419 t_hdr->saved_state[ctr->index_v] = data;
1420 break;
1421 default:
1422 dev_info(&adapter->pdev->dev,
1423 "Unknown opcode\n");
1424 break;
1425 }
1426 }
1427 addr += ctr->stride;
1428 }
1429 return 0;
1430}
1431
1432static u32
1433qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1434 u32 *buffer)
1435{
1436 int loop;
1437 u32 val, data = 0;
1438 struct __mux *mux = &entry->region.mux;
1439 void __iomem *base = adapter->ahw->pci_base0;
1440
1441 val = mux->val;
1442 for (loop = 0; loop < mux->no_ops; loop++) {
1443 QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1444 QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1445 *buffer++ = cpu_to_le32(val);
1446 *buffer++ = cpu_to_le32(data);
1447 val += mux->val_stride;
1448 }
1449 return 2 * mux->no_ops * sizeof(u32);
1450}
1451
1452static u32
1453qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1454 u32 *buffer)
1455{
1456 int i, loop;
1457 u32 cnt, addr, data, que_id = 0;
1458 void __iomem *base = adapter->ahw->pci_base0;
1459 struct __queue *que = &entry->region.que;
1460
1461 addr = que->read_addr;
1462 cnt = que->read_addr_cnt;
1463
1464 for (loop = 0; loop < que->no_ops; loop++) {
1465 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1466 addr = que->read_addr;
1467 for (i = 0; i < cnt; i++) {
1468 QLCNIC_RD_DUMP_REG(addr, base, &data);
1469 *buffer++ = cpu_to_le32(data);
1470 addr += que->read_addr_stride;
1471 }
1472 que_id += que->stride;
1473 }
1474 return que->no_ops * cnt * sizeof(u32);
1475}
1476
1477static u32
1478qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1479 u32 *buffer)
1480{
1481 int i;
1482 u32 data;
1483 void __iomem *addr;
1484 struct __ocm *ocm = &entry->region.ocm;
1485
1486 addr = adapter->ahw->pci_base0 + ocm->read_addr;
1487 for (i = 0; i < ocm->no_ops; i++) {
1488 data = readl(addr);
1489 *buffer++ = cpu_to_le32(data);
1490 addr += ocm->read_addr_stride;
1491 }
1492 return ocm->no_ops * sizeof(u32);
1493}
1494
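/*
 * Capture flash/ROM contents through the flash window registers.  The
 * flash semaphore (SEM2) is acquired first, retrying with a 10ms sleep
 * up to MAX_CTL_CHECK times, and released once the words have been
 * copied into the dump buffer.
 */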
1495static u32
1496qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1497 u32 *buffer)
1498{
1499 int i, count = 0;
1500 u32 fl_addr, size, val, lck_val, addr;
1501 struct __mem *rom = &entry->region.mem;
1502 void __iomem *base = adapter->ahw->pci_base0;
1503
1504 fl_addr = rom->addr;
1505 size = rom->size/4;
1506lock_try:
1507 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1508 if (!lck_val && count < MAX_CTL_CHECK) {
1509 msleep(10);
1510 count++;
1511 goto lock_try;
1512 }
1513 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1514 for (i = 0; i < size; i++) {
1515 addr = fl_addr & 0xFFFF0000;
1516 QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1517 addr = LSW(fl_addr) + FLASH_ROM_DATA;
1518 QLCNIC_RD_DUMP_REG(addr, base, &val);
1519 fl_addr += 4;
1520 *buffer++ = cpu_to_le32(val);
1521 }
1522 readl(base + QLCNIC_FLASH_SEM2_ULK);
1523 return rom->size;
1524}
1525
1526static u32
1527qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1528 struct qlcnic_dump_entry *entry, u32 *buffer)
1529{
1530 int i;
1531 u32 cnt, val, data, addr;
1532 void __iomem *base = adapter->ahw->pci_base0;
1533 struct __cache *l1 = &entry->region.cache;
1534
1535 val = l1->init_tag_val;
1536
1537 for (i = 0; i < l1->no_ops; i++) {
1538 QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1539 QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1540 addr = l1->read_addr;
1541 cnt = l1->read_addr_num;
1542 while (cnt) {
1543 QLCNIC_RD_DUMP_REG(addr, base, &data);
1544 *buffer++ = cpu_to_le32(data);
1545 addr += l1->read_addr_stride;
1546 cnt--;
1547 }
1548 val += l1->stride;
1549 }
1550 return l1->no_ops * l1->read_addr_num * sizeof(u32);
1551}
1552
1553static u32
1554qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1555 struct qlcnic_dump_entry *entry, u32 *buffer)
1556{
1557 int i;
1558 u32 cnt, val, data, addr;
1559 u8 poll_mask, poll_to, time_out = 0;
1560 void __iomem *base = adapter->ahw->pci_base0;
1561 struct __cache *l2 = &entry->region.cache;
1562
1563 val = l2->init_tag_val;
1564 poll_mask = LSB(MSW(l2->ctrl_val));
1565 poll_to = MSB(MSW(l2->ctrl_val));
1566
1567 for (i = 0; i < l2->no_ops; i++) {
1568 QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1569 if (LSW(l2->ctrl_val))
1570 QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1571 LSW(l2->ctrl_val));
1572 if (!poll_mask)
1573 goto skip_poll;
1574 do {
1575 QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1576 if (!(data & poll_mask))
1577 break;
1578 msleep(1);
1579 time_out++;
1580 } while (time_out <= poll_to);
1581
1582 if (time_out > poll_to) {
1583 dev_err(&adapter->pdev->dev,
1584 "Timeout exceeded in %s, aborting dump\n",
1585 __func__);
1586 return -EINVAL;
1587 }
1588skip_poll:
1589 addr = l2->read_addr;
1590 cnt = l2->read_addr_num;
1591 while (cnt) {
1592 QLCNIC_RD_DUMP_REG(addr, base, &data);
1593 *buffer++ = cpu_to_le32(data);
1594 addr += l2->read_addr_stride;
1595 cnt--;
1596 }
1597 val += l2->stride;
1598 }
1599 return l2->no_ops * l2->read_addr_num * sizeof(u32);
1600}
1601
1602static u32
1603qlcnic_read_memory(struct qlcnic_adapter *adapter,
1604 struct qlcnic_dump_entry *entry, u32 *buffer)
1605{
1606 u32 addr, data, test, ret = 0;
1607 int i, reg_read;
1608 struct __mem *mem = &entry->region.mem;
1609 void __iomem *base = adapter->ahw->pci_base0;
1610
1611 reg_read = mem->size;
1612 addr = mem->addr;
1613 /* check that size is a multiple of 16 and addr is 16-byte aligned */
1614 if ((addr & 0xf) || (reg_read%16)) {
1615 dev_info(&adapter->pdev->dev,
1616 "Unaligned memory addr:0x%x size:0x%x\n",
1617 addr, reg_read);
1618 return -EINVAL;
1619 }
1620
1621 mutex_lock(&adapter->ahw->mem_lock);
1622
1623 while (reg_read != 0) {
1624 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1625 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1626 QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1627 TA_CTL_ENABLE | TA_CTL_START);
1628
1629 for (i = 0; i < MAX_CTL_CHECK; i++) {
1630 QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1631 if (!(test & TA_CTL_BUSY))
1632 break;
1633 }
1634 if (i == MAX_CTL_CHECK) {
1635 if (printk_ratelimit())
1636 dev_err(&adapter->pdev->dev,
1637 "failed to read through agent\n");
1638 ret = -EINVAL;
1639 goto out;
1640 }
1641
1642 for (i = 0; i < 4; i++) {
1643 QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1644 *buffer++ = cpu_to_le32(data);
1645 }
1646 addr += 16;
1647 reg_read -= 16;
1648 ret += 16;
1649 }
1650out:
1651 mutex_unlock(&adapter->ahw->mem_lock);
1652 return ret;
1653}
1654
1655static u32
1656qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1657 struct qlcnic_dump_entry *entry, u32 *buffer)
1658{
1659 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1660 return 0;
1661}
1662
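/* Dispatch table mapping each template entry type to its capture handler. */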
1663struct qlcnic_dump_operations fw_dump_ops[] = {
1664 { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
1665 { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
1666 { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
1667 { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
1668 { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
1669 { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
1670 { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
1671 { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
1672 { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
1673 { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
1674 { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
1675 { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
1676 { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
1677 { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
1678 { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
1679 { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
1680 { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
1681 { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
1682 { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
1683 { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
1684};
1685
1686/* Walk the template and collect dump for each entry in the dump template */
1687static int
1688qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1689 u32 size)
1690{
1691 int ret = 1;
1692 if (size != entry->hdr.cap_size) {
1693 dev_info(dev,
1694 "Invalid dump, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
1695 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1696 dev_info(dev, "Aborting further dump capture\n");
1697 ret = 0;
1698 }
1699 return ret;
1700}
1701
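/*
 * Capture a firmware dump: size the data buffer from the capture mask
 * in the template header, then walk every template entry, dispatch it
 * to the matching fw_dump_ops[] handler and append the captured words.
 * A udev CHANGE event announces the finished dump to user space.
 */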
1702int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1703{
1704 u32 *buffer;
1705 char mesg[64];
1706 char *msg[] = {mesg, NULL};
1707 int i, k, ops_cnt, ops_index, dump_size = 0;
1708 u32 entry_offset, dump, no_entries, buf_offset = 0;
1709 struct qlcnic_dump_entry *entry;
1710 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1711 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
1712
1713 if (fw_dump->clr) {
1714 dev_info(&adapter->pdev->dev,
1715 "Previous dump not cleared, not capturing dump\n");
1716 return -EIO;
1717 }
1718 /* Calculate the size for dump data area only */
1719 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1720 if (i & tmpl_hdr->drv_cap_mask)
1721 dump_size += tmpl_hdr->cap_sizes[k];
1722 if (!dump_size)
1723 return -EIO;
1724
1725 fw_dump->data = vzalloc(dump_size);
1726 if (!fw_dump->data) {
1727 dev_info(&adapter->pdev->dev,
1728 "Unable to allocate (%d KB) for fw dump\n",
1729 dump_size/1024);
1730 return -ENOMEM;
1731 }
1732 buffer = fw_dump->data;
1733 fw_dump->size = dump_size;
1734 no_entries = tmpl_hdr->num_entries;
1735 ops_cnt = ARRAY_SIZE(fw_dump_ops);
1736 entry_offset = tmpl_hdr->offset;
1737 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
1738 tmpl_hdr->sys_info[1] = adapter->fw_version;
1739
1740 for (i = 0; i < no_entries; i++) {
1741 entry = (void *)tmpl_hdr + entry_offset;
1742 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
1743 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1744 entry_offset += entry->hdr.offset;
1745 continue;
1746 }
1747 /* Find the handler for this entry */
1748 ops_index = 0;
1749 while (ops_index < ops_cnt) {
1750 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1751 break;
1752 ops_index++;
1753 }
1754 if (ops_index == ops_cnt) {
1755 dev_info(&adapter->pdev->dev,
1756 "Invalid entry type %d, exiting dump\n",
1757 entry->hdr.type);
1758 goto error;
1759 }
1760 /* Collect dump for this entry */
1761 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1762 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
1763 dump))
1764 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1765 buf_offset += entry->hdr.cap_size;
1766 entry_offset += entry->hdr.offset;
1767 buffer = fw_dump->data + buf_offset;
1768 }
1769 if (dump_size != buf_offset) {
1770 dev_info(&adapter->pdev->dev,
1771 "Captured(%d) and expected size(%d) do not match\n",
1772 buf_offset, dump_size);
1773 goto error;
1774 } else {
1775 fw_dump->clr = 1;
1776 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
1777 adapter->netdev->name);
1778 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
1779 fw_dump->size);
1780 /* Send a udev event to notify availability of FW dump */
1781 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
1782 return 0;
1783 }
1784error:
1785 vfree(fw_dump->data);
1786 return -EINVAL;
1787}