cxgb3 - fix EEH
[deliverable/linux.git] / drivers / net / cxgb3 / t3_hw.c
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

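/*
 * Illustrative usage sketch (editorial addition, not driver code): the
 * sf1_* flash helpers later in this file poll a busy bit in exactly this
 * pattern.  A_SF_OP and F_BUSY are the serial flash operation register
 * and its busy flag from regs.h:
 *
 *	u32 val;
 *	int ret = t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0,
 *				      10, 10, &val);
 *	if (ret)	- timed out after 10 polls, ret == -EAGAIN
 *	else		- val holds the register value seen at completion
 */
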
/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register. Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}

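/*
 * Illustrative sketch (editorial addition): t3_set_reg_field does a
 * read-modify-write, so updating one field leaves the rest of the
 * register intact.  Assuming the usual V_CLKDIV()/M_CLKDIV field macros
 * from regs.h, changing only the MDIO clock divider in A_MI1_CFG would
 * look like:
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG,
 *			 V_CLKDIV(M_CLKDIV), V_CLKDIV(new_div));
 *
 * The read-back at the end of t3_set_reg_field flushes the posted write.
 */
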
/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

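/*
 * Editorial note on the decoding above: mc7->width selects the memory
 * organization.  Width 0 returns a full 64-bit beat split across
 * A_MC7_BD_DATA1/DATA0; wider settings return (1 << width) partial beats
 * that are pre-shifted by shift[width] and packed step[width] bits apart,
 * so e.g. for width 2 each beat contributes
 *
 *	val64 |= (u64)(val >> 16) << (16 * i);	 with i = 3, 2, 1, 0
 */
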
/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
		  V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

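/*
 * Editorial summary of the two MDIO flavours above, as implemented by the
 * register writes in this file:
 *
 *	direct:    A_MI1_ADDR <- phy/reg, A_MI1_OP <- 2 (read) or 1 (write)
 *	indirect:  A_MI1_ADDR <- phy/mmd, A_MI1_DATA <- reg,
 *	           A_MI1_OP <- 0 (address cycle), then
 *	           A_MI1_OP <- 3 (read) or A_MI1_DATA <- val, A_MI1_OP <- 1
 *
 * Both paths serialize on adapter->mdio_lock because the MI1 registers
 * are shared by all PHYs on the adapter.
 */
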
/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex. This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index. Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL 4
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00

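/*
 * For reference (editorial note): each VPD_ENTRY(name, len) above expands
 * to the standard VPD-R keyword triple, e.g. VPD_ENTRY(sn, SERNUM_LEN)
 * becomes
 *
 *	u8 sn_kword[2];		- two-character keyword ("SN")
 *	u8 sn_len;		- length byte
 *	u8 sn_data[SERNUM_LEN];	- value bytes
 *
 * which is why get_vpd_params() below can refer to fields as vpd.sn_data.
 */
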
/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

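/*
 * Worked example (editorial, with a made-up value): the "na" VPD entry
 * holds the MAC address base as ASCII hex, so for na_data = "00074300C0A8"
 * the loop above yields
 *
 *	eth_base[0] = hex2int('0') * 16 + hex2int('0') = 0x00
 *	eth_base[1] = hex2int('0') * 16 + hex2int('7') = 0x07
 *	eth_base[2] = hex2int('4') * 16 + hex2int('3') = 0x43
 *	...
 *
 * i.e. the base address from which the ports' MAC addresses are derived.
 */
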
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

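/*
 * Sketch of a full flash transaction built from the two primitives above
 * (editorial illustration): a status poll issues the opcode with cont=1,
 * which keeps the chip select asserted, then reads the response with
 * cont=0 to end the transaction:
 *
 *	u32 status;
 *	int ret = sf1_write(adapter, 1, 1, SF_RD_STATUS);
 *	if (!ret)
 *		ret = sf1_read(adapter, 1, 0, &status);
 *
 * flash_wait_op() below uses exactly this pairing.
 */
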
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - read the tp sram version
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
 * Reads the protocol SRAM version and checks it against the version the
 * driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}

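/*
 * Editorial note on the checksum convention used here and in t3_load_fw()
 * below: images end with a 32-bit word chosen so that the u32 sum of all
 * big-endian words, final word included, is exactly 0xffffffff.
 * Schematically, a host-side image generator would do:
 *
 *	u32 sum = 0;
 *	for (i = 0; i < nwords - 1; i++)
 *		sum += ntohl(p[i]);
 *	p[nwords - 1] = htonl(0xffffffff - sum);
 *
 * which lets the verification loops simply add every word and compare.
 */
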
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new FW image is required
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/*
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

1116 * t3_link_changed - handle interface link changes
1117 * @adapter: the adapter
1118 * @port_id: the port index that changed link state
1119 *
1120 * Called when a port's link settings change to propagate the new values
1121 * to the associated PHY and MAC. After performing the common tasks it
1122 * invokes an OS-specific handler.
1123 */
1124void t3_link_changed(struct adapter *adapter, int port_id)
1125{
1126 int link_ok, speed, duplex, fc;
1127 struct port_info *pi = adap2pinfo(adapter, port_id);
1128 struct cphy *phy = &pi->phy;
1129 struct cmac *mac = &pi->mac;
1130 struct link_config *lc = &pi->link_config;
1131
1132 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1133
1134 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1135 uses_xaui(adapter)) {
1136 if (link_ok)
1137 t3b_pcs_reset(mac);
1138 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1139 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1140 }
1141 lc->link_ok = link_ok;
1142 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1143 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1144 if (lc->requested_fc & PAUSE_AUTONEG)
1145 fc &= lc->requested_fc;
1146 else
1147 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1148
1149 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1150 /* Set MAC speed, duplex, and flow control to match PHY. */
1151 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1152 lc->fc = fc;
1153 }
1154
1155 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1156}
1157
1158/**
1159 * t3_link_start - apply link configuration to MAC/PHY
1160 * @phy: the PHY to setup
1161 * @mac: the MAC to setup
1162 * @lc: the requested link configuration
1163 *
1164 * Set up a port's MAC and PHY according to a desired link configuration.
1165 * - If the PHY can auto-negotiate first decide what to advertise, then
1166 * enable/disable auto-negotiation as desired, and reset.
1167 * - If the PHY does not auto-negotiate just reset it.
1168 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1169 * otherwise do it later based on the outcome of auto-negotiation.
1170 */
1171int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1172{
1173 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1174
1175 lc->link_ok = 0;
1176 if (lc->supported & SUPPORTED_Autoneg) {
1177 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1178 if (fc) {
1179 lc->advertising |= ADVERTISED_Asym_Pause;
1180 if (fc & PAUSE_RX)
1181 lc->advertising |= ADVERTISED_Pause;
1182 }
1183 phy->ops->advertise(phy, lc->advertising);
1184
1185 if (lc->autoneg == AUTONEG_DISABLE) {
1186 lc->speed = lc->requested_speed;
1187 lc->duplex = lc->requested_duplex;
1188 lc->fc = (unsigned char)fc;
1189 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1190 fc);
1191 /* Also disables autoneg */
1192 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1193 phy->ops->reset(phy, 0);
1194 } else
1195 phy->ops->autoneg_enable(phy);
1196 } else {
1197 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1198 lc->fc = (unsigned char)fc;
1199 phy->ops->reset(phy, 0);
1200 }
1201 return 0;
1202}
1203
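/*
 * Editorial summary of the pause-advertisement mapping in the autoneg
 * branch above (derived directly from the code):
 *
 *	requested_fc		advertised bits
 *	PAUSE_RX | PAUSE_TX	ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_TX only		ADVERTISED_Asym_Pause
 *	PAUSE_RX only		ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	neither			no pause bits
 */
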
/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter. The table is terminated by an entry
 * specifying mask 0. Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

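/*
 * Shape of the tables t3_handle_intr_status() consumes (editorial sketch;
 * the real tables follow below).  Each entry pairs a cause mask with a
 * message, an optional statistics index and a fatal flag; mask 0 ends the
 * table.  F_SOME_ERR, F_SOFT_ERR and STAT_SOME_ERR are placeholders, not
 * real register bits:
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOME_ERR, "some fatal error", -1, 1},
 *		{F_SOFT_ERR, "counted soft error", STAT_SOME_ERR, 0},
 *		{0}
 *	};
 */
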
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

1644#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1645 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1646/*
1647 * XGMAC interrupt handler.
1648 */
1649static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1650{
1651 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1652 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1653
1654 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1655 mac->stats.tx_fifo_parity_err++;
1656 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1657 }
1658 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1659 mac->stats.rx_fifo_parity_err++;
1660 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1661 }
1662 if (cause & F_TXFIFO_UNDERRUN)
1663 mac->stats.tx_fifo_urun++;
1664 if (cause & F_RXFIFO_OVERFLOW)
1665 mac->stats.rx_fifo_ovfl++;
1666 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1667 mac->stats.serdes_signal_loss++;
1668 if (cause & F_XAUIPCSCTCERR)
1669 mac->stats.xaui_pcs_ctc_err++;
1670 if (cause & F_XAUIPCSALIGNCHANGE)
1671 mac->stats.xaui_pcs_align_change++;
1672
1673 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1674 if (cause & XGM_INTR_FATAL)
1675 t3_fatal_err(adap);
1676 return cause != 0;
1677}
1678
1679/*
1680 * Interrupt handler for PHY events.
1681 */
1682int t3_phy_intr_handler(struct adapter *adapter)
1683{
1ca03cbc 1684 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1685 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1686
1687 for_each_port(adapter, i) {
1688 struct port_info *p = adap2pinfo(adapter, i);
1689
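		/*
		 * Peel off the lowest set bit of gpi: each port's PHY
		 * interrupt arrives on its own GPIO line, and the lines
		 * are consumed in port order.
		 */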
1690 mask = gpi - (gpi & (gpi - 1));
1691 gpi -= mask;
1692
1693 if (!(p->port_type->caps & SUPPORTED_IRQ))
1694 continue;
1695
1696 if (cause & mask) {
1697 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1698
1699 if (phy_cause & cphy_cause_link_change)
1700 t3_link_changed(adapter, i);
1701 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1702 p->phy.fifo_errors++;
1703 }
1704 }
1705
1706 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1707 return 0;
1708}
1709
1710/*
1711 * T3 slow path (non-data) interrupt handler.
1712 */
1713int t3_slow_intr_handler(struct adapter *adapter)
1714{
1715 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1716
1717 cause &= adapter->slow_intr_mask;
1718 if (!cause)
1719 return 0;
1720 if (cause & F_PCIM0) {
1721 if (is_pcie(adapter))
1722 pcie_intr_handler(adapter);
1723 else
1724 pci_intr_handler(adapter);
1725 }
1726 if (cause & F_SGE3)
1727 t3_sge_err_intr_handler(adapter);
1728 if (cause & F_MC7_PMRX)
1729 mc7_intr_handler(&adapter->pmrx);
1730 if (cause & F_MC7_PMTX)
1731 mc7_intr_handler(&adapter->pmtx);
1732 if (cause & F_MC7_CM)
1733 mc7_intr_handler(&adapter->cm);
1734 if (cause & F_CIM)
1735 cim_intr_handler(adapter);
1736 if (cause & F_TP1)
1737 tp_intr_handler(adapter);
1738 if (cause & F_ULP2_RX)
1739 ulprx_intr_handler(adapter);
1740 if (cause & F_ULP2_TX)
1741 ulptx_intr_handler(adapter);
1742 if (cause & F_PM1_RX)
1743 pmrx_intr_handler(adapter);
1744 if (cause & F_PM1_TX)
1745 pmtx_intr_handler(adapter);
1746 if (cause & F_CPL_SWITCH)
1747 cplsw_intr_handler(adapter);
1748 if (cause & F_MPS0)
1749 mps_intr_handler(adapter);
1750 if (cause & F_MC5A)
1751 t3_mc5_intr_handler(&adapter->mc5);
1752 if (cause & F_XGMAC0_0)
1753 mac_intr_handler(adapter, 0);
1754 if (cause & F_XGMAC0_1)
1755 mac_intr_handler(adapter, 1);
1756 if (cause & F_T3DBG)
1757 t3_os_ext_intr_handler(adapter);
1758
1759 /* Clear the interrupts just processed. */
1760 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1761 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1762 return 1;
1763}
1764
1765/**
1766 * t3_intr_enable - enable interrupts
1767 * @adapter: the adapter whose interrupts should be enabled
1768 *
1769 * Enable interrupts by setting the interrupt enable registers of the
1770 * various HW modules and then enabling the top-level interrupt
1771 * concentrator.
1772 */
1773void t3_intr_enable(struct adapter *adapter)
1774{
1775 static const struct addr_val_pair intr_en_avp[] = {
1776 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1777 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1778 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1779 MC7_INTR_MASK},
1780 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1781 MC7_INTR_MASK},
1782 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1783 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1784 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1785 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1786 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1787 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1788 };
1789
1790 adapter->slow_intr_mask = PL_INTR_MASK;
1791
1792 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1793 t3_write_reg(adapter, A_TP_INT_ENABLE,
1794 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1795
1796 if (adapter->params.rev > 0) {
1797 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1798 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1799 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1800 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1801 F_PBL_BOUND_ERR_CH1);
1802 } else {
1803 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1804 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1805 }
1806
1807 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1808 adapter_info(adapter)->gpio_intr);
1809 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1810 adapter_info(adapter)->gpio_intr);
1811 if (is_pcie(adapter))
1812 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1813 else
1814 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1815 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1816 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1817}
1818
1819/**
1820 * t3_intr_disable - disable a card's interrupts
1821 * @adapter: the adapter whose interrupts should be disabled
1822 *
1823 * Disable interrupts. We only disable the top-level interrupt
1824 * concentrator and the SGE data interrupts.
1825 */
1826void t3_intr_disable(struct adapter *adapter)
1827{
1828 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1829 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1830 adapter->slow_intr_mask = 0;
1831}
1832
1833/**
1834 * t3_intr_clear - clear all interrupts
1835 * @adapter: the adapter whose interrupts should be cleared
1836 *
1837 * Clears all interrupts.
1838 */
1839void t3_intr_clear(struct adapter *adapter)
1840{
1841 static const unsigned int cause_reg_addr[] = {
1842 A_SG_INT_CAUSE,
1843 A_SG_RSPQ_FL_STATUS,
1844 A_PCIX_INT_CAUSE,
1845 A_MC7_INT_CAUSE,
1846 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1847 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1848 A_CIM_HOST_INT_CAUSE,
1849 A_TP_INT_CAUSE,
1850 A_MC5_DB_INT_CAUSE,
1851 A_ULPRX_INT_CAUSE,
1852 A_ULPTX_INT_CAUSE,
1853 A_CPL_INTR_CAUSE,
1854 A_PM1_TX_INT_CAUSE,
1855 A_PM1_RX_INT_CAUSE,
1856 A_MPS_INT_CAUSE,
1857 A_T3DBG_INT_CAUSE,
1858 };
1859 unsigned int i;
1860
1861 /* Clear PHY and MAC interrupts for each port. */
1862 for_each_port(adapter, i)
1863 t3_port_intr_clear(adapter, i);
1864
1865 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1866 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1867
1868 if (is_pcie(adapter))
1869 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1870 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1871 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1872}
1873
1874/**
1875 * t3_port_intr_enable - enable port-specific interrupts
1876 * @adapter: associated adapter
1877 * @idx: index of port whose interrupts should be enabled
1878 *
1879 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1880 * adapter port.
1881 */
1882void t3_port_intr_enable(struct adapter *adapter, int idx)
1883{
1884 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1885
1886 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1887 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1888 phy->ops->intr_enable(phy);
1889}
1890
1891/**
1892 * t3_port_intr_disable - disable port-specific interrupts
1893 * @adapter: associated adapter
1894 * @idx: index of port whose interrupts should be disabled
1895 *
1896 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1897 * adapter port.
1898 */
1899void t3_port_intr_disable(struct adapter *adapter, int idx)
1900{
1901 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1902
1903 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1904 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1905 phy->ops->intr_disable(phy);
1906}
1907
1908/**
1909 * t3_port_intr_clear - clear port-specific interrupts
1910 * @adapter: associated adapter
1911 * @idx: index of port whose interrupts to clear
1912 *
1913 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1914 * adapter port.
1915 */
1916void t3_port_intr_clear(struct adapter *adapter, int idx)
1917{
1918 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1919
1920 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1921 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1922 phy->ops->intr_clear(phy);
1923}
1924
1925#define SG_CONTEXT_CMD_ATTEMPTS 100
1926
1927/**
1928 * t3_sge_write_context - write an SGE context
1929 * @adapter: the adapter
1930 * @id: the context id
1931 * @type: the context type
1932 *
1933 * Program an SGE context with the values already loaded in the
1934 * CONTEXT_DATA? registers.
1935 */
1936static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1937 unsigned int type)
1938{
1939 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1940 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1941 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1942 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1943 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1944 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1945 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 1946 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1947}
1948
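/*
 * Zero out an SGE context: load all-zero CONTEXT_DATA values and let
 * t3_sge_write_context() push them out with a full write mask.
 */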
1949static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1950 unsigned int type)
1951{
1952 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1953 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1954 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1955 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1956 return t3_sge_write_context(adap, id, type);
1957}
1958
1959/**
1960 * t3_sge_init_ecntxt - initialize an SGE egress context
1961 * @adapter: the adapter to configure
1962 * @id: the context id
1963 * @gts_enable: whether to enable GTS for the context
1964 * @type: the egress context type
1965 * @respq: associated response queue
1966 * @base_addr: base address of queue
1967 * @size: number of queue entries
1968 * @token: uP token
1969 * @gen: initial generation value for the context
1970 * @cidx: consumer pointer
1971 *
1972 * Initialize an SGE egress context and make it ready for use. If the
1973 * platform allows concurrent context operations, the caller is
1974 * responsible for appropriate locking.
1975 */
1976int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1977 enum sge_context_type type, int respq, u64 base_addr,
1978 unsigned int size, unsigned int token, int gen,
1979 unsigned int cidx)
1980{
1981 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1982
1983 if (base_addr & 0xfff) /* must be 4K aligned */
1984 return -EINVAL;
1985 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1986 return -EBUSY;
1987
1988 base_addr >>= 12;
1989 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1990 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1991 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1992 V_EC_BASE_LO(base_addr & 0xffff));
1993 base_addr >>= 16;
1994 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1995 base_addr >>= 32;
1996 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1997 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1998 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1999 F_EC_VALID);
2000 return t3_sge_write_context(adapter, id, F_EGRESS);
2001}
2002
2003/**
2004 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2005 * @adapter: the adapter to configure
2006 * @id: the context id
2007 * @gts_enable: whether to enable GTS for the context
2008 * @base_addr: base address of queue
2009 * @size: number of queue entries
2010 * @bsize: size of each buffer for this queue
2011 * @cong_thres: threshold to signal congestion to upstream producers
2012 * @gen: initial generation value for the context
2013 * @cidx: consumer pointer
2014 *
2015 * Initialize an SGE free list context and make it ready for use. The
2016 * caller is responsible for ensuring only one context operation occurs
2017 * at a time.
2018 */
2019int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2020 int gts_enable, u64 base_addr, unsigned int size,
2021 unsigned int bsize, unsigned int cong_thres, int gen,
2022 unsigned int cidx)
2023{
2024 if (base_addr & 0xfff) /* must be 4K aligned */
2025 return -EINVAL;
2026 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2027 return -EBUSY;
2028
2029 base_addr >>= 12;
2030 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2031 base_addr >>= 32;
2032 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2033 V_FL_BASE_HI((u32) base_addr) |
2034 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2035 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2036 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2037 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2038 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2039 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2040 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2041 return t3_sge_write_context(adapter, id, F_FREELIST);
2042}
2043
2044/**
2045 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2046 * @adapter: the adapter to configure
2047 * @id: the context id
2048 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2049 * @base_addr: base address of queue
2050 * @size: number of queue entries
2051 * @fl_thres: threshold for selecting the normal or jumbo free list
2052 * @gen: initial generation value for the context
2053 * @cidx: consumer pointer
2054 *
2055 * Initialize an SGE response queue context and make it ready for use.
2056 * The caller is responsible for ensuring only one context operation
2057 * occurs at a time.
2058 */
2059int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2060 int irq_vec_idx, u64 base_addr, unsigned int size,
2061 unsigned int fl_thres, int gen, unsigned int cidx)
2062{
2063 unsigned int intr = 0;
2064
2065 if (base_addr & 0xfff) /* must be 4K aligned */
2066 return -EINVAL;
2067 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2068 return -EBUSY;
2069
2070 base_addr >>= 12;
2071 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2072 V_CQ_INDEX(cidx));
2073 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2074 base_addr >>= 32;
2075 if (irq_vec_idx >= 0)
2076 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2077 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2078 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2079 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2080 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2081}
2082
2083/**
2084 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2085 * @adapter: the adapter to configure
2086 * @id: the context id
2087 * @base_addr: base address of queue
2088 * @size: number of queue entries
2089 * @rspq: response queue for async notifications
2090 * @ovfl_mode: CQ overflow mode
2091 * @credits: completion queue credits
2092 * @credit_thres: the credit threshold
2093 *
2094 * Initialize an SGE completion queue context and make it ready for use.
2095 * The caller is responsible for ensuring only one context operation
2096 * occurs at a time.
2097 */
2098int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2099 unsigned int size, int rspq, int ovfl_mode,
2100 unsigned int credits, unsigned int credit_thres)
2101{
2102 if (base_addr & 0xfff) /* must be 4K aligned */
2103 return -EINVAL;
2104 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2105 return -EBUSY;
2106
2107 base_addr >>= 12;
2108 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2109 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2110 base_addr >>= 32;
2111 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2112 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2113 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2114 V_CQ_ERR(ovfl_mode));
2115 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2116 V_CQ_CREDIT_THRES(credit_thres));
2117 return t3_sge_write_context(adapter, id, F_CQ);
2118}
2119
2120/**
2121 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2122 * @adapter: the adapter
2123 * @id: the egress context id
2124 * @enable: enable (1) or disable (0) the context
2125 *
2126 * Enable or disable an SGE egress context. The caller is responsible for
2127 * ensuring only one context operation occurs at a time.
2128 */
2129int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2130{
2131 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2132 return -EBUSY;
2133
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2138 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2139 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2140 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2141 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2142 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2143}
2144
2145/**
2146 * t3_sge_disable_fl - disable an SGE free-buffer list
2147 * @adapter: the adapter
2148 * @id: the free list context id
2149 *
2150 * Disable an SGE free-buffer list. The caller is responsible for
2151 * ensuring only one context operation occurs at a time.
2152 */
2153int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2154{
2155 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2156 return -EBUSY;
2157
2158 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2159 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2160 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2161 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2162 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2163 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2164 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2165 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2166 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2167}
2168
2169/**
2170 * t3_sge_disable_rspcntxt - disable an SGE response queue
2171 * @adapter: the adapter
2172 * @id: the response queue context id
2173 *
2174 * Disable an SGE response queue. The caller is responsible for
2175 * ensuring only one context operation occurs at a time.
2176 */
2177int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2178{
2179 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2180 return -EBUSY;
2181
2182 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2183 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2184 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2185 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2186 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2187 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2188 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2189 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2190 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2191}
2192
2193/**
2194 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2195 * @adapter: the adapter
2196 * @id: the completion queue context id
2197 *
2198 * Disable an SGE completion queue. The caller is responsible for
2199 * ensuring only one context operation occurs at a time.
2200 */
2201int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2202{
2203 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2204 return -EBUSY;
2205
2206 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2207 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2208 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2209 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2210 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2211 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2212 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2213 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2214 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2215}
2216
2217/**
2218 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2219 * @adapter: the adapter
2220 * @id: the context id
2221 * @op: the operation to perform
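 * @credits: the CQ credits to supply with the operation, where applicable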
2222 *
2223 * Perform the selected operation on an SGE completion queue context.
2224 * The caller is responsible for ensuring only one context operation
2225 * occurs at a time.
2226 */
2227int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2228 unsigned int credits)
2229{
2230 u32 val;
2231
2232 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2233 return -EBUSY;
2234
2235 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2236 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2237 V_CONTEXT(id) | F_CQ);
2238 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2239 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2240 return -EIO;
2241
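	/*
	 * Opcodes 2-6 report back the current CQ index: rev > 0 hardware
	 * returns it directly in the command status, older parts need an
	 * explicit context read to fetch it.
	 */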
2242 if (op >= 2 && op < 7) {
2243 if (adapter->params.rev > 0)
2244 return G_CQ_INDEX(val);
2245
2246 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2247 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2248 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2249 F_CONTEXT_CMD_BUSY, 0,
2250 SG_CONTEXT_CMD_ATTEMPTS, 1))
2251 return -EIO;
2252 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2253 }
2254 return 0;
2255}
2256
2257/**
2258 * t3_sge_read_context - read an SGE context
2259 * @type: the context type
2260 * @adapter: the adapter
2261 * @id: the context id
2262 * @data: holds the retrieved context
2263 *
 2264 * Read an SGE context of the given type. The caller is responsible for ensuring
2265 * only one context operation occurs at a time.
2266 */
2267static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2268 unsigned int id, u32 data[4])
2269{
2270 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2271 return -EBUSY;
2272
2273 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2274 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2275 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2276 SG_CONTEXT_CMD_ATTEMPTS, 1))
2277 return -EIO;
2278 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2279 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2280 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2281 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2282 return 0;
2283}
2284
2285/**
2286 * t3_sge_read_ecntxt - read an SGE egress context
2287 * @adapter: the adapter
2288 * @id: the context id
2289 * @data: holds the retrieved context
2290 *
2291 * Read an SGE egress context. The caller is responsible for ensuring
2292 * only one context operation occurs at a time.
2293 */
2294int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2295{
2296 if (id >= 65536)
2297 return -EINVAL;
2298 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2299}
2300
2301/**
2302 * t3_sge_read_cq - read an SGE CQ context
2303 * @adapter: the adapter
2304 * @id: the context id
2305 * @data: holds the retrieved context
2306 *
2307 * Read an SGE CQ context. The caller is responsible for ensuring
2308 * only one context operation occurs at a time.
2309 */
2310int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2311{
2312 if (id >= 65536)
2313 return -EINVAL;
2314 return t3_sge_read_context(F_CQ, adapter, id, data);
2315}
2316
2317/**
2318 * t3_sge_read_fl - read an SGE free-list context
2319 * @adapter: the adapter
2320 * @id: the context id
2321 * @data: holds the retrieved context
2322 *
2323 * Read an SGE free-list context. The caller is responsible for ensuring
2324 * only one context operation occurs at a time.
2325 */
2326int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2327{
2328 if (id >= SGE_QSETS * 2)
2329 return -EINVAL;
2330 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2331}
2332
2333/**
2334 * t3_sge_read_rspq - read an SGE response queue context
2335 * @adapter: the adapter
2336 * @id: the context id
2337 * @data: holds the retrieved context
2338 *
2339 * Read an SGE response queue context. The caller is responsible for
2340 * ensuring only one context operation occurs at a time.
2341 */
2342int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2343{
2344 if (id >= SGE_QSETS)
2345 return -EINVAL;
2346 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2347}
2348
2349/**
2350 * t3_config_rss - configure Rx packet steering
2351 * @adapter: the adapter
2352 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2353 * @cpus: values for the CPU lookup table (0xff terminated)
2354 * @rspq: values for the response queue lookup table (0xffff terminated)
2355 *
2356 * Programs the receive packet steering logic. @cpus and @rspq provide
2357 * the values for the CPU and response queue lookup tables. If they
 2358 * provide fewer values than the size of the tables, the supplied values
2359 * are used repeatedly until the tables are fully populated.
2360 */
2361void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
 2362 const u8 *cpus, const u16 *rspq)
2363{
2364 int i, j, cpu_idx = 0, q_idx = 0;
2365
2366 if (cpus)
2367 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2368 u32 val = i << 16;
2369
2370 for (j = 0; j < 2; ++j) {
2371 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2372 if (cpus[cpu_idx] == 0xff)
2373 cpu_idx = 0;
2374 }
2375 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2376 }
2377
2378 if (rspq)
2379 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2380 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2381 (i << 16) | rspq[q_idx++]);
2382 if (rspq[q_idx] == 0xffff)
2383 q_idx = 0;
2384 }
2385
2386 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2387}
2388
2389/**
2390 * t3_read_rss - read the contents of the RSS tables
2391 * @adapter: the adapter
2392 * @lkup: holds the contents of the RSS lookup table
2393 * @map: holds the contents of the RSS map table
2394 *
2395 * Reads the contents of the receive packet steering tables.
2396 */
 2397 int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2398{
2399 int i;
2400 u32 val;
2401
2402 if (lkup)
2403 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2404 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2405 0xffff0000 | i);
2406 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2407 if (!(val & 0x80000000))
2408 return -EAGAIN;
2409 *lkup++ = val;
2410 *lkup++ = (val >> 8);
2411 }
2412
2413 if (map)
2414 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2415 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2416 0xffff0000 | i);
2417 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2418 if (!(val & 0x80000000))
2419 return -EAGAIN;
2420 *map++ = val;
2421 }
2422 return 0;
2423}
2424
2425/**
2426 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2427 * @adap: the adapter
2428 * @enable: 1 to select offload mode, 0 for regular NIC
2429 *
2430 * Switches TP to NIC/offload mode.
2431 */
2432void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2433{
2434 if (is_offload(adap) || !enable)
2435 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2436 V_NICMODE(!enable));
2437}
2438
2439/**
2440 * pm_num_pages - calculate the number of pages of the payload memory
2441 * @mem_size: the size of the payload memory
2442 * @pg_size: the size of each payload memory page
2443 *
2444 * Calculate the number of pages, each of the given size, that fit in a
2445 * memory of the specified size, respecting the HW requirement that the
2446 * number of pages must be a multiple of 24.
2447 */
2448static inline unsigned int pm_num_pages(unsigned int mem_size,
2449 unsigned int pg_size)
2450{
2451 unsigned int n = mem_size / pg_size;
2452
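	/* e.g. 64 MB of 16 KB pages: n = 4096, trimmed to 4080 (170 * 24) */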
2453 return n - n % 24;
2454}
2455
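/*
 * Program a TP memory-region base register and advance the running
 * offset.  Note the expansion is two statements, so the macro must not
 * be used as the un-braced body of a conditional.
 */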
2456#define mem_region(adap, start, size, reg) \
2457 t3_write_reg((adap), A_ ## reg, (start)); \
2458 start += size
2459
b881955b 2460/**
2461 * partition_mem - partition memory and configure TP memory settings
2462 * @adap: the adapter
2463 * @p: the TP parameters
2464 *
2465 * Partitions context and payload memory and configures TP's memory
2466 * registers.
2467 */
2468static void partition_mem(struct adapter *adap, const struct tp_params *p)
2469{
2470 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2471 unsigned int timers = 0, timers_shift = 22;
2472
2473 if (adap->params.rev > 0) {
2474 if (tids <= 16 * 1024) {
2475 timers = 1;
2476 timers_shift = 16;
2477 } else if (tids <= 64 * 1024) {
2478 timers = 2;
2479 timers_shift = 18;
2480 } else if (tids <= 256 * 1024) {
2481 timers = 3;
2482 timers_shift = 20;
2483 }
2484 }
2485
2486 t3_write_reg(adap, A_TP_PMM_SIZE,
2487 p->chan_rx_size | (p->chan_tx_size >> 16));
2488
2489 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2490 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2491 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2492 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2493 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2494
2495 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2496 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2497 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2498
2499 pstructs = p->rx_num_pgs + p->tx_num_pgs;
 2500 /* Add a bit of headroom and make it a multiple of 24 */
2501 pstructs += 48;
2502 pstructs -= pstructs % 24;
2503 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2504
2505 m = tids * TCB_SIZE;
2506 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2507 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2508 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2509 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2510 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2511 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2512 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2513 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2514
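	/* round up to 4 KB; the remainder of CM memory is handed to the CIM */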
2515 m = (m + 4095) & ~0xfff;
2516 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2517 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2518
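	/*
	 * Estimate how many TIDs the leftover CM memory can back, at what
	 * appears to be 3 KB of connection state each (minus some headroom);
	 * if the TCAM holds more client TIDs than that, convert the excess
	 * into server entries.
	 */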
2519 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2520 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2521 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2522 if (tids < m)
2523 adap->params.mc5.nservers += m - tids;
2524}
2525
2526static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2527 u32 val)
2528{
2529 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2530 t3_write_reg(adap, A_TP_PIO_DATA, val);
2531}
2532
2533static void tp_config(struct adapter *adap, const struct tp_params *p)
2534{
2535 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2536 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2537 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2538 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2539 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
8a9fab22 2540 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2541 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2542 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2543 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2544 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
b881955b 2545 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2546 F_IPV6ENABLE | F_NICMODE);
2547 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2548 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2549 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2550 adap->params.rev > 0 ? F_ENABLEESND :
2551 F_T3A_ENABLEESND);
4d22de3e 2552
3b1d307b 2553 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2554 F_ENABLEEPCMDAFULL,
 2555 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2556 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2557 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2558 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2559 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2560 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2561 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2eab17ab 2562
2563 if (adap->params.rev > 0) {
2564 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2565 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2566 F_TXPACEAUTO);
2567 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2568 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2569 } else
2570 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2571
2572 if (adap->params.rev == T3_REV_C)
2573 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2574 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2575 V_TABLELATENCYDELTA(4));
2576
2577 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2578 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2579 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2580 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2581}
2582
2583/* Desired TP timer resolution in usec */
2584#define TP_TMR_RES 50
2585
2586/* TCP timer values in ms */
2587#define TP_DACK_TIMER 50
2588#define TP_RTO_MIN 250
2589
2590/**
2591 * tp_set_timers - set TP timing parameters
2592 * @adap: the adapter to set
2593 * @core_clk: the core clock frequency in Hz
2594 *
2595 * Set TP's timing parameters, such as the various timer resolutions and
2596 * the TCP timer values.
2597 */
2598static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2599{
2600 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2601 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2602 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2603 unsigned int tps = core_clk >> tre;
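	/*
	 * tre is log2 of the core-clock divider, picked so one timer tick
	 * is no longer than TP_TMR_RES usecs; tps is the resulting number
	 * of timer ticks per second.
	 */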
2604
2605 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2606 V_DELAYEDACKRESOLUTION(dack_re) |
2607 V_TIMESTAMPRESOLUTION(tstamp_re));
2608 t3_write_reg(adap, A_TP_DACK_TIMER,
2609 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2610 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2611 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2612 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2613 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2614 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2615 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2616 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2617 V_KEEPALIVEMAX(9));
2618
2619#define SECONDS * tps
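/* "n SECONDS" expands to "n * tps", i.e. n seconds expressed in timer ticks */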
2620
2621 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2622 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2623 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2624 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2625 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2626 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2627 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2628 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2629 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2630
2631#undef SECONDS
2632}
2633
2634/**
2635 * t3_tp_set_coalescing_size - set receive coalescing size
2636 * @adap: the adapter
2637 * @size: the receive coalescing size
2638 * @psh: whether a set PSH bit should deliver coalesced data
2639 *
2640 * Set the receive coalescing size and PSH bit handling.
2641 */
2642int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2643{
2644 u32 val;
2645
2646 if (size > MAX_RX_COALESCING_LEN)
2647 return -EINVAL;
2648
2649 val = t3_read_reg(adap, A_TP_PARA_REG3);
2650 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2651
2652 if (size) {
2653 val |= F_RXCOALESCEENABLE;
2654 if (psh)
2655 val |= F_RXCOALESCEPSHEN;
8a9fab22 2656 size = min(MAX_RX_COALESCING_LEN, size);
2657 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2658 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2659 }
2660 t3_write_reg(adap, A_TP_PARA_REG3, val);
2661 return 0;
2662}
2663
2664/**
2665 * t3_tp_set_max_rxsize - set the max receive size
2666 * @adap: the adapter
2667 * @size: the max receive size
2668 *
2669 * Set TP's max receive size. This is the limit that applies when
2670 * receive coalescing is disabled.
2671 */
2672void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2673{
2674 t3_write_reg(adap, A_TP_PARA_REG7,
2675 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2676}
2677
7b9b0943 2678static void init_mtus(unsigned short mtus[])
2679{
2680 /*
2681 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
 2682 * it can accommodate max size TCP/IP headers when SACK and timestamps
2683 * are enabled and still have at least 8 bytes of payload.
2684 */
75758e8a 2685 mtus[0] = 88;
2686 mtus[1] = 88;
2687 mtus[2] = 256;
2688 mtus[3] = 512;
2689 mtus[4] = 576;
2690 mtus[5] = 1024;
2691 mtus[6] = 1280;
2692 mtus[7] = 1492;
2693 mtus[8] = 1500;
2694 mtus[9] = 2002;
2695 mtus[10] = 2048;
2696 mtus[11] = 4096;
2697 mtus[12] = 4352;
2698 mtus[13] = 8192;
2699 mtus[14] = 9000;
2700 mtus[15] = 9600;
2701}
2702
2703/*
2704 * Initial congestion control parameters.
2705 */
7b9b0943 2706static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2707{
2708 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2709 a[9] = 2;
2710 a[10] = 3;
2711 a[11] = 4;
2712 a[12] = 5;
2713 a[13] = 6;
2714 a[14] = 7;
2715 a[15] = 8;
2716 a[16] = 9;
2717 a[17] = 10;
2718 a[18] = 14;
2719 a[19] = 17;
2720 a[20] = 21;
2721 a[21] = 25;
2722 a[22] = 30;
2723 a[23] = 35;
2724 a[24] = 45;
2725 a[25] = 60;
2726 a[26] = 80;
2727 a[27] = 100;
2728 a[28] = 200;
2729 a[29] = 300;
2730 a[30] = 400;
2731 a[31] = 500;
2732
2733 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2734 b[9] = b[10] = 1;
2735 b[11] = b[12] = 2;
2736 b[13] = b[14] = b[15] = b[16] = 3;
2737 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2738 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2739 b[28] = b[29] = 6;
2740 b[30] = b[31] = 7;
2741}
2742
2743/* The minimum additive increment value for the congestion control table */
2744#define CC_MIN_INCR 2U
2745
2746/**
2747 * t3_load_mtus - write the MTU and congestion control HW tables
2748 * @adap: the adapter
2749 * @mtus: the unrestricted values for the MTU table
 2750 * @alpha: the values for the congestion control alpha parameter
2751 * @beta: the values for the congestion control beta parameter
2752 * @mtu_cap: the maximum permitted effective MTU
2753 *
 2754 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2755 * Update the high-speed congestion control table with the supplied alpha,
2756 * beta, and MTUs.
2757 */
2758void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2759 unsigned short alpha[NCCTRL_WIN],
2760 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2761{
2762 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2763 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2764 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2765 28672, 40960, 57344, 81920, 114688, 163840, 229376
2766 };
2767
2768 unsigned int i, w;
2769
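	/*
	 * Each MTU entry is written with its index and a log2 size hint;
	 * for each congestion window the additive increment scales alpha
	 * by the payload size (mtu - 40) over the window's average packet
	 * count, floored at CC_MIN_INCR.
	 */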
2770 for (i = 0; i < NMTUS; ++i) {
2771 unsigned int mtu = min(mtus[i], mtu_cap);
2772 unsigned int log2 = fls(mtu);
2773
2774 if (!(mtu & ((1 << log2) >> 2))) /* round */
2775 log2--;
2776 t3_write_reg(adap, A_TP_MTU_TABLE,
2777 (i << 24) | (log2 << 16) | mtu);
2778
2779 for (w = 0; w < NCCTRL_WIN; ++w) {
2780 unsigned int inc;
2781
2782 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2783 CC_MIN_INCR);
2784
2785 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2786 (w << 16) | (beta[w] << 13) | inc);
2787 }
2788 }
2789}
2790
2791/**
2792 * t3_read_hw_mtus - returns the values in the HW MTU table
2793 * @adap: the adapter
2794 * @mtus: where to store the HW MTU values
2795 *
2796 * Reads the HW MTU table.
2797 */
2798void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2799{
2800 int i;
2801
2802 for (i = 0; i < NMTUS; ++i) {
2803 unsigned int val;
2804
2805 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2806 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2807 mtus[i] = val & 0x3fff;
2808 }
2809}
2810
2811/**
2812 * t3_get_cong_cntl_tab - reads the congestion control table
2813 * @adap: the adapter
2814 * @incr: where to store the alpha values
2815 *
2816 * Reads the additive increments programmed into the HW congestion
2817 * control table.
2818 */
2819void t3_get_cong_cntl_tab(struct adapter *adap,
2820 unsigned short incr[NMTUS][NCCTRL_WIN])
2821{
2822 unsigned int mtu, w;
2823
2824 for (mtu = 0; mtu < NMTUS; ++mtu)
2825 for (w = 0; w < NCCTRL_WIN; ++w) {
2826 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2827 0xffff0000 | (mtu << 5) | w);
2828 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2829 0x1fff;
2830 }
2831}
2832
2833/**
2834 * t3_tp_get_mib_stats - read TP's MIB counters
2835 * @adap: the adapter
2836 * @tps: holds the returned counter values
2837 *
2838 * Returns the values of TP's MIB counters.
2839 */
2840void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2841{
2842 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2843 sizeof(*tps) / sizeof(u32), 0);
2844}
2845
2846#define ulp_region(adap, name, start, len) \
2847 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2848 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2849 (start) + (len) - 1); \
2850 start += len
2851
2852#define ulptx_region(adap, name, start, len) \
2853 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2854 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2855 (start) + (len) - 1)
2856
2857static void ulp_config(struct adapter *adap, const struct tp_params *p)
2858{
2859 unsigned int m = p->chan_rx_size;
2860
2861 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2862 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2863 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2864 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2865 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2866 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2867 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2868 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2869}
2870
2871/**
2872 * t3_set_proto_sram - set the contents of the protocol sram
2873 * @adapter: the adapter
2874 * @data: the protocol image
2875 *
2876 * Write the contents of the protocol SRAM.
2877 */
2878int t3_set_proto_sram(struct adapter *adap, u8 *data)
2879{
2880 int i;
05e5c116 2881 __be32 *buf = (__be32 *)data;
2882
2883 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2884 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2885 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2886 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2887 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2888 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 2889
2890 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2891 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2892 return -EIO;
2893 }
2894 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2895
2896 return 0;
2897}
2898
2899void t3_config_trace_filter(struct adapter *adapter,
2900 const struct trace_params *tp, int filter_index,
2901 int invert, int enable)
2902{
2903 u32 addr, key[4], mask[4];
2904
2905 key[0] = tp->sport | (tp->sip << 16);
2906 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2907 key[2] = tp->dip;
2908 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2909
2910 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2911 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2912 mask[2] = tp->dip_mask;
2913 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2914
2915 if (invert)
2916 key[3] |= (1 << 29);
2917 if (enable)
2918 key[3] |= (1 << 28);
2919
2920 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2921 tp_wr_indirect(adapter, addr++, key[0]);
2922 tp_wr_indirect(adapter, addr++, mask[0]);
2923 tp_wr_indirect(adapter, addr++, key[1]);
2924 tp_wr_indirect(adapter, addr++, mask[1]);
2925 tp_wr_indirect(adapter, addr++, key[2]);
2926 tp_wr_indirect(adapter, addr++, mask[2]);
2927 tp_wr_indirect(adapter, addr++, key[3]);
2928 tp_wr_indirect(adapter, addr, mask[3]);
2929 t3_read_reg(adapter, A_TP_PIO_DATA);
2930}
2931
2932/**
2933 * t3_config_sched - configure a HW traffic scheduler
2934 * @adap: the adapter
2935 * @kbps: target rate in Kbps
2936 * @sched: the scheduler index
2937 *
2938 * Configure a HW scheduler for the target rate
2939 */
2940int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2941{
2942 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2943 unsigned int clk = adap->params.vpd.cclk * 1000;
2944 unsigned int selected_cpt = 0, selected_bpt = 0;
2945
2946 if (kbps > 0) {
2947 kbps *= 125; /* -> bytes */
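		/*
		 * Exhaustively try each clocks-per-tick (cpt) value and keep
		 * the bytes-per-tick (bpt) pairing whose rate bpt * clk / cpt
		 * lands closest to the requested rate.
		 */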
2948 for (cpt = 1; cpt <= 255; cpt++) {
2949 tps = clk / cpt;
2950 bpt = (kbps + tps / 2) / tps;
2951 if (bpt > 0 && bpt <= 255) {
2952 v = bpt * tps;
2953 delta = v >= kbps ? v - kbps : kbps - v;
2954 if (delta <= mindelta) {
2955 mindelta = delta;
2956 selected_cpt = cpt;
2957 selected_bpt = bpt;
2958 }
2959 } else if (selected_cpt)
2960 break;
2961 }
2962 if (!selected_cpt)
2963 return -EINVAL;
2964 }
2965 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2966 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2967 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2968 if (sched & 1)
2969 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2970 else
2971 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2972 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2973 return 0;
2974}
2975
2976static int tp_init(struct adapter *adap, const struct tp_params *p)
2977{
2978 int busy = 0;
2979
2980 tp_config(adap, p);
2981 t3_set_vlan_accel(adap, 3, 0);
2982
2983 if (is_offload(adap)) {
2984 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2985 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2986 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2987 0, 1000, 5);
2988 if (busy)
2989 CH_ERR(adap, "TP initialization timed out\n");
2990 }
2991
2992 if (!busy)
2993 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2994 return busy;
2995}
2996
2997int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2998{
2999 if (port_mask & ~((1 << adap->params.nports) - 1))
3000 return -EINVAL;
3001 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3002 port_mask << S_PORT0ACTIVE);
3003 return 0;
3004}
3005
3006/*
3007 * Perform the bits of HW initialization that are dependent on the number
3008 * of available ports.
3009 */
3010static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3011{
3012 int i;
3013
3014 if (nports == 1) {
3015 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3016 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3017 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3018 F_PORT0ACTIVE | F_ENFORCEPKT);
8a9fab22 3019 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3020 } else {
3021 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3022 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3023 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3024 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3025 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3026 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3027 F_ENFORCEPKT);
3028 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3029 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3030 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3031 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3032 for (i = 0; i < 16; i++)
3033 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3034 (i << 16) | 0x1010);
3035 }
3036}
3037
3038static int calibrate_xgm(struct adapter *adapter)
3039{
3040 if (uses_xaui(adapter)) {
3041 unsigned int v, i;
3042
3043 for (i = 0; i < 5; ++i) {
3044 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3045 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3046 msleep(1);
3047 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3048 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3049 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3050 V_XAUIIMP(G_CALIMP(v) >> 2));
3051 return 0;
3052 }
3053 }
3054 CH_ERR(adapter, "MAC calibration failed\n");
3055 return -1;
3056 } else {
3057 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3058 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3059 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3060 F_XGM_IMPSETUPDATE);
3061 }
3062 return 0;
3063}
3064
3065static void calibrate_xgm_t3b(struct adapter *adapter)
3066{
3067 if (!uses_xaui(adapter)) {
3068 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3069 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3070 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3071 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3072 F_XGM_IMPSETUPDATE);
3073 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3074 0);
3075 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3076 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3077 }
3078}
3079
3080struct mc7_timing_params {
3081 unsigned char ActToPreDly;
3082 unsigned char ActToRdWrDly;
3083 unsigned char PreCyc;
3084 unsigned char RefCyc[5];
3085 unsigned char BkCyc;
3086 unsigned char WrToRdDly;
3087 unsigned char RdToWrDly;
3088};
3089
3090/*
3091 * Write a value to a register and check that the write completed. These
3092 * writes normally complete in a cycle or two, so one read should suffice.
3093 * The very first read exists to flush the posted write to the device.
3094 */
3095static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3096{
3097 t3_write_reg(adapter, addr, val);
3098 t3_read_reg(adapter, addr); /* flush */
3099 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3100 return 0;
3101 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3102 return -EIO;
3103}
3104
3105static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3106{
3107 static const unsigned int mc7_mode[] = {
3108 0x632, 0x642, 0x652, 0x432, 0x442
3109 };
3110 static const struct mc7_timing_params mc7_timings[] = {
3111 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3112 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3113 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3114 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3115 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3116 };
3117
3118 u32 val;
3119 unsigned int width, density, slow, attempts;
3120 struct adapter *adapter = mc7->adapter;
3121 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3122
3123 if (!mc7->size)
3124 return 0;
3125
3126 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3127 slow = val & F_SLOW;
3128 width = G_WIDTH(val);
3129 density = G_DEN(val);
3130
3131 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3132 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3133 msleep(1);
3134
3135 if (!slow) {
3136 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3137 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3138 msleep(1);
3139 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3140 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3141 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3142 mc7->name);
3143 goto out_fail;
3144 }
3145 }
3146
3147 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3148 V_ACTTOPREDLY(p->ActToPreDly) |
3149 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3150 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3151 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3152
3153 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3154 val | F_CLKEN | F_TERM150);
3155 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3156
3157 if (!slow)
3158 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3159 F_DLLENB);
3160 udelay(1);
3161
3162 val = slow ? 3 : 6;
3163 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3164 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3165 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3166 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3167 goto out_fail;
3168
3169 if (!slow) {
3170 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3171 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3172 udelay(5);
3173 }
3174
3175 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3176 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3177 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3178 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3179 mc7_mode[mem_type]) ||
3180 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3181 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3182 goto out_fail;
3183
3184 /* clock value is in KHz */
3185 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3186 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
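	/*
	 * mc7_clock now counts clock cycles per 7812.5 ns, which matches
	 * the usual DDR average refresh interval (64 ms / 8192 rows), and
	 * is used below as the refresh divider.
	 */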
3187
3188 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3189 F_PERREFEN | V_PREREFDIV(mc7_clock));
3190 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3191
3192 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3193 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3194 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3195 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3196 (mc7->size << width) - 1);
3197 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3198 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3199
3200 attempts = 50;
3201 do {
3202 msleep(250);
3203 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3204 } while ((val & F_BUSY) && --attempts);
3205 if (val & F_BUSY) {
3206 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3207 goto out_fail;
3208 }
3209
3210 /* Enable normal memory accesses. */
3211 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3212 return 0;
3213
3214out_fail:
3215 return -1;
3216}
3217
3218static void config_pcie(struct adapter *adap)
3219{
3220 static const u16 ack_lat[4][6] = {
3221 {237, 416, 559, 1071, 2095, 4143},
3222 {128, 217, 289, 545, 1057, 2081},
3223 {73, 118, 154, 282, 538, 1050},
3224 {67, 107, 86, 150, 278, 534}
3225 };
3226 static const u16 rpl_tmr[4][6] = {
3227 {711, 1248, 1677, 3213, 6285, 12429},
3228 {384, 651, 867, 1635, 3171, 6243},
3229 {219, 354, 462, 846, 1614, 3150},
3230 {201, 321, 258, 450, 834, 1602}
3231 };
3232
3233 u16 val;
3234 unsigned int log2_width, pldsize;
3235 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3236
3237 pci_read_config_word(adap->pdev,
3238 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3239 &val);
3240 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3241 pci_read_config_word(adap->pdev,
3242 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3243 &val);
3244
3245 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3246 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3247 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3248 log2_width = fls(adap->params.pci.width) - 1;
3249 acklat = ack_lat[log2_width][pldsize];
3250 if (val & 1) /* check LOsEnable */
3251 acklat += fst_trn_tx * 4;
3252 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3253
3254 if (adap->params.rev == 0)
3255 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3256 V_T3A_ACKLAT(M_T3A_ACKLAT),
3257 V_T3A_ACKLAT(acklat));
3258 else
3259 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3260 V_ACKLAT(acklat));
3261
3262 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3263 V_REPLAYLMT(rpllmt));
3264
3265 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
b881955b 3266 t3_set_reg_field(adap, A_PCIE_CFG, 0,
204e2f98 3267 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
b881955b 3268 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3269}
3270
3271/*
3272 * Initialize and configure T3 HW modules. This performs the
3273 * initialization steps that need to be done once after a card is reset.
 3274 * MAC and PHY initialization is handled separately whenever a port is enabled.
 3275 *
 3276 * fw_params is passed to the FW and its value is platform dependent. Only the
3277 * top 8 bits are available for use, the rest must be 0.
3278 */
3279int t3_init_hw(struct adapter *adapter, u32 fw_params)
3280{
b881955b 3281 int err = -EIO, attempts, i;
3282 const struct vpd_params *vpd = &adapter->params.vpd;
3283
3284 if (adapter->params.rev > 0)
3285 calibrate_xgm_t3b(adapter);
3286 else if (calibrate_xgm(adapter))
3287 goto out_err;
3288
3289 if (vpd->mclk) {
3290 partition_mem(adapter, &adapter->params.tp);
3291
3292 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3293 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3294 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3295 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3296 adapter->params.mc5.nfilters,
3297 adapter->params.mc5.nroutes))
3298 goto out_err;
3299
3300 for (i = 0; i < 32; i++)
3301 if (clear_sge_ctxt(adapter, i, F_CQ))
3302 goto out_err;
3303 }
3304
3305 if (tp_init(adapter, &adapter->params.tp))
3306 goto out_err;
3307
3308 t3_tp_set_coalescing_size(adapter,
3309 min(adapter->params.sge.max_pkt_size,
3310 MAX_RX_COALESCING_LEN), 1);
3311 t3_tp_set_max_rxsize(adapter,
3312 min(adapter->params.sge.max_pkt_size, 16384U));
3313 ulp_config(adapter, &adapter->params.tp);
3314
3315 if (is_pcie(adapter))
3316 config_pcie(adapter);
3317 else
3318 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3319 F_DMASTOPEN | F_CLIDECEN);
4d22de3e 3320
3321 if (adapter->params.rev == T3_REV_C)
3322 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3323 F_CFG_CQE_SOP_MASK);
3324
8a9fab22 3325 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3326 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3327 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3328 init_hw_for_avail_ports(adapter, adapter->params.nports);
3329 t3_sge_init(adapter, &adapter->params.sge);
3330
3331 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3332 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3333 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3334 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3335
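	/* Poll below for up to 100 * 20 ms = 2 s in total. */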
b881955b 3336 attempts = 100;
3337 do { /* wait for uP to initialize */
3338 msleep(20);
3339 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3340 if (!attempts) {
3341 CH_ERR(adapter, "uP initialization timed out\n");
4d22de3e 3342 goto out_err;
8ac3ba68 3343 }
3344
3345 err = 0;
3346out_err:
3347 return err;
3348}
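
/*
 * Illustrative call sequence (a sketch, not taken from this file): a
 * driver would typically prepare the adapter once at probe time and run
 * the one-time HW init after each full reset, e.g.:
 *
 *	if (!t3_prep_adapter(adapter, ai, 1))
 *		err = t3_init_hw(adapter, 0);
 *
 * The fw_params value of 0 here is a placeholder; as noted above, only
 * the top 8 bits carry platform-dependent data.
 */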
3349
3350/**
3351 * get_pci_mode - determine a card's PCI mode
3352 * @adapter: the adapter
3353 * @p: where to store the PCI settings
3354 *
3355 * Determines a card's PCI mode and associated parameters, such as speed
3356 * and width.
3357 */
7b9b0943 3358static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3359{
3360 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3361 u32 pci_mode, pcie_cap;
3362
3363 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3364 if (pcie_cap) {
3365 u16 val;
3366
3367 p->variant = PCI_VARIANT_PCIE;
3368 p->pcie_cap_addr = pcie_cap;
3369 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3370 &val);
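		/* Bits 9:4 of the PCIe Link Status register hold the
		 * negotiated link width. */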
3371 p->width = (val >> 4) & 0x3f;
3372 return;
3373 }
3374
3375 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3376 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3377 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3378 pci_mode = G_PCIXINITPAT(pci_mode);
3379 if (pci_mode == 0)
3380 p->variant = PCI_VARIANT_PCI;
3381 else if (pci_mode < 4)
3382 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3383 else if (pci_mode < 8)
3384 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3385 else
3386 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3387}
3388
3389/**
3390 * init_link_config - initialize a link's SW state
3391 * @lc: structure holding the link state
 3392  * @caps: bit mask of the link's supported capabilities
3393 *
3394 * Initializes the SW state maintained for each link, including the link's
3395 * capabilities and default speed/duplex/flow-control/autonegotiation
3396 * settings.
3397 */
7b9b0943 3398static void init_link_config(struct link_config *lc, unsigned int caps)
3399{
3400 lc->supported = caps;
3401 lc->requested_speed = lc->speed = SPEED_INVALID;
3402 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3403 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3404 if (lc->supported & SUPPORTED_Autoneg) {
3405 lc->advertising = lc->supported;
3406 lc->autoneg = AUTONEG_ENABLE;
3407 lc->requested_fc |= PAUSE_AUTONEG;
3408 } else {
3409 lc->advertising = 0;
3410 lc->autoneg = AUTONEG_DISABLE;
3411 }
3412}
3413
3414/**
3415 * mc7_calc_size - calculate MC7 memory size
3416 * @cfg: the MC7 configuration
3417 *
3418 * Calculates the size of an MC7 memory in bytes from the value of its
3419 * configuration register.
3420 */
7b9b0943 3421static unsigned int mc7_calc_size(u32 cfg)
3422{
3423 unsigned int width = G_WIDTH(cfg);
3424 unsigned int banks = !!(cfg & F_BKS) + 1;
3425 unsigned int org = !!(cfg & F_ORG) + 1;
3426 unsigned int density = G_DEN(cfg);
3427 unsigned int MBs = ((256 << density) * banks) / (org << width);
3428
3429 return MBs << 20;
3430}
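
/*
 * Worked example from the formula above: density code 1 with two banks
 * (F_BKS set), x1 organization and width code 0 gives
 * MBs = ((256 << 1) * 2) / (1 << 0) = 1024, i.e. a 1 GB part.
 */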
3431
3432static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3433 unsigned int base_addr, const char *name)
3434{
3435 u32 cfg;
3436
3437 mc7->adapter = adapter;
3438 mc7->name = name;
3439 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3440 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3441 	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3442 mc7->width = G_WIDTH(cfg);
3443}
3444
3445void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3446{
3447 mac->adapter = adapter;
3448 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3449 mac->nucast = 1;
3450
3451 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3452 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3453 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3454 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3455 F_ENRGMII, 0);
3456 }
3457}
3458
3459void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3460{
3461 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3462
3463 mi1_init(adapter, ai);
3464 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3465 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3466 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3467 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
8ac3ba68 3468 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
b881955b 3469 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3470
3471 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3472 val |= F_ENRGMII;
3473
3474 /* Enable MAC clocks so we can access the registers */
3475 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3476 t3_read_reg(adapter, A_XGM_PORT_CFG);
3477
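	/*
	 * Judging by the field name, this pulses the XGMAC clock divider
	 * reset so the new PORTSPEED setting takes effect on both MACs.
	 */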
3478 val |= F_CLKDIVRESET_;
3479 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3480 t3_read_reg(adapter, A_XGM_PORT_CFG);
3481 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3482 t3_read_reg(adapter, A_XGM_PORT_CFG);
3483}
3484
3485/*
2eab17ab 3486 * Reset the adapter.
e4d08359 3487 * Older PCIe cards lose their config space during reset, PCI-X
3488 * ones don't.
3489 */
9265fabf 3490static int t3_reset_adapter(struct adapter *adapter)
4d22de3e 3491{
2eab17ab 3492 int i, save_and_restore_pcie =
e4d08359 3493 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3494 uint16_t devid = 0;
3495
e4d08359 3496 if (save_and_restore_pcie)
3497 pci_save_state(adapter->pdev);
3498 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3499
3500 /*
 3501 	 * Delay to give the device time to reset fully.
 3502 	 * XXX The delay time should be tuned.
3503 */
3504 for (i = 0; i < 10; i++) {
3505 msleep(50);
3506 pci_read_config_word(adapter->pdev, 0x00, &devid);
3507 if (devid == 0x1425)
3508 break;
3509 }
3510
3511 if (devid != 0x1425)
3512 return -1;
3513
e4d08359 3514 if (save_and_restore_pcie)
3515 pci_restore_state(adapter->pdev);
3516 return 0;
3517}
3518
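/*
 * Seed the SGE egress and response-queue context memories and the CIM
 * IBQ debug RAM with known data so that later reads of uninitialized
 * locations do not raise parity errors.
 */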
7b9b0943 3519static int init_parity(struct adapter *adap)
3520{
3521 int i, err, addr;
3522
3523 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3524 return -EBUSY;
3525
3526 for (err = i = 0; !err && i < 16; i++)
3527 err = clear_sge_ctxt(adap, i, F_EGRESS);
3528 for (i = 0xfff0; !err && i <= 0xffff; i++)
3529 err = clear_sge_ctxt(adap, i, F_EGRESS);
3530 for (i = 0; !err && i < SGE_QSETS; i++)
3531 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3532 if (err)
3533 return err;
3534
3535 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3536 for (i = 0; i < 4; i++)
3537 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3538 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3539 F_IBQDBGWR | V_IBQDBGQID(i) |
3540 V_IBQDBGADDR(addr));
3541 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3542 F_IBQDBGBUSY, 0, 2, 1);
3543 if (err)
3544 return err;
3545 }
3546 return 0;
3547}
3548
3549/*
3550 * Initialize adapter SW state for the various HW modules, set initial values
3551 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3552 * interface.
3553 */
3554int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3555 int reset)
3556{
3557 int ret;
3558 unsigned int i, j = 0;
3559
3560 get_pci_mode(adapter, &adapter->params.pci);
3561
3562 adapter->params.info = ai;
3563 adapter->params.nports = ai->nports;
3564 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3565 adapter->params.linkpoll_period = 0;
3566 adapter->params.stats_update_period = is_10G(adapter) ?
3567 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3568 adapter->params.pci.vpd_cap_addr =
3569 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3570 ret = get_vpd_params(adapter, &adapter->params.vpd);
3571 if (ret < 0)
3572 return ret;
3573
3574 if (reset && t3_reset_adapter(adapter))
3575 return -1;
3576
3577 t3_sge_prep(adapter, &adapter->params.sge);
3578
3579 if (adapter->params.vpd.mclk) {
3580 struct tp_params *p = &adapter->params.tp;
3581
3582 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3583 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3584 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3585
3586 p->nchan = ai->nports;
3587 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3588 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3589 p->cm_size = t3_mc7_size(&adapter->cm);
3590 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3591 p->chan_tx_size = p->pmtx_size / p->nchan;
3592 p->rx_pg_size = 64 * 1024;
3593 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3594 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3595 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
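		/* 12 timer queue sets when the CM is at least 128 MB or
		 * on rev > 0 silicon, otherwise 6. */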
3596 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3597 adapter->params.rev > 0 ? 12 : 6;
3598 }
3599
3600 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3601 t3_mc7_size(&adapter->pmtx) &&
3602 t3_mc7_size(&adapter->cm);
4d22de3e 3603
8ac3ba68 3604 if (is_offload(adapter)) {
3605 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3606 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3607 DEFAULT_NFILTERS : 0;
3608 adapter->params.mc5.nroutes = 0;
3609 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3610
3611 init_mtus(adapter->params.mtus);
3612 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3613 }
3614
3615 early_hw_init(adapter, ai);
3616 ret = init_parity(adapter);
3617 if (ret)
3618 return ret;
3619
3620 for_each_port(adapter, i) {
3621 u8 hw_addr[6];
3622 struct port_info *p = adap2pinfo(adapter, i);
3623
3624 while (!adapter->params.vpd.port_type[j])
3625 ++j;
3626
3627 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3628 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3629 ai->mdio_ops);
3630 mac_prep(&p->mac, adapter, j);
3631 ++j;
3632
3633 /*
3634 * The VPD EEPROM stores the base Ethernet address for the
3635 * card. A port's address is derived from the base by adding
3636 * the port's index to the base's low octet.
3637 */
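		/* E.g. a base ending in ...:a0 yields ...:a0 and ...:a1
		 * for ports 0 and 1. */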
3638 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3639 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3640
3641 memcpy(adapter->port[i]->dev_addr, hw_addr,
3642 ETH_ALEN);
3643 memcpy(adapter->port[i]->perm_addr, hw_addr,
3644 ETH_ALEN);
3645 init_link_config(&p->link_config, p->port_type->caps);
3646 p->phy.ops->power_down(&p->phy, 1);
3647 if (!(p->port_type->caps & SUPPORTED_IRQ))
3648 adapter->params.linkpoll_period = 10;
3649 }
3650
3651 return 0;
3652}
3653
3654void t3_led_ready(struct adapter *adapter)
3655{
3656 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3657 F_GPIO0_OUT_VAL);
3658}
3659
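/*
 * Redo the minimal HW preparation after a PCI error recovery (EEH)
 * reset: early HW init, parity seeding, and per-port PHY prep and
 * power-down. The state discovered by the original t3_prep_adapter()
 * run (VPD, memory sizes) is reused rather than rediscovered.
 */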
3660int t3_replay_prep_adapter(struct adapter *adapter)
3661{
3662 const struct adapter_info *ai = adapter->params.info;
3663 unsigned int i, j = 0;
3664 int ret;
3665
3666 early_hw_init(adapter, ai);
3667 ret = init_parity(adapter);
3668 if (ret)
3669 return ret;
3670
3671 for_each_port(adapter, i) {
3672 struct port_info *p = adap2pinfo(adapter, i);
3673 while (!adapter->params.vpd.port_type[j])
3674 ++j;
3675
3676 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3677 ai->mdio_ops);
3678
3679 p->phy.ops->power_down(&p->phy, 1);
3680 ++j;
3681 }
3682
 3683 	return 0;
3684}
3685