/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
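
/*
 * Illustrative usage sketch (not part of the original source): most
 * callers below go through t3_wait_op_done(), which common.h is assumed
 * to provide as a thin wrapper that passes a NULL @valp:
 *
 *	t3_write_reg(adapter, A_SF_OP, V_OP(1));
 *	if (t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10))
 *		return -EIO;
 */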

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}
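
/*
 * Illustrative usage sketch (not part of the original source): program a
 * small register table in one call, the way t3_intr_enable() below
 * programs its interrupt-enable table:
 *
 *	static const struct addr_val_pair avp[] = {
 *		{A_SG_INT_ENABLE, SGE_INTR_MASK},
 *		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
 *	};
 *
 *	t3_write_regs(adapter, avp, ARRAY_SIZE(avp), 0);
 */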

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
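
/*
 * Illustrative usage sketch (not part of the original source): update
 * only the CLKDIV field of A_MI1_CFG and leave the other bits intact.
 * V_CLKDIV()/M_CLKDIV are assumed to be the usual shift/mask pair from
 * regs.h:
 *
 *	t3_set_reg_field(adap, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
 *			 V_CLKDIV(new_div));
 */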

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}
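
/*
 * Illustrative usage sketch (not part of the original source, register
 * names assumed from regs.h): dump eight consecutive indirect TP
 * registers into a local buffer, starting at index 0:
 *
 *	u32 vals[8];
 *
 *	t3_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 8, 0);
 */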

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
		  V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};
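
/*
 * Illustrative usage sketch (not part of the original source): a direct
 * (clause 22) read of the upper PHY ID word, with @mmd_addr 0 as the ops
 * above require; MII_PHYSID1 comes from linux/mii.h:
 *
 *	unsigned int id;
 *	int err = mi1_read(adapter, phy_addr, 0, MII_PHYSID1, &id);
 */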

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;	/* ADVERTISE_CSMA, the mandatory selector field */
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
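
/*
 * For reference, VPD_ENTRY(sn, 16) expands to the three fields of a
 * VPD-R keyword record:
 *
 *	u8 sn_kword[2];		two-character keyword name, e.g. "SN"
 *	u8 sn_len;		length of the data that follows
 *	u8 sn_data[16];		keyword payload
 */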

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, 16);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
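
/*
 * Illustrative usage sketch (not part of the original source): fetch one
 * aligned 32-bit word of VPD, as get_vpd_params() below does for the
 * whole t3_vpd structure:
 *
 *	u32 word;
 *	int err = t3_seeprom_read(adapter, VPD_BASE, &word);
 */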

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the TP SRAM version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol SRAM version from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the TP SRAM version
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
 * Reads the protocol SRAM version and compares it with the version this
 * driver was built against.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if the provided protocol SRAM image
 *	is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the protocol SRAM image to check
 * @size: image size
 *
 * Checks if an adapter's TP SRAM image is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
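
/*
 * Worked example of the checksum rule used above and by t3_load_fw()
 * below: an image is valid when the 32-bit sum of its big-endian words
 * is 0xffffffff, so a hypothetical image builder appends the one's
 * complement of the running sum as the final word:
 *
 *	u32 sum = 0;
 *	for (i = 0; i < nwords; i++)
 *		sum += ntohl(word[i]);
 *	trailer = htonl(~sum);
 */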

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	CH_ERR(adapter, "found wrong FW version (%u.%u), "
	       "driver needs version %u.%u\n", major, minor,
	       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
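
/*
 * Note that @start and @end are sector numbers in SF_SEC_SIZE (64 KB)
 * units, which is why t3_load_fw() below derives the firmware sector
 * with FW_FLASH_BOOT_ADDR >> 16.
 */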

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
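
/*
 * Note that only the 4-byte version word is preserved at FW_VERS_ADDR;
 * the trailing checksum is verified above but not written back to flash.
 */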

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

#define SGE_INTR_MASK (F_RSPQDISABLED)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			V_BISTERR(M_BISTERR) | F_PEXERR)
#define ULPRX_INTR_MASK F_PARERR
#define ULPTX_INTR_MASK 0
#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  tp_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERR, "ULP RX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
/*		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1}, */
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}

/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 mask, gpi = adapter_info(adapter)->gpio_intr;
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		mask = gpi - (gpi & (gpi - 1));	/* lowest set bit of gpi */
		gpi -= mask;

		if (!(p->port_type->caps & SUPPORTED_IRQ))
			continue;

		if (cause & mask) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
		}
	}

	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}

/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}

/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 * concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_TP_INT_ENABLE, 0x3bfffff},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
		     adapter_info(adapter)->gpio_intr);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
		     adapter_info(adapter)->gpio_intr);
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx));	/* flush */
	phy->ops->intr_enable(phy);
}

/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx));	/* flush */
	phy->ops->intr_disable(phy);
}

/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1852 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1853 phy->ops->intr_clear(phy);
1854}
1855
1856/**
1857 * t3_sge_write_context - write an SGE context
1858 * @adapter: the adapter
1859 * @id: the context id
1860 * @type: the context type
1861 *
1862 * Program an SGE context with the values already loaded in the
1863 * CONTEXT_DATA? registers.
1864 */
1865static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1866 unsigned int type)
1867{
1868 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1869 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1870 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1871 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1872 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1873 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1874 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1875 0, 5, 1);
1876}
1877
1878/**
1879 * t3_sge_init_ecntxt - initialize an SGE egress context
1880 * @adapter: the adapter to configure
1881 * @id: the context id
1882 * @gts_enable: whether to enable GTS for the context
1883 * @type: the egress context type
1884 * @respq: associated response queue
1885 * @base_addr: base address of queue
1886 * @size: number of queue entries
1887 * @token: uP token
1888 * @gen: initial generation value for the context
1889 * @cidx: consumer pointer
1890 *
1891 * Initialize an SGE egress context and make it ready for use. If the
1892 * platform allows concurrent context operations, the caller is
1893 * responsible for appropriate locking.
1894 */
1895int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1896 enum sge_context_type type, int respq, u64 base_addr,
1897 unsigned int size, unsigned int token, int gen,
1898 unsigned int cidx)
1899{
1900 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1901
1902 if (base_addr & 0xfff) /* must be 4K aligned */
1903 return -EINVAL;
1904 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1905 return -EBUSY;
1906
1907 base_addr >>= 12;
1908 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1909 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1910 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1911 V_EC_BASE_LO(base_addr & 0xffff));
1912 base_addr >>= 16;
1913 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1914 base_addr >>= 32;
1915 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1916 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1917 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1918 F_EC_VALID);
1919 return t3_sge_write_context(adapter, id, F_EGRESS);
1920}
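/*
 * Editor's note on the packing above: after the 4K-alignment check,
 * base_addr is reduced to a page number (>> 12) and split across the
 * context words as 16 + 32 + 4 bits (EC_BASE_LO, DATA2, EC_BASE_HI),
 * i.e. a 52-bit page number that covers the full 64-bit DMA address
 * space at 4K granularity.
 */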
1921
1922/**
1923 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1924 * @adapter: the adapter to configure
1925 * @id: the context id
1926 * @gts_enable: whether to enable GTS for the context
1927 * @base_addr: base address of queue
1928 * @size: number of queue entries
1929 * @bsize: size of each buffer for this queue
1930 * @cong_thres: threshold to signal congestion to upstream producers
1931 * @gen: initial generation value for the context
1932 * @cidx: consumer pointer
1933 *
1934 * Initialize an SGE free list context and make it ready for use. The
1935 * caller is responsible for ensuring only one context operation occurs
1936 * at a time.
1937 */
1938int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1939 int gts_enable, u64 base_addr, unsigned int size,
1940 unsigned int bsize, unsigned int cong_thres, int gen,
1941 unsigned int cidx)
1942{
1943 if (base_addr & 0xfff) /* must be 4K aligned */
1944 return -EINVAL;
1945 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1946 return -EBUSY;
1947
1948 base_addr >>= 12;
1949 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1950 base_addr >>= 32;
1951 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1952 V_FL_BASE_HI((u32) base_addr) |
1953 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1954 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1955 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1956 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1957 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1958 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1959 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1960 return t3_sge_write_context(adapter, id, F_FREELIST);
1961}
1962
1963/**
1964 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1965 * @adapter: the adapter to configure
1966 * @id: the context id
1967 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1968 * @base_addr: base address of queue
1969 * @size: number of queue entries
1970 * @fl_thres: threshold for selecting the normal or jumbo free list
1971 * @gen: initial generation value for the context
1972 * @cidx: consumer pointer
1973 *
1974 * Initialize an SGE response queue context and make it ready for use.
1975 * The caller is responsible for ensuring only one context operation
1976 * occurs at a time.
1977 */
1978int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1979 int irq_vec_idx, u64 base_addr, unsigned int size,
1980 unsigned int fl_thres, int gen, unsigned int cidx)
1981{
1982 unsigned int intr = 0;
1983
1984 if (base_addr & 0xfff) /* must be 4K aligned */
1985 return -EINVAL;
1986 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1987 return -EBUSY;
1988
1989 base_addr >>= 12;
1990 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1991 V_CQ_INDEX(cidx));
1992 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1993 base_addr >>= 32;
1994 if (irq_vec_idx >= 0)
1995 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1996 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1997 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1998 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1999 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2000}
2001
2002/**
2003 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2004 * @adapter: the adapter to configure
2005 * @id: the context id
2006 * @base_addr: base address of queue
2007 * @size: number of queue entries
2008 * @rspq: response queue for async notifications
2009 * @ovfl_mode: CQ overflow mode
2010 * @credits: completion queue credits
2011 * @credit_thres: the credit threshold
2012 *
2013 * Initialize an SGE completion queue context and make it ready for use.
2014 * The caller is responsible for ensuring only one context operation
2015 * occurs at a time.
2016 */
2017int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2018 unsigned int size, int rspq, int ovfl_mode,
2019 unsigned int credits, unsigned int credit_thres)
2020{
2021 if (base_addr & 0xfff) /* must be 4K aligned */
2022 return -EINVAL;
2023 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2024 return -EBUSY;
2025
2026 base_addr >>= 12;
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2028 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2029 base_addr >>= 32;
2030 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2031 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2032 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
2033 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2034 V_CQ_CREDIT_THRES(credit_thres));
2035 return t3_sge_write_context(adapter, id, F_CQ);
2036}
2037
2038/**
2039 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2040 * @adapter: the adapter
2041 * @id: the egress context id
2042 * @enable: enable (1) or disable (0) the context
2043 *
2044 * Enable or disable an SGE egress context. The caller is responsible for
2045 * ensuring only one context operation occurs at a time.
2046 */
2047int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2048{
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2053 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2054 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2055 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2056 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2057 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2058 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2059 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2060 0, 5, 1);
2061}
2062
2063/**
2064 * t3_sge_disable_fl - disable an SGE free-buffer list
2065 * @adapter: the adapter
2066 * @id: the free list context id
2067 *
2068 * Disable an SGE free-buffer list. The caller is responsible for
2069 * ensuring only one context operation occurs at a time.
2070 */
2071int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2072{
2073 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2074 return -EBUSY;
2075
2076 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2077 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2078 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2079 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2080 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2081 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2082 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2083 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2084 0, 5, 1);
2085}
2086
2087/**
2088 * t3_sge_disable_rspcntxt - disable an SGE response queue
2089 * @adapter: the adapter
2090 * @id: the response queue context id
2091 *
2092 * Disable an SGE response queue. The caller is responsible for
2093 * ensuring only one context operation occurs at a time.
2094 */
2095int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2096{
2097 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2098 return -EBUSY;
2099
2100 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2101 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2102 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2103 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2104 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2105 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2106 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2107 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2108 0, 5, 1);
2109}
2110
2111/**
2112 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2113 * @adapter: the adapter
2114 * @id: the completion queue context id
2115 *
2116 * Disable an SGE completion queue. The caller is responsible for
2117 * ensuring only one context operation occurs at a time.
2118 */
2119int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2120{
2121 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2122 return -EBUSY;
2123
2124 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2125 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2126 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2127 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2128 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2129 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2130 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2131 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2132 0, 5, 1);
2133}
2134
2135/**
2136 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2137 * @adapter: the adapter
2138 * @id: the context id
2139 * @op: the operation to perform
2140 *
2141 * Perform the selected operation on an SGE completion queue context.
2142 * The caller is responsible for ensuring only one context operation
2143 * occurs at a time.
2144 */
2145int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2146 unsigned int credits)
2147{
2148 u32 val;
2149
2150 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2151 return -EBUSY;
2152
2153 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2154 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2155 V_CONTEXT(id) | F_CQ);
2156 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2157 0, 5, 1, &val))
2158 return -EIO;
2159
2160 if (op >= 2 && op < 7) {
2161 if (adapter->params.rev > 0)
2162 return G_CQ_INDEX(val);
2163
2164 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2165 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2166 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2167 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2168 return -EIO;
2169 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2170 }
2171 return 0;
2172}
2173
2174/**
2175 * t3_sge_read_context - read an SGE context
2176 * @type: the context type
2177 * @adapter: the adapter
2178 * @id: the context id
2179 * @data: holds the retrieved context
2180 *
2181 * Read an SGE context of the given type. The caller is responsible for
2182 * only one context operation occurs at a time.
2183 */
2184static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2185 unsigned int id, u32 data[4])
2186{
2187 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2188 return -EBUSY;
2189
2190 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2191 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2192 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2193 5, 1))
2194 return -EIO;
2195 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2196 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2197 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2198 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2199 return 0;
2200}
2201
2202/**
2203 * t3_sge_read_ecntxt - read an SGE egress context
2204 * @adapter: the adapter
2205 * @id: the context id
2206 * @data: holds the retrieved context
2207 *
2208 * Read an SGE egress context. The caller is responsible for ensuring
2209 * only one context operation occurs at a time.
2210 */
2211int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2212{
2213 if (id >= 65536)
2214 return -EINVAL;
2215 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2216}
2217
2218/**
2219 * t3_sge_read_cq - read an SGE CQ context
2220 * @adapter: the adapter
2221 * @id: the context id
2222 * @data: holds the retrieved context
2223 *
2224 * Read an SGE CQ context. The caller is responsible for ensuring
2225 * only one context operation occurs at a time.
2226 */
2227int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2228{
2229 if (id >= 65536)
2230 return -EINVAL;
2231 return t3_sge_read_context(F_CQ, adapter, id, data);
2232}
2233
2234/**
2235 * t3_sge_read_fl - read an SGE free-list context
2236 * @adapter: the adapter
2237 * @id: the context id
2238 * @data: holds the retrieved context
2239 *
2240 * Read an SGE free-list context. The caller is responsible for ensuring
2241 * only one context operation occurs at a time.
2242 */
2243int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2244{
2245 if (id >= SGE_QSETS * 2)
2246 return -EINVAL;
2247 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2248}
2249
2250/**
2251 * t3_sge_read_rspq - read an SGE response queue context
2252 * @adapter: the adapter
2253 * @id: the context id
2254 * @data: holds the retrieved context
2255 *
2256 * Read an SGE response queue context. The caller is responsible for
2257 * ensuring only one context operation occurs at a time.
2258 */
2259int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2260{
2261 if (id >= SGE_QSETS)
2262 return -EINVAL;
2263 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2264}
2265
2266/**
2267 * t3_config_rss - configure Rx packet steering
2268 * @adapter: the adapter
2269 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2270 * @cpus: values for the CPU lookup table (0xff terminated)
2271 * @rspq: values for the response queue lookup table (0xffff terminated)
2272 *
2273 * Programs the receive packet steering logic. @cpus and @rspq provide
2274 * the values for the CPU and response queue lookup tables. If they
2275 * provide fewer values than the size of the tables the supplied values
2276 * are used repeatedly until the tables are fully populated.
2277 */
2278void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2279 const u8 * cpus, const u16 *rspq)
2280{
2281 int i, j, cpu_idx = 0, q_idx = 0;
2282
2283 if (cpus)
2284 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2285 u32 val = i << 16;
2286
2287 for (j = 0; j < 2; ++j) {
2288 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2289 if (cpus[cpu_idx] == 0xff)
2290 cpu_idx = 0;
2291 }
2292 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2293 }
2294
2295 if (rspq)
2296 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2297 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2298 (i << 16) | rspq[q_idx++]);
2299 if (rspq[q_idx] == 0xffff)
2300 q_idx = 0;
2301 }
2302
2303 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2304}
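/*
 * Usage sketch (editor's example with made-up table values): both
 * lookup tables wrap on their terminator, so short arrays are repeated
 * until all RSS_TABLE_SIZE entries have been written.
 */
#if 0	/* illustrative only, not built */
static void example_rss_setup(struct adapter *adap)
{
	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };	/* 0xff terminates */
	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff };	/* 0xffff terminates */
	u32 rss_config = 0;	/* real TP_RSS_CONFIG bits omitted here */

	t3_config_rss(adap, rss_config, cpus, rspq);
}
#endif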
2305
2306/**
2307 * t3_read_rss - read the contents of the RSS tables
2308 * @adapter: the adapter
2309 * @lkup: holds the contents of the RSS lookup table
2310 * @map: holds the contents of the RSS map table
2311 *
2312 * Reads the contents of the receive packet steering tables.
2313 */
2314int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2315{
2316 int i;
2317 u32 val;
2318
2319 if (lkup)
2320 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2321 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2322 0xffff0000 | i);
2323 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2324 if (!(val & 0x80000000))
2325 return -EAGAIN;
2326 *lkup++ = val;
2327 *lkup++ = (val >> 8);
2328 }
2329
2330 if (map)
2331 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2332 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2333 0xffff0000 | i);
2334 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2335 if (!(val & 0x80000000))
2336 return -EAGAIN;
2337 *map++ = val;
2338 }
2339 return 0;
2340}
2341
2342/**
2343 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2344 * @adap: the adapter
2345 * @enable: 1 to select offload mode, 0 for regular NIC
2346 *
2347 * Switches TP to NIC/offload mode.
2348 */
2349void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2350{
2351 if (is_offload(adap) || !enable)
2352 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2353 V_NICMODE(!enable));
2354}
2355
2356/**
2357 * pm_num_pages - calculate the number of pages of the payload memory
2358 * @mem_size: the size of the payload memory
2359 * @pg_size: the size of each payload memory page
2360 *
2361 * Calculate the number of pages, each of the given size, that fit in a
2362 * memory of the specified size, respecting the HW requirement that the
2363 * number of pages must be a multiple of 24.
2364 */
2365static inline unsigned int pm_num_pages(unsigned int mem_size,
2366 unsigned int pg_size)
2367{
2368 unsigned int n = mem_size / pg_size;
2369
2370 return n - n % 24;
2371}
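/*
 * Worked example (editor's note): with mem_size = 64 MB and
 * pg_size = 64 KB, n = 1024 and 1024 % 24 = 16, so the function
 * returns 1008, the largest multiple of 24 that fits.
 */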
2372
2373#define mem_region(adap, start, size, reg) \
2374 t3_write_reg((adap), A_ ## reg, (start)); \
2375 start += size
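/*
 * Editor's note: mem_region() expands to two statements and is not
 * wrapped in do { } while (0), so it must not be used as the body of
 * an unbraced if/else; all call sites below use it as a plain
 * statement.
 */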
2376
2377/*
2378 * partition_mem - partition memory and configure TP memory settings
2379 * @adap: the adapter
2380 * @p: the TP parameters
2381 *
2382 * Partitions context and payload memory and configures TP's memory
2383 * registers.
2384 */
2385static void partition_mem(struct adapter *adap, const struct tp_params *p)
2386{
2387 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2388 unsigned int timers = 0, timers_shift = 22;
2389
2390 if (adap->params.rev > 0) {
2391 if (tids <= 16 * 1024) {
2392 timers = 1;
2393 timers_shift = 16;
2394 } else if (tids <= 64 * 1024) {
2395 timers = 2;
2396 timers_shift = 18;
2397 } else if (tids <= 256 * 1024) {
2398 timers = 3;
2399 timers_shift = 20;
2400 }
2401 }
2402
2403 t3_write_reg(adap, A_TP_PMM_SIZE,
2404 p->chan_rx_size | (p->chan_tx_size >> 16));
2405
2406 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2407 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2408 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2409 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2410 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2411
2412 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2413 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2414 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2415
2416 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2417 /* Add a bit of headroom and make multiple of 24 */
2418 pstructs += 48;
2419 pstructs -= pstructs % 24;
2420 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2421
2422 m = tids * TCB_SIZE;
2423 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2424 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2425 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2426 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2427 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2428 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2429 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2430 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2431
2432 m = (m + 4095) & ~0xfff;
2433 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2434 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2435
2436 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2437 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2438 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2439 if (tids < m)
2440 adap->params.mc5.nservers += m - tids;
2441}
2442
2443static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2444 u32 val)
2445{
2446 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2447 t3_write_reg(adap, A_TP_PIO_DATA, val);
2448}
2449
2450static void tp_config(struct adapter *adap, const struct tp_params *p)
2451{
2452 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2453 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2454 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2455 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2456 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2457 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2458 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2459 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2460 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2461 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2462 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2463 F_IPV6ENABLE | F_NICMODE);
2464 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2465 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2466 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2467 adap->params.rev > 0 ? F_ENABLEESND :
2468 F_T3A_ENABLEESND);
2469
2470 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2471 F_ENABLEEPCMDAFULL,
2472 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2473 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2474 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2475 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2476 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2477
2478 if (adap->params.rev > 0) {
2479 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2480 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2481 F_TXPACEAUTO);
2482 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2483 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2484 } else
2485 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2486
2487 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2488 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2489 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2490 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2491}
2492
2493/* Desired TP timer resolution in usec */
2494#define TP_TMR_RES 50
2495
2496/* TCP timer values in ms */
2497#define TP_DACK_TIMER 50
2498#define TP_RTO_MIN 250
2499
2500/**
2501 * tp_set_timers - set TP timing parameters
2502 * @adap: the adapter to set
2503 * @core_clk: the core clock frequency in Hz
2504 *
2505 * Set TP's timing parameters, such as the various timer resolutions and
2506 * the TCP timer values.
2507 */
2508static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2509{
2510 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2511 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2512 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2513 unsigned int tps = core_clk >> tre;
2514
2515 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2516 V_DELAYEDACKRESOLUTION(dack_re) |
2517 V_TIMESTAMPRESOLUTION(tstamp_re));
2518 t3_write_reg(adap, A_TP_DACK_TIMER,
2519 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2520 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2521 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2522 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2523 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2524 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2525 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2526 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2527 V_KEEPALIVEMAX(9));
2528
2529#define SECONDS * tps
2530
2531 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2532 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2533 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2534 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2535 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2536 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2537 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2538 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2539 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2540
2541#undef SECONDS
2542}
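/*
 * Worked example (editor's note, assuming a 200 MHz core clock):
 * core_clk / (1000000 / TP_TMR_RES) = 200000000 / 20000 = 10000, and
 * fls(10000) - 1 = 13, so tre = 13 and tps = 200000000 >> 13 = 24414
 * ticks/sec, i.e. ~41 us per tick, the coarsest resolution that does
 * not exceed the desired 50 us. The SECONDS macro is plain textual
 * substitution: "7200 SECONDS" expands to "7200 * tps".
 */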
2543
2544/**
2545 * t3_tp_set_coalescing_size - set receive coalescing size
2546 * @adap: the adapter
2547 * @size: the receive coalescing size
2548 * @psh: whether a set PSH bit should deliver coalesced data
2549 *
2550 * Set the receive coalescing size and PSH bit handling.
2551 */
2552int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2553{
2554 u32 val;
2555
2556 if (size > MAX_RX_COALESCING_LEN)
2557 return -EINVAL;
2558
2559 val = t3_read_reg(adap, A_TP_PARA_REG3);
2560 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2561
2562 if (size) {
2563 val |= F_RXCOALESCEENABLE;
2564 if (psh)
2565 val |= F_RXCOALESCEPSHEN;
2566 size = min(MAX_RX_COALESCING_LEN, size);
2567 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2568 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2569 }
2570 t3_write_reg(adap, A_TP_PARA_REG3, val);
2571 return 0;
2572}
2573
2574/**
2575 * t3_tp_set_max_rxsize - set the max receive size
2576 * @adap: the adapter
2577 * @size: the max receive size
2578 *
2579 * Set TP's max receive size. This is the limit that applies when
2580 * receive coalescing is disabled.
2581 */
2582void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2583{
2584 t3_write_reg(adap, A_TP_PARA_REG7,
2585 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2586}
2587
2588static void __devinit init_mtus(unsigned short mtus[])
2589{
2590 /*
2591 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2592 * it can accommodate max size TCP/IP headers when SACK and timestamps
2593 * are enabled and still have at least 8 bytes of payload.
2594 */
2595 mtus[0] = 88;
2596 mtus[1] = 88;
2597 mtus[2] = 256;
2598 mtus[3] = 512;
2599 mtus[4] = 576;
2600 mtus[5] = 1024;
2601 mtus[6] = 1280;
2602 mtus[7] = 1492;
2603 mtus[8] = 1500;
2604 mtus[9] = 2002;
2605 mtus[10] = 2048;
2606 mtus[11] = 4096;
2607 mtus[12] = 4352;
2608 mtus[13] = 8192;
2609 mtus[14] = 9000;
2610 mtus[15] = 9600;
2611}
2612
2613/*
2614 * Initial congestion control parameters.
2615 */
2616static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2617{
2618 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2619 a[9] = 2;
2620 a[10] = 3;
2621 a[11] = 4;
2622 a[12] = 5;
2623 a[13] = 6;
2624 a[14] = 7;
2625 a[15] = 8;
2626 a[16] = 9;
2627 a[17] = 10;
2628 a[18] = 14;
2629 a[19] = 17;
2630 a[20] = 21;
2631 a[21] = 25;
2632 a[22] = 30;
2633 a[23] = 35;
2634 a[24] = 45;
2635 a[25] = 60;
2636 a[26] = 80;
2637 a[27] = 100;
2638 a[28] = 200;
2639 a[29] = 300;
2640 a[30] = 400;
2641 a[31] = 500;
2642
2643 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2644 b[9] = b[10] = 1;
2645 b[11] = b[12] = 2;
2646 b[13] = b[14] = b[15] = b[16] = 3;
2647 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2648 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2649 b[28] = b[29] = 6;
2650 b[30] = b[31] = 7;
2651}
2652
2653/* The minimum additive increment value for the congestion control table */
2654#define CC_MIN_INCR 2U
2655
2656/**
2657 * t3_load_mtus - write the MTU and congestion control HW tables
2658 * @adap: the adapter
2659 * @mtus: the unrestricted values for the MTU table
2660 * @alpha: the values for the congestion control alpha parameter
2661 * @beta: the values for the congestion control beta parameter
2662 * @mtu_cap: the maximum permitted effective MTU
2663 *
2664 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2665 * Update the high-speed congestion control table with the supplied alpha,
2666 * beta, and MTUs.
2667 */
2668void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2669 unsigned short alpha[NCCTRL_WIN],
2670 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2671{
2672 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2673 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2674 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2675 28672, 40960, 57344, 81920, 114688, 163840, 229376
2676 };
2677
2678 unsigned int i, w;
2679
2680 for (i = 0; i < NMTUS; ++i) {
2681 unsigned int mtu = min(mtus[i], mtu_cap);
2682 unsigned int log2 = fls(mtu);
2683
2684 if (!(mtu & ((1 << log2) >> 2))) /* round */
2685 log2--;
2686 t3_write_reg(adap, A_TP_MTU_TABLE,
2687 (i << 24) | (log2 << 16) | mtu);
2688
2689 for (w = 0; w < NCCTRL_WIN; ++w) {
2690 unsigned int inc;
2691
2692 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2693 CC_MIN_INCR);
2694
2695 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2696 (w << 16) | (beta[w] << 13) | inc);
2697 }
2698 }
2699}
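/*
 * Worked example of the rounding above (editor's note): for mtu = 1500,
 * fls(1500) = 11 and (1 << 11) >> 2 = 512; bit 9 of 1500 is clear, so
 * log2 is decremented to 10, i.e. 1500 is logged as 2^10 = 1024 rather
 * than 2048 (the test rounds at the 1.5x midpoint, here 1536).
 */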
2700
2701/**
2702 * t3_read_hw_mtus - returns the values in the HW MTU table
2703 * @adap: the adapter
2704 * @mtus: where to store the HW MTU values
2705 *
2706 * Reads the HW MTU table.
2707 */
2708void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2709{
2710 int i;
2711
2712 for (i = 0; i < NMTUS; ++i) {
2713 unsigned int val;
2714
2715 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2716 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2717 mtus[i] = val & 0x3fff;
2718 }
2719}
2720
2721/**
2722 * t3_get_cong_cntl_tab - reads the congestion control table
2723 * @adap: the adapter
2724 * @incr: where to store the additive increments
2725 *
2726 * Reads the additive increments programmed into the HW congestion
2727 * control table.
2728 */
2729void t3_get_cong_cntl_tab(struct adapter *adap,
2730 unsigned short incr[NMTUS][NCCTRL_WIN])
2731{
2732 unsigned int mtu, w;
2733
2734 for (mtu = 0; mtu < NMTUS; ++mtu)
2735 for (w = 0; w < NCCTRL_WIN; ++w) {
2736 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2737 0xffff0000 | (mtu << 5) | w);
2738 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2739 0x1fff;
2740 }
2741}
2742
2743/**
2744 * t3_tp_get_mib_stats - read TP's MIB counters
2745 * @adap: the adapter
2746 * @tps: holds the returned counter values
2747 *
2748 * Returns the values of TP's MIB counters.
2749 */
2750void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2751{
2752 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2753 sizeof(*tps) / sizeof(u32), 0);
2754}
2755
2756#define ulp_region(adap, name, start, len) \
2757 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2758 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2759 (start) + (len) - 1); \
2760 start += len
2761
2762#define ulptx_region(adap, name, start, len) \
2763 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2764 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2765 (start) + (len) - 1)
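/*
 * Editor's note: unlike ulp_region(), ulptx_region() does not advance
 * @start (there is no "start += len"), so in ulp_config() below each
 * ULP TX window is programmed over the same address range as the ULP
 * RX region written immediately after it.
 */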
2766
2767static void ulp_config(struct adapter *adap, const struct tp_params *p)
2768{
2769 unsigned int m = p->chan_rx_size;
2770
2771 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2772 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2773 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2774 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2775 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2776 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2777 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2778 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2779}
2780
2781/**
2782 * t3_set_proto_sram - set the contents of the protocol sram
2783 * @adap: the adapter
2784 * @data: the protocol image
2785 *
2786 * Write the contents of the protocol SRAM.
2787 */
2788int t3_set_proto_sram(struct adapter *adap, u8 *data)
2789{
2790 int i;
2791 u32 *buf = (u32 *)data;
2792
2793 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2794 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2795 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2796 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2797 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2798 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2799
2800 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2801 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2802 return -EIO;
2803 }
2804 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2805
2806 return 0;
2807}
2808
2809void t3_config_trace_filter(struct adapter *adapter,
2810 const struct trace_params *tp, int filter_index,
2811 int invert, int enable)
2812{
2813 u32 addr, key[4], mask[4];
2814
2815 key[0] = tp->sport | (tp->sip << 16);
2816 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2817 key[2] = tp->dip;
2818 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2819
2820 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2821 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2822 mask[2] = tp->dip_mask;
2823 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2824
2825 if (invert)
2826 key[3] |= (1 << 29);
2827 if (enable)
2828 key[3] |= (1 << 28);
2829
2830 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2831 tp_wr_indirect(adapter, addr++, key[0]);
2832 tp_wr_indirect(adapter, addr++, mask[0]);
2833 tp_wr_indirect(adapter, addr++, key[1]);
2834 tp_wr_indirect(adapter, addr++, mask[1]);
2835 tp_wr_indirect(adapter, addr++, key[2]);
2836 tp_wr_indirect(adapter, addr++, mask[2]);
2837 tp_wr_indirect(adapter, addr++, key[3]);
2838 tp_wr_indirect(adapter, addr, mask[3]);
2839 t3_read_reg(adapter, A_TP_PIO_DATA);
2840}
2841
2842/**
2843 * t3_config_sched - configure a HW traffic scheduler
2844 * @adap: the adapter
2845 * @kbps: target rate in Kbps
2846 * @sched: the scheduler index
2847 *
2848 * Configure a HW scheduler for the target rate
2849 */
2850int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2851{
2852 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2853 unsigned int clk = adap->params.vpd.cclk * 1000;
2854 unsigned int selected_cpt = 0, selected_bpt = 0;
2855
2856 if (kbps > 0) {
2857 kbps *= 125; /* -> bytes */
2858 for (cpt = 1; cpt <= 255; cpt++) {
2859 tps = clk / cpt;
2860 bpt = (kbps + tps / 2) / tps;
2861 if (bpt > 0 && bpt <= 255) {
2862 v = bpt * tps;
2863 delta = v >= kbps ? v - kbps : kbps - v;
2864 if (delta <= mindelta) {
2865 mindelta = delta;
2866 selected_cpt = cpt;
2867 selected_bpt = bpt;
2868 }
2869 } else if (selected_cpt)
2870 break;
2871 }
2872 if (!selected_cpt)
2873 return -EINVAL;
2874 }
2875 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2876 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2877 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2878 if (sched & 1)
2879 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2880 else
2881 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2882 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2883 return 0;
2884}
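/*
 * Worked example (editor's note, assuming vpd.cclk = 200000, i.e. a
 * 200 MHz core clock): a 10000 kbps target becomes 1250000 bytes/sec,
 * and the search settles on cpt = 160, bpt = 1, since one byte every
 * 160 clocks gives 200000000 / 160 = 1250000 bytes/sec, an exact
 * match (delta = 0).
 */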
2885
2886static int tp_init(struct adapter *adap, const struct tp_params *p)
2887{
2888 int busy = 0;
2889
2890 tp_config(adap, p);
2891 t3_set_vlan_accel(adap, 3, 0);
2892
2893 if (is_offload(adap)) {
2894 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2895 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2896 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2897 0, 1000, 5);
2898 if (busy)
2899 CH_ERR(adap, "TP initialization timed out\n");
2900 }
2901
2902 if (!busy)
2903 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2904 return busy;
2905}
2906
2907int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2908{
2909 if (port_mask & ~((1 << adap->params.nports) - 1))
2910 return -EINVAL;
2911 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2912 port_mask << S_PORT0ACTIVE);
2913 return 0;
2914}
2915
2916/*
2917 * Perform the bits of HW initialization that are dependent on the number
2918 * of available ports.
2919 */
2920static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2921{
2922 int i;
2923
2924 if (nports == 1) {
2925 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2926 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2927 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2928 F_PORT0ACTIVE | F_ENFORCEPKT);
2929 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
2930 } else {
2931 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2932 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2933 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2934 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2935 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2936 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2937 F_ENFORCEPKT);
2938 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2939 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2940 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2941 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2942 for (i = 0; i < 16; i++)
2943 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2944 (i << 16) | 0x1010);
2945 }
2946}
2947
2948static int calibrate_xgm(struct adapter *adapter)
2949{
2950 if (uses_xaui(adapter)) {
2951 unsigned int v, i;
2952
2953 for (i = 0; i < 5; ++i) {
2954 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2955 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2956 msleep(1);
2957 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2958 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2959 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2960 V_XAUIIMP(G_CALIMP(v) >> 2));
2961 return 0;
2962 }
2963 }
2964 CH_ERR(adapter, "MAC calibration failed\n");
2965 return -1;
2966 } else {
2967 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2968 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2969 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2970 F_XGM_IMPSETUPDATE);
2971 }
2972 return 0;
2973}
2974
2975static void calibrate_xgm_t3b(struct adapter *adapter)
2976{
2977 if (!uses_xaui(adapter)) {
2978 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2979 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2980 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2981 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2982 F_XGM_IMPSETUPDATE);
2983 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2984 0);
2985 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2986 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2987 }
2988}
2989
2990struct mc7_timing_params {
2991 unsigned char ActToPreDly;
2992 unsigned char ActToRdWrDly;
2993 unsigned char PreCyc;
2994 unsigned char RefCyc[5];
2995 unsigned char BkCyc;
2996 unsigned char WrToRdDly;
2997 unsigned char RdToWrDly;
2998};
2999
3000/*
3001 * Write a value to a register and check that the write completed. These
3002 * writes normally complete in a cycle or two, so one read should suffice.
3003 * The very first read exists to flush the posted write to the device.
3004 */
3005static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3006{
3007 t3_write_reg(adapter, addr, val);
3008 t3_read_reg(adapter, addr); /* flush */
3009 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3010 return 0;
3011 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3012 return -EIO;
3013}
3014
3015static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3016{
3017 static const unsigned int mc7_mode[] = {
3018 0x632, 0x642, 0x652, 0x432, 0x442
3019 };
3020 static const struct mc7_timing_params mc7_timings[] = {
3021 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3022 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3023 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3024 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3025 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3026 };
3027
3028 u32 val;
3029 unsigned int width, density, slow, attempts;
3030 struct adapter *adapter = mc7->adapter;
3031 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3032
3033 if (!mc7->size)
3034 return 0;
3035
3036 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3037 slow = val & F_SLOW;
3038 width = G_WIDTH(val);
3039 density = G_DEN(val);
3040
3041 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3042 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3043 msleep(1);
3044
3045 if (!slow) {
3046 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3047 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3048 msleep(1);
3049 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3050 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3051 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3052 mc7->name);
3053 goto out_fail;
3054 }
3055 }
3056
3057 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3058 V_ACTTOPREDLY(p->ActToPreDly) |
3059 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3060 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3061 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3062
3063 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3064 val | F_CLKEN | F_TERM150);
3065 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3066
3067 if (!slow)
3068 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3069 F_DLLENB);
3070 udelay(1);
3071
3072 val = slow ? 3 : 6;
3073 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3074 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3075 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3076 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3077 goto out_fail;
3078
3079 if (!slow) {
3080 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3081 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3082 udelay(5);
3083 }
3084
3085 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3086 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3087 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3088 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3089 mc7_mode[mem_type]) ||
3090 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3091 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3092 goto out_fail;
3093
3094 /* clock value is in KHz */
3095 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3096 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3097
3098 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3099 F_PERREFEN | V_PREREFDIV(mc7_clock));
3100 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3101
3102 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3103 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3104 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3105 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3106 (mc7->size << width) - 1);
3107 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3108 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3109
3110 attempts = 50;
3111 do {
3112 msleep(250);
3113 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3114 } while ((val & F_BUSY) && --attempts);
3115 if (val & F_BUSY) {
3116 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3117 goto out_fail;
3118 }
3119
3120 /* Enable normal memory accesses. */
3121 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3122 return 0;
3123
3124out_fail:
3125 return -1;
3126}
3127
3128static void config_pcie(struct adapter *adap)
3129{
3130 static const u16 ack_lat[4][6] = {
3131 {237, 416, 559, 1071, 2095, 4143},
3132 {128, 217, 289, 545, 1057, 2081},
3133 {73, 118, 154, 282, 538, 1050},
3134 {67, 107, 86, 150, 278, 534}
3135 };
3136 static const u16 rpl_tmr[4][6] = {
3137 {711, 1248, 1677, 3213, 6285, 12429},
3138 {384, 651, 867, 1635, 3171, 6243},
3139 {219, 354, 462, 846, 1614, 3150},
3140 {201, 321, 258, 450, 834, 1602}
3141 };
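/*
 * Editor's note: both tables are indexed [log2(link width)][payload
 * size code], i.e. rows for x1/x2/x4/x8 links and columns for the six
 * 128..4096 byte maximum-payload-size encodings read from
 * PCI_EXP_DEVCTL below.
 */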
3142
3143 u16 val;
3144 unsigned int log2_width, pldsize;
3145 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3146
3147 pci_read_config_word(adap->pdev,
3148 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3149 &val);
3150 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3151 pci_read_config_word(adap->pdev,
3152 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3153 &val);
3154
3155 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3156 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3157 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3158 log2_width = fls(adap->params.pci.width) - 1;
3159 acklat = ack_lat[log2_width][pldsize];
3160 if (val & 1) /* check LOsEnable */
3161 acklat += fst_trn_tx * 4;
3162 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3163
3164 if (adap->params.rev == 0)
3165 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3166 V_T3A_ACKLAT(M_T3A_ACKLAT),
3167 V_T3A_ACKLAT(acklat));
3168 else
3169 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3170 V_ACKLAT(acklat));
3171
3172 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3173 V_REPLAYLMT(rpllmt));
3174
3175 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3176 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3177}
3178
3179/*
3180 * Initialize and configure T3 HW modules. This performs the
3181 * initialization steps that need to be done once after a card is reset.
3182 * MAC and PHY initialization is handled separately whenever a port is enabled.
3183 *
3184 * fw_params are passed to FW and their value is platform dependent. Only the
3185 * top 8 bits are available for use, the rest must be 0.
3186 */
3187int t3_init_hw(struct adapter *adapter, u32 fw_params)
3188{
3189 int err = -EIO, attempts = 100;
3190 const struct vpd_params *vpd = &adapter->params.vpd;
3191
3192 if (adapter->params.rev > 0)
3193 calibrate_xgm_t3b(adapter);
3194 else if (calibrate_xgm(adapter))
3195 goto out_err;
3196
3197 if (vpd->mclk) {
3198 partition_mem(adapter, &adapter->params.tp);
3199
3200 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3201 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3202 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3203 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3204 adapter->params.mc5.nfilters,
3205 adapter->params.mc5.nroutes))
3206 goto out_err;
3207 }
3208
3209 if (tp_init(adapter, &adapter->params.tp))
3210 goto out_err;
3211
3212 t3_tp_set_coalescing_size(adapter,
3213 min(adapter->params.sge.max_pkt_size,
3214 MAX_RX_COALESCING_LEN), 1);
3215 t3_tp_set_max_rxsize(adapter,
3216 min(adapter->params.sge.max_pkt_size, 16384U));
3217 ulp_config(adapter, &adapter->params.tp);
3218
3219 if (is_pcie(adapter))
3220 config_pcie(adapter);
3221 else
3222 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3223
3224 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3225 init_hw_for_avail_ports(adapter, adapter->params.nports);
3226 t3_sge_init(adapter, &adapter->params.sge);
3227
3228 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3229 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3230 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3231 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3232
3233 do { /* wait for uP to initialize */
3234 msleep(20);
3235 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3236 if (!attempts) {
3237 CH_ERR(adapter, "uP initialization timed out\n");
3238 goto out_err;
3239 }
3240
3241 err = 0;
3242out_err:
3243 return err;
3244}
3245
3246/**
3247 * get_pci_mode - determine a card's PCI mode
3248 * @adapter: the adapter
3249 * @p: where to store the PCI settings
3250 *
3251 * Determines a card's PCI mode and associated parameters, such as speed
3252 * and width.
3253 */
3254static void __devinit get_pci_mode(struct adapter *adapter,
3255 struct pci_params *p)
3256{
3257 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3258 u32 pci_mode, pcie_cap;
3259
3260 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3261 if (pcie_cap) {
3262 u16 val;
3263
3264 p->variant = PCI_VARIANT_PCIE;
3265 p->pcie_cap_addr = pcie_cap;
3266 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3267 &val);
3268 p->width = (val >> 4) & 0x3f;
3269 return;
3270 }
3271
3272 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3273 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3274 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3275 pci_mode = G_PCIXINITPAT(pci_mode);
3276 if (pci_mode == 0)
3277 p->variant = PCI_VARIANT_PCI;
3278 else if (pci_mode < 4)
3279 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3280 else if (pci_mode < 8)
3281 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3282 else
3283 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3284}
3285
3286/**
3287 * init_link_config - initialize a link's SW state
3288 * @lc: structure holding the link state
3289 * @ai: information about the current card
3290 *
3291 * Initializes the SW state maintained for each link, including the link's
3292 * capabilities and default speed/duplex/flow-control/autonegotiation
3293 * settings.
3294 */
3295static void __devinit init_link_config(struct link_config *lc,
3296 unsigned int caps)
3297{
3298 lc->supported = caps;
3299 lc->requested_speed = lc->speed = SPEED_INVALID;
3300 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3301 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3302 if (lc->supported & SUPPORTED_Autoneg) {
3303 lc->advertising = lc->supported;
3304 lc->autoneg = AUTONEG_ENABLE;
3305 lc->requested_fc |= PAUSE_AUTONEG;
3306 } else {
3307 lc->advertising = 0;
3308 lc->autoneg = AUTONEG_DISABLE;
3309 }
3310}
3311
3312/**
3313 * mc7_calc_size - calculate MC7 memory size
3314 * @cfg: the MC7 configuration
3315 *
3316 * Calculates the size of an MC7 memory in bytes from the value of its
3317 * configuration register.
3318 */
3319static unsigned int __devinit mc7_calc_size(u32 cfg)
3320{
3321 unsigned int width = G_WIDTH(cfg);
3322 unsigned int banks = !!(cfg & F_BKS) + 1;
3323 unsigned int org = !!(cfg & F_ORG) + 1;
3324 unsigned int density = G_DEN(cfg);
3325 unsigned int MBs = ((256 << density) * banks) / (org << width);
3326
3327 return MBs << 20;
3328}
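/*
 * Worked example (editor's note, made-up config bits): with
 * density = 2, one bank, org = 1 and width = 2 the formula gives
 * MBs = ((256 << 2) * 1) / (1 << 2) = 256, i.e. 256 MB returned as
 * 256 << 20 bytes.
 */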
3329
3330static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3331 unsigned int base_addr, const char *name)
3332{
3333 u32 cfg;
3334
3335 mc7->adapter = adapter;
3336 mc7->name = name;
3337 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3338 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3339 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3340 mc7->width = G_WIDTH(cfg);
3341}
3342
3343void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3344{
3345 mac->adapter = adapter;
3346 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3347 mac->nucast = 1;
3348
3349 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3350 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3351 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3352 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3353 F_ENRGMII, 0);
3354 }
3355}
3356
3357void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3358{
3359 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3360
3361 mi1_init(adapter, ai);
3362 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3363 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3364 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3365 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3366 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3367
3368 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3369 val |= F_ENRGMII;
3370
3371 /* Enable MAC clocks so we can access the registers */
3372 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3373 t3_read_reg(adapter, A_XGM_PORT_CFG);
3374
3375 val |= F_CLKDIVRESET_;
3376 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3377 t3_read_reg(adapter, A_XGM_PORT_CFG);
3378 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3379 t3_read_reg(adapter, A_XGM_PORT_CFG);
3380}
3381
3382/*
3383 * Reset the adapter.
3384 * Older PCIe cards lose their config space during reset, PCI-X
3385 * ones don't.
3386 */
3387int t3_reset_adapter(struct adapter *adapter)
3388{
3389 int i, save_and_restore_pcie =
3390 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
4d22de3e
DLR
3391 uint16_t devid = 0;
3392
e4d08359 3393	if (save_and_restore_pcie)
4d22de3e 3394		pci_save_state(adapter->pdev);
3395 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3396
3397	/*
3398	 * Give the device some time to reset fully.
3399	 * XXX The delay time should be tuned.
3400	 */
3401 for (i = 0; i < 10; i++) {
3402 msleep(50);
3403 pci_read_config_word(adapter->pdev, 0x00, &devid);
3404 if (devid == 0x1425)
3405 break;
3406 }
3407
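	/*
	 * 0x1425 is Chelsio's PCI vendor ID, read back from config space
	 * offset 0 (the vendor ID register); seeing it confirms that the
	 * device's config space is accessible again after the reset.
	 */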
3408 if (devid != 0x1425)
3409 return -1;
3410
e4d08359 3411	if (save_and_restore_pcie)
4d22de3e 3412		pci_restore_state(adapter->pdev);
3413 return 0;
3414}
3415
3416/*
3417 * Initialize adapter SW state for the various HW modules, set initial values
3418 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3419 * interface.
3420 */
3421int __devinit t3_prep_adapter(struct adapter *adapter,
3422 const struct adapter_info *ai, int reset)
3423{
3424 int ret;
3425 unsigned int i, j = 0;
3426
3427 get_pci_mode(adapter, &adapter->params.pci);
3428
3429 adapter->params.info = ai;
3430 adapter->params.nports = ai->nports;
3431 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3432 adapter->params.linkpoll_period = 0;
3433 adapter->params.stats_update_period = is_10G(adapter) ?
3434 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3435 adapter->params.pci.vpd_cap_addr =
3436 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3437 ret = get_vpd_params(adapter, &adapter->params.vpd);
3438 if (ret < 0)
3439 return ret;
3440
3441 if (reset && t3_reset_adapter(adapter))
3442 return -1;
3443
3444 t3_sge_prep(adapter, &adapter->params.sge);
3445
3446 if (adapter->params.vpd.mclk) {
3447 struct tp_params *p = &adapter->params.tp;
3448
3449 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3450 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3451 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3452
3453 p->nchan = ai->nports;
3454 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3455 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3456 p->cm_size = t3_mc7_size(&adapter->cm);
3457 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3458 p->chan_tx_size = p->pmtx_size / p->nchan;
3459 p->rx_pg_size = 64 * 1024;
3460 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3461 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3462 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
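		/*
		 * Editorial note: in the statement below, || binds tighter
		 * than ?:, so this selects
		 * (cm_size >= 128 MB || rev > 0) ? 12 : 6 timer queues.
		 */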
3463 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3464 adapter->params.rev > 0 ? 12 : 6;
8ac3ba68 3465	}
3466
3467 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3468 t3_mc7_size(&adapter->pmtx) &&
3469 t3_mc7_size(&adapter->cm);
4d22de3e 3470
8ac3ba68 3471 if (is_offload(adapter)) {
4d22de3e 3472		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3473 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3474 DEFAULT_NFILTERS : 0;
3475 adapter->params.mc5.nroutes = 0;
3476 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3477
3478 init_mtus(adapter->params.mtus);
3479 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3480 }
3481
3482 early_hw_init(adapter, ai);
3483
3484 for_each_port(adapter, i) {
3485 u8 hw_addr[6];
3486 struct port_info *p = adap2pinfo(adapter, i);
3487
3488 while (!adapter->params.vpd.port_type[j])
3489 ++j;
3490
3491 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3492 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3493 ai->mdio_ops);
3494 mac_prep(&p->mac, adapter, j);
3495 ++j;
3496
3497 /*
3498 * The VPD EEPROM stores the base Ethernet address for the
3499 * card. A port's address is derived from the base by adding
3500 * the port's index to the base's low octet.
3501 */
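		/*
		 * Hypothetical example: with a base address of
		 * 00:07:43:00:00:a0 in the VPD, port 0 uses ...:a0 and
		 * port 1 uses ...:a1; only the low octet is adjusted, so
		 * no carry propagates into the higher octets.
		 */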
3502 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3503 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3504
3505 memcpy(adapter->port[i]->dev_addr, hw_addr,
3506 ETH_ALEN);
3507 memcpy(adapter->port[i]->perm_addr, hw_addr,
3508 ETH_ALEN);
3509 init_link_config(&p->link_config, p->port_type->caps);
3510 p->phy.ops->power_down(&p->phy, 1);
3511 if (!(p->port_type->caps & SUPPORTED_IRQ))
3512 adapter->params.linkpoll_period = 10;
3513 }
3514
3515 return 0;
3516}
3517
3518void t3_led_ready(struct adapter *adapter)
3519{
3520 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3521 F_GPIO0_OUT_VAL);
3522}