/* drivers/net/cxgb3/t3_hw.c */
/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
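
/*
 * A minimal usage sketch for the poller above (assuming, as suggested by
 * the calls elsewhere in this file, that t3_wait_op_done() in common.h
 * wraps it with @valp == NULL): poll A_MI1_OP up to 10 times, 20 us
 * apart, until F_BUSY reads as 0, discarding the final register value:
 *
 *	err = t3_wait_op_done_val(adapter, A_MI1_OP, F_BUSY, 0, 10, 20, NULL);
 */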

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register. Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };
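
	/*
	 * How each 64-bit word is assembled depends on the MC7 bus-width
	 * code (mc7->width): width 0 reads both 32-bit data registers in
	 * one pass, while wider codes appear to gather 1 << width partial
	 * reads, each contributing its piece at bit offset step[width] * i
	 * after a right shift of shift[width] bits (see the loop below).
	 * The width encoding itself is hardware-defined.
	 */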

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
		  V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

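/*
 * A note on the V_MDI_OP() opcodes used below, inferred from their usage
 * in this file rather than from a datasheet: for the directly-addressed
 * (clause 22 style) PHYs, opcode 1 is a register write and 2 a register
 * read; for the indirectly-addressed (clause 45 style) PHYs, opcode 0
 * latches the register address and 3 performs the read.
 */
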
/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

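	/*
	 * Bit 0 of MII_ADVERTISE is the IEEE 802.3 selector field
	 * (ADVERTISE_CSMA), which is why the value below starts at 1
	 * rather than 0.
	 */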
	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex. This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 SUPPORTED_OFFLOAD,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_OFFLOAD,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index. Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, 16);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
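
/*
 * For reference: 0x82 is the standard PCI VPD large-resource tag for the
 * ID string (and 0x90 the tag for the read-only VPD-R section); the
 * id_tag check in get_vpd_params() below relies on the former.
 */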

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
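
/*
 * The 0xc written above is assumed to set the block-protect bits in the
 * serial EEPROM's status register; the exact bit layout is specific to
 * the EEPROM part used on these cards.
 */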

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (u32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc	/* flash address holding FW version */
};
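
/*
 * These opcodes follow the de facto standard SPI NOR command set (0x06
 * write enable, 0x05 read status, 0x0b fast read, 0xd8 64KB sector
 * erase, 0x02 page program), which is why the flash helpers below work
 * unchanged across the flash parts used on these adapters.
 */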

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
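
/*
 * In both helpers above @cont presumably keeps the flash chip selected
 * after the transfer, so a multi-byte command sequence (opcode, address,
 * data) can be issued as a chain of sf1_write()/sf1_read() calls; see
 * t3_read_flash() and t3_write_flash() below for the pattern.
 */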

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == 3 && minor == 1)
		return 0;

	CH_ERR(adapter, "found wrong FW version (%u.%u), "
	       "driver needs version 3.1\n", major, minor);
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const u32 *p = (const u32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if (size & 3)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
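
/*
 * A minimal usage sketch (hypothetical variable and file names; the real
 * call site lives in the OS-specific part of the driver): fetch an image
 * with request_firmware() and hand its payload to t3_load_fw():
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "t3fw.bin", &adapter->pdev->dev)) {
 *		ret = t3_load_fw(adapter, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 */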

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC. After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter. The table is terminated by an entry
 * specifying mask 0. Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
1155
1156#define SGE_INTR_MASK (F_RSPQDISABLED)
1157#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1158 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1159 F_NFASRCHFAIL)
1160#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1161#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1162 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1163 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1164#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1165 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1166 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1167 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1168 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1169 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1170#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1171 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1172 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1173 V_BISTERR(M_BISTERR) | F_PEXERR)
1174#define ULPRX_INTR_MASK F_PARERR
1175#define ULPTX_INTR_MASK 0
1176#define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1177 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1178 F_ZERO_SWITCH_ERROR)
1179#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1180 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1181 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1182 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1183#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1184 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1185 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1186#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1187 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1188 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1189#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1190 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1191 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1192 V_MCAPARERRENB(M_MCAPARERRENB))
1193#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1194 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1195 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1196 F_MPS0 | F_CPL_SWITCH)
1197
1198/*
1199 * Interrupt handler for the PCIX1 module.
1200 */
1201static void pci_intr_handler(struct adapter *adapter)
1202{
1203 static const struct intr_info pcix1_intr_info[] = {
4d22de3e
DLR
1204 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1205 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1206 {F_RCVTARABT, "PCI received target abort", -1, 1},
1207 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1208 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1209 {F_DETPARERR, "PCI detected parity error", -1, 1},
1210 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1211 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1212 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1213 1},
1214 {F_DETCORECCERR, "PCI correctable ECC error",
1215 STAT_PCI_CORR_ECC, 0},
1216 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1217 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1218 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1219 1},
1220 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1221 1},
1222 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1223 1},
1224 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1225 "error", -1, 1},
1226 {0}
1227 };
1228
1229 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1230 pcix1_intr_info, adapter->irq_stats))
1231 t3_fatal_err(adapter);
1232}
1233
1234/*
1235 * Interrupt handler for the PCIE module.
1236 */
1237static void pcie_intr_handler(struct adapter *adapter)
1238{
1239 static const struct intr_info pcie_intr_info[] = {
b5a44bcb 1240 {F_PEXERR, "PCI PEX error", -1, 1},
4d22de3e
DLR
1241 {F_UNXSPLCPLERRR,
1242 "PCI unexpected split completion DMA read error", -1, 1},
1243 {F_UNXSPLCPLERRC,
1244 "PCI unexpected split completion DMA command error", -1, 1},
1245 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1246 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1247 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1248 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1249 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1250 "PCI MSI-X table/PBA parity error", -1, 1},
1251 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1252 {0}
1253 };
1254
1255 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1256 pcie_intr_info, adapter->irq_stats))
1257 t3_fatal_err(adapter);
1258}
1259
1260/*
1261 * TP interrupt handler.
1262 */
1263static void tp_intr_handler(struct adapter *adapter)
1264{
1265 static const struct intr_info tp_intr_info[] = {
1266 {0xffffff, "TP parity error", -1, 1},
1267 {0x1000000, "TP out of Rx pages", -1, 1},
1268 {0x2000000, "TP out of Tx pages", -1, 1},
1269 {0}
1270 };
1271
1272 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1273 tp_intr_info, NULL))
1274 t3_fatal_err(adapter);
1275}
1276
1277/*
1278 * CIM interrupt handler.
1279 */
1280static void cim_intr_handler(struct adapter *adapter)
1281{
1282 static const struct intr_info cim_intr_info[] = {
1283 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1284 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1285 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1286 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1287 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1288 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1289 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1290 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1291 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1292 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1293 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1294 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1295 {0}
1296 };
1297
1298 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1299 cim_intr_info, NULL))
1300 t3_fatal_err(adapter);
1301}
1302
1303/*
1304 * ULP RX interrupt handler.
1305 */
1306static void ulprx_intr_handler(struct adapter *adapter)
1307{
1308 static const struct intr_info ulprx_intr_info[] = {
1309 {F_PARERR, "ULP RX parity error", -1, 1},
1310 {0}
1311 };
1312
1313 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1314 ulprx_intr_info, NULL))
1315 t3_fatal_err(adapter);
1316}
1317
1318/*
1319 * ULP TX interrupt handler.
1320 */
1321static void ulptx_intr_handler(struct adapter *adapter)
1322{
1323 static const struct intr_info ulptx_intr_info[] = {
1324 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1325 STAT_ULP_CH0_PBL_OOB, 0},
1326 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1327 STAT_ULP_CH1_PBL_OOB, 0},
1328 {0}
1329 };
1330
1331 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1332 ulptx_intr_info, adapter->irq_stats))
1333 t3_fatal_err(adapter);
1334}
1335
1336#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1337 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1338 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1339 F_ICSPI1_TX_FRAMING_ERROR)
1340#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1341 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1342 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1343 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1344
1345/*
1346 * PM TX interrupt handler.
1347 */
1348static void pmtx_intr_handler(struct adapter *adapter)
1349{
1350 static const struct intr_info pmtx_intr_info[] = {
1351 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1352 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1353 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1354 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1355 "PMTX ispi parity error", -1, 1},
1356 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1357 "PMTX ospi parity error", -1, 1},
1358 {0}
1359 };
1360
1361 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1362 pmtx_intr_info, NULL))
1363 t3_fatal_err(adapter);
1364}
1365
1366#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1367 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1368 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1369 F_IESPI1_TX_FRAMING_ERROR)
1370#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1371 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1372 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1373 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1374
1375/*
1376 * PM RX interrupt handler.
1377 */
1378static void pmrx_intr_handler(struct adapter *adapter)
1379{
1380 static const struct intr_info pmrx_intr_info[] = {
1381 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1382 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1383 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1384 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1385 "PMRX ispi parity error", -1, 1},
1386 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1387 "PMRX ospi parity error", -1, 1},
1388 {0}
1389 };
1390
1391 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1392 pmrx_intr_info, NULL))
1393 t3_fatal_err(adapter);
1394}
1395
1396/*
1397 * CPL switch interrupt handler.
1398 */
1399static void cplsw_intr_handler(struct adapter *adapter)
1400{
1401 static const struct intr_info cplsw_intr_info[] = {
1402/* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1403 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1404 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1405 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1406 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1407 {0}
1408 };
1409
1410 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1411 cplsw_intr_info, NULL))
1412 t3_fatal_err(adapter);
1413}
1414
1415/*
1416 * MPS interrupt handler.
1417 */
1418static void mps_intr_handler(struct adapter *adapter)
1419{
1420 static const struct intr_info mps_intr_info[] = {
1421 {0x1ff, "MPS parity error", -1, 1},
1422 {0}
1423 };
1424
1425 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1426 mps_intr_info, NULL))
1427 t3_fatal_err(adapter);
1428}
1429
1430#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1431
1432/*
1433 * MC7 interrupt handler.
1434 */
1435static void mc7_intr_handler(struct mc7 *mc7)
1436{
1437 struct adapter *adapter = mc7->adapter;
1438 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1439
1440 if (cause & F_CE) {
1441 mc7->stats.corr_err++;
1442 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1443 "data 0x%x 0x%x 0x%x\n", mc7->name,
1444 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1445 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1446 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1447 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1448 }
1449
1450 if (cause & F_UE) {
1451 mc7->stats.uncorr_err++;
1452 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1453 "data 0x%x 0x%x 0x%x\n", mc7->name,
1454 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1455 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1456 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1457 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1458 }
1459
1460 if (G_PE(cause)) {
1461 mc7->stats.parity_err++;
1462 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1463 mc7->name, G_PE(cause));
1464 }
1465
1466 if (cause & F_AE) {
1467 u32 addr = 0;
1468
1469 if (adapter->params.rev > 0)
1470 addr = t3_read_reg(adapter,
1471 mc7->offset + A_MC7_ERR_ADDR);
1472 mc7->stats.addr_err++;
1473 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1474 mc7->name, addr);
1475 }
1476
1477 if (cause & MC7_INTR_FATAL)
1478 t3_fatal_err(adapter);
1479
1480 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1481}
1482
1483#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1484 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1485/*
1486 * XGMAC interrupt handler.
1487 */
1488static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1489{
1490 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1491 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1492
1493 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1494 mac->stats.tx_fifo_parity_err++;
1495 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1496 }
1497 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1498 mac->stats.rx_fifo_parity_err++;
1499 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1500 }
1501 if (cause & F_TXFIFO_UNDERRUN)
1502 mac->stats.tx_fifo_urun++;
1503 if (cause & F_RXFIFO_OVERFLOW)
1504 mac->stats.rx_fifo_ovfl++;
1505 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1506 mac->stats.serdes_signal_loss++;
1507 if (cause & F_XAUIPCSCTCERR)
1508 mac->stats.xaui_pcs_ctc_err++;
1509 if (cause & F_XAUIPCSALIGNCHANGE)
1510 mac->stats.xaui_pcs_align_change++;
1511
1512 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1513 if (cause & XGM_INTR_FATAL)
1514 t3_fatal_err(adap);
1515 return cause != 0;
1516}
1517
1518/*
1519 * Interrupt handler for PHY events.
1520 */
1521int t3_phy_intr_handler(struct adapter *adapter)
1522{
1523 static const int intr_gpio_bits[] = { 8, 0x20 };
1524
1525 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1526
1527 for_each_port(adapter, i) {
1528 if (cause & intr_gpio_bits[i]) {
1529 struct cphy *phy = &adap2pinfo(adapter, i)->phy;
1530 int phy_cause = phy->ops->intr_handler(phy);
1531
1532 if (phy_cause & cphy_cause_link_change)
1533 t3_link_changed(adapter, i);
1534 if (phy_cause & cphy_cause_fifo_error)
1535 phy->fifo_errors++;
1536 }
1537 }
1538
1539 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1540 return 0;
1541}
1542
1543/*
1544 * T3 slow path (non-data) interrupt handler.
1545 */
1546int t3_slow_intr_handler(struct adapter *adapter)
1547{
1548 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1549
1550 cause &= adapter->slow_intr_mask;
1551 if (!cause)
1552 return 0;
1553 if (cause & F_PCIM0) {
1554 if (is_pcie(adapter))
1555 pcie_intr_handler(adapter);
1556 else
1557 pci_intr_handler(adapter);
1558 }
1559 if (cause & F_SGE3)
1560 t3_sge_err_intr_handler(adapter);
1561 if (cause & F_MC7_PMRX)
1562 mc7_intr_handler(&adapter->pmrx);
1563 if (cause & F_MC7_PMTX)
1564 mc7_intr_handler(&adapter->pmtx);
1565 if (cause & F_MC7_CM)
1566 mc7_intr_handler(&adapter->cm);
1567 if (cause & F_CIM)
1568 cim_intr_handler(adapter);
1569 if (cause & F_TP1)
1570 tp_intr_handler(adapter);
1571 if (cause & F_ULP2_RX)
1572 ulprx_intr_handler(adapter);
1573 if (cause & F_ULP2_TX)
1574 ulptx_intr_handler(adapter);
1575 if (cause & F_PM1_RX)
1576 pmrx_intr_handler(adapter);
1577 if (cause & F_PM1_TX)
1578 pmtx_intr_handler(adapter);
1579 if (cause & F_CPL_SWITCH)
1580 cplsw_intr_handler(adapter);
1581 if (cause & F_MPS0)
1582 mps_intr_handler(adapter);
1583 if (cause & F_MC5A)
1584 t3_mc5_intr_handler(&adapter->mc5);
1585 if (cause & F_XGMAC0_0)
1586 mac_intr_handler(adapter, 0);
1587 if (cause & F_XGMAC0_1)
1588 mac_intr_handler(adapter, 1);
1589 if (cause & F_T3DBG)
1590 t3_os_ext_intr_handler(adapter);
1591
1592 /* Clear the interrupts just processed. */
1593 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1594 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1595 return 1;
1596}
1597
1598/**
1599 * t3_intr_enable - enable interrupts
1600 * @adapter: the adapter whose interrupts should be enabled
1601 *
1602 * Enable interrupts by setting the interrupt enable registers of the
1603 * various HW modules and then enabling the top-level interrupt
1604 * concentrator.
1605 */
1606void t3_intr_enable(struct adapter *adapter)
1607{
1608 static const struct addr_val_pair intr_en_avp[] = {
1609 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1610 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1611 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1612 MC7_INTR_MASK},
1613 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1614 MC7_INTR_MASK},
1615 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1616 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1617 {A_TP_INT_ENABLE, 0x3bfffff},
1618 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1619 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1620 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1621 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1622 };
1623
1624 adapter->slow_intr_mask = PL_INTR_MASK;
1625
1626 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1627
1628 if (adapter->params.rev > 0) {
1629 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1630 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1631 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1632 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1633 F_PBL_BOUND_ERR_CH1);
1634 } else {
1635 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1636 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1637 }
1638
1639 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1640 adapter_info(adapter)->gpio_intr);
1641 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1642 adapter_info(adapter)->gpio_intr);
1643 if (is_pcie(adapter))
1644 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1645 else
1646 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1647 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1648 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1649}
1650
1651/**
1652 * t3_intr_disable - disable a card's interrupts
1653 * @adapter: the adapter whose interrupts should be disabled
1654 *
1655 * Disable interrupts. We only disable the top-level interrupt
1656 * concentrator and the SGE data interrupts.
1657 */
1658void t3_intr_disable(struct adapter *adapter)
1659{
1660 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1661 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1662 adapter->slow_intr_mask = 0;
1663}
1664
1665/**
1666 * t3_intr_clear - clear all interrupts
1667 * @adapter: the adapter whose interrupts should be cleared
1668 *
1669 * Clears all interrupts.
1670 */
1671void t3_intr_clear(struct adapter *adapter)
1672{
1673 static const unsigned int cause_reg_addr[] = {
1674 A_SG_INT_CAUSE,
1675 A_SG_RSPQ_FL_STATUS,
1676 A_PCIX_INT_CAUSE,
1677 A_MC7_INT_CAUSE,
1678 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1679 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1680 A_CIM_HOST_INT_CAUSE,
1681 A_TP_INT_CAUSE,
1682 A_MC5_DB_INT_CAUSE,
1683 A_ULPRX_INT_CAUSE,
1684 A_ULPTX_INT_CAUSE,
1685 A_CPL_INTR_CAUSE,
1686 A_PM1_TX_INT_CAUSE,
1687 A_PM1_RX_INT_CAUSE,
1688 A_MPS_INT_CAUSE,
1689 A_T3DBG_INT_CAUSE,
1690 };
1691 unsigned int i;
1692
1693 /* Clear PHY and MAC interrupts for each port. */
1694 for_each_port(adapter, i)
1695 t3_port_intr_clear(adapter, i);
1696
1697 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1698 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1699
1700 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1701 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1702}
1703
1704/**
1705 * t3_port_intr_enable - enable port-specific interrupts
1706 * @adapter: associated adapter
1707 * @idx: index of port whose interrupts should be enabled
1708 *
1709 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1710 * adapter port.
1711 */
1712void t3_port_intr_enable(struct adapter *adapter, int idx)
1713{
1714 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1715
1716 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1717 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1718 phy->ops->intr_enable(phy);
1719}
1720
1721/**
1722 * t3_port_intr_disable - disable port-specific interrupts
1723 * @adapter: associated adapter
1724 * @idx: index of port whose interrupts should be disabled
1725 *
1726 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1727 * adapter port.
1728 */
1729void t3_port_intr_disable(struct adapter *adapter, int idx)
1730{
1731 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1732
1733 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1734 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1735 phy->ops->intr_disable(phy);
1736}
1737
1738/**
1739 * t3_port_intr_clear - clear port-specific interrupts
1740 * @adapter: associated adapter
1741 * @idx: index of port whose interrupts to clear
1742 *
1743 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1744 * adapter port.
1745 */
1746void t3_port_intr_clear(struct adapter *adapter, int idx)
1747{
1748 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1749
1750 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1751 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1752 phy->ops->intr_clear(phy);
1753}
1754
1755/**
1756 * t3_sge_write_context - write an SGE context
1757 * @adapter: the adapter
1758 * @id: the context id
1759 * @type: the context type
1760 *
1761 * Program an SGE context with the values already loaded in the
1762 * CONTEXT_DATA? registers.
1763 */
1764static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1765 unsigned int type)
1766{
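	/* Enable every mask bit so the command writes the whole context image. */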
1767 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1768 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1769 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1770 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1771 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1772 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1773 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1774 0, 5, 1);
1775}
1776
1777/**
1778 * t3_sge_init_ecntxt - initialize an SGE egress context
1779 * @adapter: the adapter to configure
1780 * @id: the context id
1781 * @gts_enable: whether to enable GTS for the context
1782 * @type: the egress context type
1783 * @respq: associated response queue
1784 * @base_addr: base address of queue
1785 * @size: number of queue entries
1786 * @token: uP token
1787 * @gen: initial generation value for the context
1788 * @cidx: consumer pointer
1789 *
1790 * Initialize an SGE egress context and make it ready for use. If the
1791 * platform allows concurrent context operations, the caller is
1792 * responsible for appropriate locking.
1793 */
1794int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1795 enum sge_context_type type, int respq, u64 base_addr,
1796 unsigned int size, unsigned int token, int gen,
1797 unsigned int cidx)
1798{
1799 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1800
1801 if (base_addr & 0xfff) /* must be 4K aligned */
1802 return -EINVAL;
1803 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1804 return -EBUSY;
1805
1806 base_addr >>= 12;
1807 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1808 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1809 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1810 V_EC_BASE_LO(base_addr & 0xffff));
1811 base_addr >>= 16;
1812 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1813 base_addr >>= 32;
1814 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1815 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1816 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1817 F_EC_VALID);
1818 return t3_sge_write_context(adapter, id, F_EGRESS);
1819}
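
/*
 * Illustrative usage sketch (not part of the driver, kept out of the
 * build): how a queue-setup path might program an Ethernet egress
 * context.  The context id, response queue id, bus address, and sizes
 * below are hypothetical values.
 */
#if 0
static int example_init_eth_txq(struct adapter *adap, u64 bus_addr)
{
	/* bus_addr must be 4K aligned; 1024 descriptors, GTS enabled,
	 * uP token 0, generation 1, consumer index 0 */
	return t3_sge_init_ecntxt(adap, 0, 1, SGE_CNTXT_ETH, 0, bus_addr,
				  1024, 0, 1, 0);
}
#endif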
1820
1821/**
1822 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1823 * @adapter: the adapter to configure
1824 * @id: the context id
1825 * @gts_enable: whether to enable GTS for the context
1826 * @base_addr: base address of queue
1827 * @size: number of queue entries
1828 * @bsize: size of each buffer for this queue
1829 * @cong_thres: threshold to signal congestion to upstream producers
1830 * @gen: initial generation value for the context
1831 * @cidx: consumer pointer
1832 *
1833 * Initialize an SGE free list context and make it ready for use. The
1834 * caller is responsible for ensuring only one context operation occurs
1835 * at a time.
1836 */
1837int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1838 int gts_enable, u64 base_addr, unsigned int size,
1839 unsigned int bsize, unsigned int cong_thres, int gen,
1840 unsigned int cidx)
1841{
1842 if (base_addr & 0xfff) /* must be 4K aligned */
1843 return -EINVAL;
1844 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1845 return -EBUSY;
1846
1847 base_addr >>= 12;
1848 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1849 base_addr >>= 32;
1850 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1851 V_FL_BASE_HI((u32) base_addr) |
1852 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1853 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1854 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1855 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1856 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1857 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1858 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1859 return t3_sge_write_context(adapter, id, F_FREELIST);
1860}
1861
1862/**
1863 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1864 * @adapter: the adapter to configure
1865 * @id: the context id
1866 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1867 * @base_addr: base address of queue
1868 * @size: number of queue entries
1869 * @fl_thres: threshold for selecting the normal or jumbo free list
1870 * @gen: initial generation value for the context
1871 * @cidx: consumer pointer
1872 *
1873 * Initialize an SGE response queue context and make it ready for use.
1874 * The caller is responsible for ensuring only one context operation
1875 * occurs at a time.
1876 */
1877int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1878 int irq_vec_idx, u64 base_addr, unsigned int size,
1879 unsigned int fl_thres, int gen, unsigned int cidx)
1880{
1881 unsigned int intr = 0;
1882
1883 if (base_addr & 0xfff) /* must be 4K aligned */
1884 return -EINVAL;
1885 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1886 return -EBUSY;
1887
1888 base_addr >>= 12;
1889 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1890 V_CQ_INDEX(cidx));
1891 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1892 base_addr >>= 32;
1893 if (irq_vec_idx >= 0)
1894 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1895 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1896 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1897 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1898 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
1899}
1900
1901/**
1902 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
1903 * @adapter: the adapter to configure
1904 * @id: the context id
1905 * @base_addr: base address of queue
1906 * @size: number of queue entries
1907 * @rspq: response queue for async notifications
1908 * @ovfl_mode: CQ overflow mode
1909 * @credits: completion queue credits
1910 * @credit_thres: the credit threshold
1911 *
1912 * Initialize an SGE completion queue context and make it ready for use.
1913 * The caller is responsible for ensuring only one context operation
1914 * occurs at a time.
1915 */
1916int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
1917 unsigned int size, int rspq, int ovfl_mode,
1918 unsigned int credits, unsigned int credit_thres)
1919{
1920 if (base_addr & 0xfff) /* must be 4K aligned */
1921 return -EINVAL;
1922 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1923 return -EBUSY;
1924
1925 base_addr >>= 12;
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
1927 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
1928 base_addr >>= 32;
1929 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1930 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1931 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
1932 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
1933 V_CQ_CREDIT_THRES(credit_thres));
1934 return t3_sge_write_context(adapter, id, F_CQ);
1935}
1936
1937/**
1938 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
1939 * @adapter: the adapter
1940 * @id: the egress context id
1941 * @enable: enable (1) or disable (0) the context
1942 *
1943 * Enable or disable an SGE egress context. The caller is responsible for
1944 * ensuring only one context operation occurs at a time.
1945 */
1946int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
1947{
1948 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1949 return -EBUSY;
1950
1951 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1952 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1953 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
1954 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
1955 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
1956 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1957 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
1958 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1959 0, 5, 1);
1960}
1961
1962/**
1963 * t3_sge_disable_fl - disable an SGE free-buffer list
1964 * @adapter: the adapter
1965 * @id: the free list context id
1966 *
1967 * Disable an SGE free-buffer list. The caller is responsible for
1968 * ensuring only one context operation occurs at a time.
1969 */
1970int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
1971{
1972 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1973 return -EBUSY;
1974
1975 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
1976 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
1977 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
1978 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
1979 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
1980 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1981 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
1982 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1983 0, 5, 1);
1984}
1985
1986/**
1987 * t3_sge_disable_rspcntxt - disable an SGE response queue
1988 * @adapter: the adapter
1989 * @id: the response queue context id
1990 *
1991 * Disable an SGE response queue. The caller is responsible for
1992 * ensuring only one context operation occurs at a time.
1993 */
1994int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
1995{
1996 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1997 return -EBUSY;
1998
1999 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2000 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2001 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2002 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2004 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2005 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2006 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2007 0, 5, 1);
2008}
2009
2010/**
2011 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2012 * @adapter: the adapter
2013 * @id: the completion queue context id
2014 *
2015 * Disable an SGE completion queue. The caller is responsible for
2016 * ensuring only one context operation occurs at a time.
2017 */
2018int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2019{
2020 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2021 return -EBUSY;
2022
2023 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2024 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2025 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2026 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2028 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2029 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2030 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2031 0, 5, 1);
2032}
2033
2034/**
2035 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2036 * @adapter: the adapter
2037 * @id: the context id
2038 * @op: the operation to perform
 * @credits: the CQ credit count supplied with the command
2039 *
2040 * Perform the selected operation on an SGE completion queue context.
2041 * The caller is responsible for ensuring only one context operation
2042 * occurs at a time.
2043 */
2044int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2045 unsigned int credits)
2046{
2047 u32 val;
2048
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2050 return -EBUSY;
2051
2052 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2053 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2054 V_CONTEXT(id) | F_CQ);
2055 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2056 0, 5, 1, &val))
2057 return -EIO;
2058
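	/*
	 * Ops 2-6 report the current CQ index.  Rev 0 parts do not make
	 * the index available in the command register, so issue an
	 * explicit context read to retrieve it.
	 */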
2059 if (op >= 2 && op < 7) {
2060 if (adapter->params.rev > 0)
2061 return G_CQ_INDEX(val);
2062
2063 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2064 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2065 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2066 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2067 return -EIO;
2068 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2069 }
2070 return 0;
2071}
2072
2073/**
2074 * t3_sge_read_context - read an SGE context
2075 * @type: the context type
2076 * @adapter: the adapter
2077 * @id: the context id
2078 * @data: holds the retrieved context
2079 *
2080 * Read an SGE context. The caller is responsible for ensuring
2081 * only one context operation occurs at a time.
2082 */
2083static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2084 unsigned int id, u32 data[4])
2085{
2086 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2087 return -EBUSY;
2088
2089 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2090 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2091 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2092 5, 1))
2093 return -EIO;
2094 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2095 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2096 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2097 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2098 return 0;
2099}
2100
2101/**
2102 * t3_sge_read_ecntxt - read an SGE egress context
2103 * @adapter: the adapter
2104 * @id: the context id
2105 * @data: holds the retrieved context
2106 *
2107 * Read an SGE egress context. The caller is responsible for ensuring
2108 * only one context operation occurs at a time.
2109 */
2110int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2111{
2112 if (id >= 65536)
2113 return -EINVAL;
2114 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2115}
2116
2117/**
2118 * t3_sge_read_cq - read an SGE CQ context
2119 * @adapter: the adapter
2120 * @id: the context id
2121 * @data: holds the retrieved context
2122 *
2123 * Read an SGE CQ context. The caller is responsible for ensuring
2124 * only one context operation occurs at a time.
2125 */
2126int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2127{
2128 if (id >= 65536)
2129 return -EINVAL;
2130 return t3_sge_read_context(F_CQ, adapter, id, data);
2131}
2132
2133/**
2134 * t3_sge_read_fl - read an SGE free-list context
2135 * @adapter: the adapter
2136 * @id: the context id
2137 * @data: holds the retrieved context
2138 *
2139 * Read an SGE free-list context. The caller is responsible for ensuring
2140 * only one context operation occurs at a time.
2141 */
2142int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2143{
2144 if (id >= SGE_QSETS * 2)
2145 return -EINVAL;
2146 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2147}
2148
2149/**
2150 * t3_sge_read_rspq - read an SGE response queue context
2151 * @adapter: the adapter
2152 * @id: the context id
2153 * @data: holds the retrieved context
2154 *
2155 * Read an SGE response queue context. The caller is responsible for
2156 * ensuring only one context operation occurs at a time.
2157 */
2158int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2159{
2160 if (id >= SGE_QSETS)
2161 return -EINVAL;
2162 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2163}
2164
2165/**
2166 * t3_config_rss - configure Rx packet steering
2167 * @adapter: the adapter
2168 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2169 * @cpus: values for the CPU lookup table (0xff terminated)
2170 * @rspq: values for the response queue lookup table (0xffff terminated)
2171 *
2172 * Programs the receive packet steering logic. @cpus and @rspq provide
2173 * the values for the CPU and response queue lookup tables. If they
2174 * provide fewer values than the size of the tables the supplied values
2175 * are used repeatedly until the tables are fully populated.
2176 */
2177void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2178 const u8 *cpus, const u16 *rspq)
2179{
2180 int i, j, cpu_idx = 0, q_idx = 0;
2181
2182 if (cpus)
2183 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2184 u32 val = i << 16;
2185
2186 for (j = 0; j < 2; ++j) {
2187 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2188 if (cpus[cpu_idx] == 0xff)
2189 cpu_idx = 0;
2190 }
2191 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2192 }
2193
2194 if (rspq)
2195 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2196 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2197 (i << 16) | rspq[q_idx++]);
2198 if (rspq[q_idx] == 0xffff)
2199 q_idx = 0;
2200 }
2201
2202 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2203}
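
/*
 * Illustrative usage sketch (not built): programming a two-queue RSS
 * spread.  Both tables carry the terminators t3_config_rss() expects,
 * and the config flags shown are examples drawn from a typical NIC
 * setup, not a required combination.
 */
#if 0
static void example_config_rss(struct adapter *adap)
{
	static const u8 cpus[] = { 0, 1, 0xff };	/* 0xff terminated */
	static const u16 rspq[] = { 0, 1, 0xffff };	/* 0xffff terminated */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | V_RRCPLCPUSIZE(6),
		      cpus, rspq);
}
#endif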
2204
2205/**
2206 * t3_read_rss - read the contents of the RSS tables
2207 * @adapter: the adapter
2208 * @lkup: holds the contents of the RSS lookup table
2209 * @map: holds the contents of the RSS map table
2210 *
2211 * Reads the contents of the receive packet steering tables.
2212 */
2213 int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2214{
2215 int i;
2216 u32 val;
2217
2218 if (lkup)
2219 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2220 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2221 0xffff0000 | i);
2222 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2223 if (!(val & 0x80000000))
2224 return -EAGAIN;
2225 *lkup++ = val;
2226 *lkup++ = (val >> 8);
2227 }
2228
2229 if (map)
2230 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2231 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2232 0xffff0000 | i);
2233 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2234 if (!(val & 0x80000000))
2235 return -EAGAIN;
2236 *map++ = val;
2237 }
2238 return 0;
2239}
2240
2241/**
2242 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2243 * @adap: the adapter
2244 * @enable: 1 to select offload mode, 0 for regular NIC
2245 *
2246 * Switches TP to NIC/offload mode.
2247 */
2248void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2249{
2250 if (is_offload(adap) || !enable)
2251 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2252 V_NICMODE(!enable));
2253}
2254
2255/**
2256 * pm_num_pages - calculate the number of pages of the payload memory
2257 * @mem_size: the size of the payload memory
2258 * @pg_size: the size of each payload memory page
2259 *
2260 * Calculate the number of pages, each of the given size, that fit in a
2261 * memory of the specified size, respecting the HW requirement that the
2262 * number of pages must be a multiple of 24.
2263 */
2264static inline unsigned int pm_num_pages(unsigned int mem_size,
2265 unsigned int pg_size)
2266{
2267 unsigned int n = mem_size / pg_size;
2268
2269 return n - n % 24;
2270}
2271
2272#define mem_region(adap, start, size, reg) \
2273 t3_write_reg((adap), A_ ## reg, (start)); \
2274 start += size
2275
2276/*
2277 * partition_mem - partition memory and configure TP memory settings
2278 * @adap: the adapter
2279 * @p: the TP parameters
2280 *
2281 * Partitions context and payload memory and configures TP's memory
2282 * registers.
2283 */
2284static void partition_mem(struct adapter *adap, const struct tp_params *p)
2285{
2286 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2287 unsigned int timers = 0, timers_shift = 22;
2288
2289 if (adap->params.rev > 0) {
2290 if (tids <= 16 * 1024) {
2291 timers = 1;
2292 timers_shift = 16;
2293 } else if (tids <= 64 * 1024) {
2294 timers = 2;
2295 timers_shift = 18;
2296 } else if (tids <= 256 * 1024) {
2297 timers = 3;
2298 timers_shift = 20;
2299 }
2300 }
2301
2302 t3_write_reg(adap, A_TP_PMM_SIZE,
2303 p->chan_rx_size | (p->chan_tx_size >> 16));
2304
2305 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2306 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2307 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2308 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2309 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2310
2311 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2312 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2313 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2314
2315 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2316 /* Add a bit of headroom and make multiple of 24 */
2317 pstructs += 48;
2318 pstructs -= pstructs % 24;
2319 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2320
2321 m = tids * TCB_SIZE;
2322 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2323 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2324 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2325 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2326 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2327 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2328 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2329 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2330
2331 m = (m + 4095) & ~0xfff;
2332 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2333 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2334
2335 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2336 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2337 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2338 if (tids < m)
2339 adap->params.mc5.nservers += m - tids;
2340}
2341
2342static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2343 u32 val)
2344{
2345 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2346 t3_write_reg(adap, A_TP_PIO_DATA, val);
2347}
2348
2349static void tp_config(struct adapter *adap, const struct tp_params *p)
2350{
2351 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2352 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2353 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2354 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2355 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2356 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2357 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2358 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2359 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2360 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2361 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2362 F_IPV6ENABLE | F_NICMODE);
2363 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2364 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2365 t3_set_reg_field(adap, A_TP_PARA_REG6,
2366 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
2367 0);
2368
2369 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2370 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
2371 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
2372 F_RXCONGESTIONMODE);
2373 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2374
2375 if (adap->params.rev > 0) {
2376 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2377 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2378 F_TXPACEAUTO);
2379 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2380 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2381 } else
2382 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2383
2384 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
2385 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
2386 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
2387}
2388
2389/* Desired TP timer resolution in usec */
2390#define TP_TMR_RES 50
2391
2392/* TCP timer values in ms */
2393#define TP_DACK_TIMER 50
2394#define TP_RTO_MIN 250
2395
2396/**
2397 * tp_set_timers - set TP timing parameters
2398 * @adap: the adapter to set
2399 * @core_clk: the core clock frequency in Hz
2400 *
2401 * Set TP's timing parameters, such as the various timer resolutions and
2402 * the TCP timer values.
2403 */
2404static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2405{
2406 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2407 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2408 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2409 unsigned int tps = core_clk >> tre;
2410
2411 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2412 V_DELAYEDACKRESOLUTION(dack_re) |
2413 V_TIMESTAMPRESOLUTION(tstamp_re));
2414 t3_write_reg(adap, A_TP_DACK_TIMER,
2415 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2416 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2417 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2418 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2419 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2420 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2421 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2422 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2423 V_KEEPALIVEMAX(9));
2424
2425#define SECONDS * tps
2426
2427 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2428 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2429 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2430 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2431 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2432 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2433 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2434 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2435 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2436
2437#undef SECONDS
2438}
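
/*
 * Worked example (hypothetical 200MHz core clock): core_clk / 20000 =
 * 10000 and fls(10000) - 1 = 13, so tre = 13 and tps = 200e6 >> 13 ~=
 * 24414 ticks/s, i.e. a ~41us timer resolution, within the desired 50us.
 */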
2439
2440/**
2441 * t3_tp_set_coalescing_size - set receive coalescing size
2442 * @adap: the adapter
2443 * @size: the receive coalescing size
2444 * @psh: whether a set PSH bit should deliver coalesced data
2445 *
2446 * Set the receive coalescing size and PSH bit handling.
2447 */
2448int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2449{
2450 u32 val;
2451
2452 if (size > MAX_RX_COALESCING_LEN)
2453 return -EINVAL;
2454
2455 val = t3_read_reg(adap, A_TP_PARA_REG3);
2456 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2457
2458 if (size) {
2459 val |= F_RXCOALESCEENABLE;
2460 if (psh)
2461 val |= F_RXCOALESCEPSHEN;
2462 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2463 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2464 }
2465 t3_write_reg(adap, A_TP_PARA_REG3, val);
2466 return 0;
2467}
2468
2469/**
2470 * t3_tp_set_max_rxsize - set the max receive size
2471 * @adap: the adapter
2472 * @size: the max receive size
2473 *
2474 * Set TP's max receive size. This is the limit that applies when
2475 * receive coalescing is disabled.
2476 */
2477void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2478{
2479 t3_write_reg(adap, A_TP_PARA_REG7,
2480 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2481}
2482
2483static void __devinit init_mtus(unsigned short mtus[])
2484{
2485 /*
2486 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2487 * it can accommodate max size TCP/IP headers when SACK and timestamps
2488 * are enabled and still have at least 8 bytes of payload.
2489 */
2490 mtus[0] = 88;
2491 mtus[1] = 256;
2492 mtus[2] = 512;
2493 mtus[3] = 576;
2494 mtus[4] = 808;
2495 mtus[5] = 1024;
2496 mtus[6] = 1280;
2497 mtus[7] = 1492;
2498 mtus[8] = 1500;
2499 mtus[9] = 2002;
2500 mtus[10] = 2048;
2501 mtus[11] = 4096;
2502 mtus[12] = 4352;
2503 mtus[13] = 8192;
2504 mtus[14] = 9000;
2505 mtus[15] = 9600;
2506}
2507
2508/*
2509 * Initial congestion control parameters.
2510 */
2511static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2512{
2513 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2514 a[9] = 2;
2515 a[10] = 3;
2516 a[11] = 4;
2517 a[12] = 5;
2518 a[13] = 6;
2519 a[14] = 7;
2520 a[15] = 8;
2521 a[16] = 9;
2522 a[17] = 10;
2523 a[18] = 14;
2524 a[19] = 17;
2525 a[20] = 21;
2526 a[21] = 25;
2527 a[22] = 30;
2528 a[23] = 35;
2529 a[24] = 45;
2530 a[25] = 60;
2531 a[26] = 80;
2532 a[27] = 100;
2533 a[28] = 200;
2534 a[29] = 300;
2535 a[30] = 400;
2536 a[31] = 500;
2537
2538 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2539 b[9] = b[10] = 1;
2540 b[11] = b[12] = 2;
2541 b[13] = b[14] = b[15] = b[16] = 3;
2542 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2543 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2544 b[28] = b[29] = 6;
2545 b[30] = b[31] = 7;
2546}
2547
2548/* The minimum additive increment value for the congestion control table */
2549#define CC_MIN_INCR 2U
2550
2551/**
2552 * t3_load_mtus - write the MTU and congestion control HW tables
2553 * @adap: the adapter
2554 * @mtus: the unrestricted values for the MTU table
2555 * @alpha: the values for the congestion control alpha parameter
2556 * @beta: the values for the congestion control beta parameter
2557 * @mtu_cap: the maximum permitted effective MTU
2558 *
2559 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2560 * Update the high-speed congestion control table with the supplied alpha,
2561 * beta, and MTUs.
2562 */
2563void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2564 unsigned short alpha[NCCTRL_WIN],
2565 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2566{
2567 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2568 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2569 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2570 28672, 40960, 57344, 81920, 114688, 163840, 229376
2571 };
2572
2573 unsigned int i, w;
2574
2575 for (i = 0; i < NMTUS; ++i) {
2576 unsigned int mtu = min(mtus[i], mtu_cap);
2577 unsigned int log2 = fls(mtu);
2578
2579 if (!(mtu & ((1 << log2) >> 2))) /* round */
2580 log2--;
2581 t3_write_reg(adap, A_TP_MTU_TABLE,
2582 (i << 24) | (log2 << 16) | mtu);
2583
2584 for (w = 0; w < NCCTRL_WIN; ++w) {
2585 unsigned int inc;
2586
2587 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2588 CC_MIN_INCR);
2589
2590 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2591 (w << 16) | (beta[w] << 13) | inc);
2592 }
2593 }
2594}
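
/*
 * Worked example: for mtu = 1500, fls(1500) = 11 and bit 9 of 1500 is
 * clear, so log2 rounds down to 10.  With alpha = 1 and avg_pkts[0] = 2,
 * the window-0 increment is max((1500 - 40) * 1 / 2, 2) = 730.
 */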
2595
2596/**
2597 * t3_read_hw_mtus - returns the values in the HW MTU table
2598 * @adap: the adapter
2599 * @mtus: where to store the HW MTU values
2600 *
2601 * Reads the HW MTU table.
2602 */
2603void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2604{
2605 int i;
2606
2607 for (i = 0; i < NMTUS; ++i) {
2608 unsigned int val;
2609
2610 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2611 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2612 mtus[i] = val & 0x3fff;
2613 }
2614}
2615
2616/**
2617 * t3_get_cong_cntl_tab - reads the congestion control table
2618 * @adap: the adapter
2619 * @incr: where to store the additive increment values
2620 *
2621 * Reads the additive increments programmed into the HW congestion
2622 * control table.
2623 */
2624void t3_get_cong_cntl_tab(struct adapter *adap,
2625 unsigned short incr[NMTUS][NCCTRL_WIN])
2626{
2627 unsigned int mtu, w;
2628
2629 for (mtu = 0; mtu < NMTUS; ++mtu)
2630 for (w = 0; w < NCCTRL_WIN; ++w) {
2631 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2632 0xffff0000 | (mtu << 5) | w);
2633 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2634 0x1fff;
2635 }
2636}
2637
2638/**
2639 * t3_tp_get_mib_stats - read TP's MIB counters
2640 * @adap: the adapter
2641 * @tps: holds the returned counter values
2642 *
2643 * Returns the values of TP's MIB counters.
2644 */
2645void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2646{
2647 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2648 sizeof(*tps) / sizeof(u32), 0);
2649}
2650
2651#define ulp_region(adap, name, start, len) \
2652 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2653 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2654 (start) + (len) - 1); \
2655 start += len
2656
2657#define ulptx_region(adap, name, start, len) \
2658 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2659 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2660 (start) + (len) - 1)
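
/*
 * Note: ulptx_region() deliberately leaves @start unchanged, so each
 * ULP TX window overlays the ULP RX region programmed immediately after
 * it; the TPT/STAG and PBL tables are shared between the RX and TX
 * views of payload memory.
 */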
2661
2662static void ulp_config(struct adapter *adap, const struct tp_params *p)
2663{
2664 unsigned int m = p->chan_rx_size;
2665
2666 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2667 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2668 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2669 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2670 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2671 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2672 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2673 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2674}
2675
2676void t3_config_trace_filter(struct adapter *adapter,
2677 const struct trace_params *tp, int filter_index,
2678 int invert, int enable)
2679{
2680 u32 addr, key[4], mask[4];
2681
2682 key[0] = tp->sport | (tp->sip << 16);
2683 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2684 key[2] = tp->dip;
2685 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2686
2687 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2688 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2689 mask[2] = tp->dip_mask;
2690 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2691
2692 if (invert)
2693 key[3] |= (1 << 29);
2694 if (enable)
2695 key[3] |= (1 << 28);
2696
2697 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2698 tp_wr_indirect(adapter, addr++, key[0]);
2699 tp_wr_indirect(adapter, addr++, mask[0]);
2700 tp_wr_indirect(adapter, addr++, key[1]);
2701 tp_wr_indirect(adapter, addr++, mask[1]);
2702 tp_wr_indirect(adapter, addr++, key[2]);
2703 tp_wr_indirect(adapter, addr++, mask[2]);
2704 tp_wr_indirect(adapter, addr++, key[3]);
2705 tp_wr_indirect(adapter, addr, mask[3]);
2706 t3_read_reg(adapter, A_TP_PIO_DATA);
2707}
2708
2709/**
2710 * t3_config_sched - configure a HW traffic scheduler
2711 * @adap: the adapter
2712 * @kbps: target rate in Kbps
2713 * @sched: the scheduler index
2714 *
2715 * Configure a HW scheduler for the target rate.
2716 */
2717int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2718{
2719 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2720 unsigned int clk = adap->params.vpd.cclk * 1000;
2721 unsigned int selected_cpt = 0, selected_bpt = 0;
2722
2723 if (kbps > 0) {
2724 kbps *= 125; /* -> bytes */
2725 for (cpt = 1; cpt <= 255; cpt++) {
2726 tps = clk / cpt;
2727 bpt = (kbps + tps / 2) / tps;
2728 if (bpt > 0 && bpt <= 255) {
2729 v = bpt * tps;
2730 delta = v >= kbps ? v - kbps : kbps - v;
2731 if (delta <= mindelta) {
2732 mindelta = delta;
2733 selected_cpt = cpt;
2734 selected_bpt = bpt;
2735 }
2736 } else if (selected_cpt)
2737 break;
2738 }
2739 if (!selected_cpt)
2740 return -EINVAL;
2741 }
2742 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2743 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2744 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2745 if (sched & 1)
2746 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2747 else
2748 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2749 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2750 return 0;
2751}
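
/*
 * Worked example (hypothetical 200MHz core clock): for kbps = 800000
 * the target is 100e6 bytes/s; cpt = 2 gives tps = 100e6 and
 * bpt = (100e6 + 50e6) / 100e6 = 1, an exact match (delta = 0).
 */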
2752
2753static int tp_init(struct adapter *adap, const struct tp_params *p)
2754{
2755 int busy = 0;
2756
2757 tp_config(adap, p);
2758 t3_set_vlan_accel(adap, 3, 0);
2759
2760 if (is_offload(adap)) {
2761 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2762 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2763 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2764 0, 1000, 5);
2765 if (busy)
2766 CH_ERR(adap, "TP initialization timed out\n");
2767 }
2768
2769 if (!busy)
2770 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2771 return busy;
2772}
2773
2774int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2775{
2776 if (port_mask & ~((1 << adap->params.nports) - 1))
2777 return -EINVAL;
2778 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2779 port_mask << S_PORT0ACTIVE);
2780 return 0;
2781}
2782
2783/*
2784 * Perform the bits of HW initialization that are dependent on the number
2785 * of available ports.
2786 */
2787static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2788{
2789 int i;
2790
2791 if (nports == 1) {
2792 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2793 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2794 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2795 F_PORT0ACTIVE | F_ENFORCEPKT);
2796 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
2797 } else {
2798 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2799 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2800 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2801 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2802 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2803 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2804 F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2806 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2807 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2808 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2809 for (i = 0; i < 16; i++)
2810 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2811 (i << 16) | 0x1010);
2812 }
2813}
2814
2815static int calibrate_xgm(struct adapter *adapter)
2816{
2817 if (uses_xaui(adapter)) {
2818 unsigned int v, i;
2819
2820 for (i = 0; i < 5; ++i) {
2821 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2822 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2823 msleep(1);
2824 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2825 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2826 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2827 V_XAUIIMP(G_CALIMP(v) >> 2));
2828 return 0;
2829 }
2830 }
2831 CH_ERR(adapter, "MAC calibration failed\n");
2832 return -1;
2833 } else {
2834 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2835 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2836 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2837 F_XGM_IMPSETUPDATE);
2838 }
2839 return 0;
2840}
2841
2842static void calibrate_xgm_t3b(struct adapter *adapter)
2843{
2844 if (!uses_xaui(adapter)) {
2845 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2846 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2847 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2848 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2849 F_XGM_IMPSETUPDATE);
2850 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2851 0);
2852 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2853 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
2854 }
2855}
2856
2857struct mc7_timing_params {
2858 unsigned char ActToPreDly;
2859 unsigned char ActToRdWrDly;
2860 unsigned char PreCyc;
2861 unsigned char RefCyc[5];
2862 unsigned char BkCyc;
2863 unsigned char WrToRdDly;
2864 unsigned char RdToWrDly;
2865};
2866
2867/*
2868 * Write a value to a register and check that the write completed. These
2869 * writes normally complete in a cycle or two, so one read should suffice.
2870 * The very first read exists to flush the posted write to the device.
2871 */
2872static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
2873{
2874 t3_write_reg(adapter, addr, val);
2875 t3_read_reg(adapter, addr); /* flush */
2876 if (!(t3_read_reg(adapter, addr) & F_BUSY))
2877 return 0;
2878 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
2879 return -EIO;
2880}
2881
2882static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
2883{
2884 static const unsigned int mc7_mode[] = {
2885 0x632, 0x642, 0x652, 0x432, 0x442
2886 };
2887 static const struct mc7_timing_params mc7_timings[] = {
2888 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
2889 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
2890 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
2891 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
2892 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
2893 };
2894
2895 u32 val;
2896 unsigned int width, density, slow, attempts;
2897 struct adapter *adapter = mc7->adapter;
2898 const struct mc7_timing_params *p = &mc7_timings[mem_type];
2899
2900 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
2901 slow = val & F_SLOW;
2902 width = G_WIDTH(val);
2903 density = G_DEN(val);
2904
2905 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
2906 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2907 msleep(1);
2908
2909 if (!slow) {
2910 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
2911 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
2912 msleep(1);
2913 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
2914 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
2915 CH_ERR(adapter, "%s MC7 calibration timed out\n",
2916 mc7->name);
2917 goto out_fail;
2918 }
2919 }
2920
2921 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
2922 V_ACTTOPREDLY(p->ActToPreDly) |
2923 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
2924 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
2925 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
2926
2927 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
2928 val | F_CLKEN | F_TERM150);
2929 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
2930
2931 if (!slow)
2932 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
2933 F_DLLENB);
2934 udelay(1);
2935
2936 val = slow ? 3 : 6;
2937 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2938 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
2939 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
2940 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2941 goto out_fail;
2942
2943 if (!slow) {
2944 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
2945 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
2946 udelay(5);
2947 }
2948
2949 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
2950 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2951 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
2952 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
2953 mc7_mode[mem_type]) ||
2954 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
2955 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
2956 goto out_fail;
2957
2958 /* mc7_clock is in kHz; compute the clocks per 7.8125us refresh interval */
2959 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* kHz * 7812.5ns */
2960 mc7_clock /= 1000000; /* -> clock cycles per refresh interval */
2961
2962 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
2963 F_PERREFEN | V_PREREFDIV(mc7_clock));
2964 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
2965
2966 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
2967 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
2968 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
2969 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
2970 (mc7->size << width) - 1);
2971 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
2972 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
2973
2974 attempts = 50;
2975 do {
2976 msleep(250);
2977 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
2978 } while ((val & F_BUSY) && --attempts);
2979 if (val & F_BUSY) {
2980 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
2981 goto out_fail;
2982 }
2983
2984 /* Enable normal memory accesses. */
2985 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
2986 return 0;
2987
2988out_fail:
2989 return -1;
2990}
2991
2992static void config_pcie(struct adapter *adap)
2993{
2994 static const u16 ack_lat[4][6] = {
2995 {237, 416, 559, 1071, 2095, 4143},
2996 {128, 217, 289, 545, 1057, 2081},
2997 {73, 118, 154, 282, 538, 1050},
2998 {67, 107, 86, 150, 278, 534}
2999 };
3000 static const u16 rpl_tmr[4][6] = {
3001 {711, 1248, 1677, 3213, 6285, 12429},
3002 {384, 651, 867, 1635, 3171, 6243},
3003 {219, 354, 462, 846, 1614, 3150},
3004 {201, 321, 258, 450, 834, 1602}
3005 };
3006
3007 u16 val;
3008 unsigned int log2_width, pldsize;
3009 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3010
3011 pci_read_config_word(adap->pdev,
3012 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3013 &val);
3014 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3015 pci_read_config_word(adap->pdev,
3016 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3017 &val);
3018
3019 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3020 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3021 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3022 log2_width = fls(adap->params.pci.width) - 1;
3023 acklat = ack_lat[log2_width][pldsize];
3024 if (val & 1) /* check LOsEnable */
3025 acklat += fst_trn_tx * 4;
3026 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3027
3028 if (adap->params.rev == 0)
3029 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3030 V_T3A_ACKLAT(M_T3A_ACKLAT),
3031 V_T3A_ACKLAT(acklat));
3032 else
3033 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3034 V_ACKLAT(acklat));
3035
3036 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3037 V_REPLAYLMT(rpllmt));
3038
3039 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3040 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3041}
3042
3043/*
3044 * Initialize and configure T3 HW modules. This performs the
3045 * initialization steps that need to be done once after a card is reset.
3046 * MAC and PHY initialization is handled separately whenever a port is enabled.
3047 *
3048 * fw_params are passed to FW and their value is platform dependent. Only the
3049 * top 8 bits are available for use, the rest must be 0.
3050 */
3051int t3_init_hw(struct adapter *adapter, u32 fw_params)
3052{
3053 int err = -EIO, attempts = 100;
3054 const struct vpd_params *vpd = &adapter->params.vpd;
3055
3056 if (adapter->params.rev > 0)
3057 calibrate_xgm_t3b(adapter);
3058 else if (calibrate_xgm(adapter))
3059 goto out_err;
3060
3061 if (vpd->mclk) {
3062 partition_mem(adapter, &adapter->params.tp);
3063
3064 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3065 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3066 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3067 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3068 adapter->params.mc5.nfilters,
3069 adapter->params.mc5.nroutes))
3070 goto out_err;
3071 }
3072
3073 if (tp_init(adapter, &adapter->params.tp))
3074 goto out_err;
3075
3076 t3_tp_set_coalescing_size(adapter,
3077 min(adapter->params.sge.max_pkt_size,
3078 MAX_RX_COALESCING_LEN), 1);
3079 t3_tp_set_max_rxsize(adapter,
3080 min(adapter->params.sge.max_pkt_size, 16384U));
3081 ulp_config(adapter, &adapter->params.tp);
3082
3083 if (is_pcie(adapter))
3084 config_pcie(adapter);
3085 else
3086 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3087
3088 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
3089 init_hw_for_avail_ports(adapter, adapter->params.nports);
3090 t3_sge_init(adapter, &adapter->params.sge);
3091
3092 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3093 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3094 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3095 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3096
3097 do { /* wait for uP to initialize */
3098 msleep(20);
3099 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3100 if (!attempts)
3101 goto out_err;
3102
3103 err = 0;
3104out_err:
3105 return err;
3106}
3107
3108/**
3109 * get_pci_mode - determine a card's PCI mode
3110 * @adapter: the adapter
3111 * @p: where to store the PCI settings
3112 *
3113 * Determines a card's PCI mode and associated parameters, such as speed
3114 * and width.
3115 */
3116static void __devinit get_pci_mode(struct adapter *adapter,
3117 struct pci_params *p)
3118{
3119 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3120 u32 pci_mode, pcie_cap;
3121
3122 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3123 if (pcie_cap) {
3124 u16 val;
3125
3126 p->variant = PCI_VARIANT_PCIE;
3127 p->pcie_cap_addr = pcie_cap;
3128 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3129 &val);
3130 p->width = (val >> 4) & 0x3f;
3131 return;
3132 }
3133
3134 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3135 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3136 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3137 pci_mode = G_PCIXINITPAT(pci_mode);
3138 if (pci_mode == 0)
3139 p->variant = PCI_VARIANT_PCI;
3140 else if (pci_mode < 4)
3141 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3142 else if (pci_mode < 8)
3143 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3144 else
3145 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3146}
3147
3148/**
3149 * init_link_config - initialize a link's SW state
3150 * @lc: structure holding the link state
3151 * @ai: information about the current card
3152 *
3153 * Initializes the SW state maintained for each link, including the link's
3154 * capabilities and default speed/duplex/flow-control/autonegotiation
3155 * settings.
3156 */
3157static void __devinit init_link_config(struct link_config *lc,
3158 unsigned int caps)
3159{
3160 lc->supported = caps;
3161 lc->requested_speed = lc->speed = SPEED_INVALID;
3162 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3163 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3164 if (lc->supported & SUPPORTED_Autoneg) {
3165 lc->advertising = lc->supported;
3166 lc->autoneg = AUTONEG_ENABLE;
3167 lc->requested_fc |= PAUSE_AUTONEG;
3168 } else {
3169 lc->advertising = 0;
3170 lc->autoneg = AUTONEG_DISABLE;
3171 }
3172}
3173
3174/**
3175 * mc7_calc_size - calculate MC7 memory size
3176 * @cfg: the MC7 configuration
3177 *
3178 * Calculates the size of an MC7 memory in bytes from the value of its
3179 * configuration register.
3180 */
3181static unsigned int __devinit mc7_calc_size(u32 cfg)
3182{
3183 unsigned int width = G_WIDTH(cfg);
3184 unsigned int banks = !!(cfg & F_BKS) + 1;
3185 unsigned int org = !!(cfg & F_ORG) + 1;
3186 unsigned int density = G_DEN(cfg);
3187 unsigned int MBs = ((256 << density) * banks) / (org << width);
3188
3189 return MBs << 20;
3190}
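
/*
 * Worked example: a cfg decoding to density = 1, two banks, org = 1,
 * and a width code of 2 gives ((256 << 1) * 2) / (1 << 2) = 256, i.e.
 * a 256MB part.
 */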
3191
3192static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3193 unsigned int base_addr, const char *name)
3194{
3195 u32 cfg;
3196
3197 mc7->adapter = adapter;
3198 mc7->name = name;
3199 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3200 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3201 mc7->size = mc7_calc_size(cfg);
3202 mc7->width = G_WIDTH(cfg);
3203}
3204
3205void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3206{
3207 mac->adapter = adapter;
3208 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3209 mac->nucast = 1;
3210
3211 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3212 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3213 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3214 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3215 F_ENRGMII, 0);
3216 }
3217}
3218
3219void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3220{
3221 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3222
3223 mi1_init(adapter, ai);
3224 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3225 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3226 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3227 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3228
3229 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3230 val |= F_ENRGMII;
3231
3232 /* Enable MAC clocks so we can access the registers */
3233 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3234 t3_read_reg(adapter, A_XGM_PORT_CFG);
3235
3236 val |= F_CLKDIVRESET_;
3237 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3238 t3_read_reg(adapter, A_XGM_PORT_CFG);
3239 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3240 t3_read_reg(adapter, A_XGM_PORT_CFG);
3241}
3242
3243/*
3244 * Reset the adapter. PCIe cards lose their config space during reset, PCI-X
3245 * ones don't.
3246 */
3247int t3_reset_adapter(struct adapter *adapter)
3248{
3249 int i;
3250 uint16_t devid = 0;
3251
3252 if (is_pcie(adapter))
3253 pci_save_state(adapter->pdev);
3254 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3255
3256 /*
3257 * Delay to give the device some time to reset fully.
3258 * XXX The delay time should be modified.
3259 */
3260 for (i = 0; i < 10; i++) {
3261 msleep(50);
3262 pci_read_config_word(adapter->pdev, 0x00, &devid);
3263 if (devid == 0x1425)
3264 break;
3265 }
3266
3267 if (devid != 0x1425)
3268 return -1;
3269
3270 if (is_pcie(adapter))
3271 pci_restore_state(adapter->pdev);
3272 return 0;
3273}
3274
3275/*
3276 * Initialize adapter SW state for the various HW modules, set initial values
3277 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3278 * interface.
3279 */
3280int __devinit t3_prep_adapter(struct adapter *adapter,
3281 const struct adapter_info *ai, int reset)
3282{
3283 int ret;
3284 unsigned int i, j = 0;
3285
3286 get_pci_mode(adapter, &adapter->params.pci);
3287
3288 adapter->params.info = ai;
3289 adapter->params.nports = ai->nports;
3290 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3291 adapter->params.linkpoll_period = 0;
3292 adapter->params.stats_update_period = is_10G(adapter) ?
3293 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3294 adapter->params.pci.vpd_cap_addr =
3295 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3296 ret = get_vpd_params(adapter, &adapter->params.vpd);
3297 if (ret < 0)
3298 return ret;
3299
3300 if (reset && t3_reset_adapter(adapter))
3301 return -1;
3302
3303 t3_sge_prep(adapter, &adapter->params.sge);
3304
3305 if (adapter->params.vpd.mclk) {
3306 struct tp_params *p = &adapter->params.tp;
3307
3308 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3309 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3310 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3311
3312 p->nchan = ai->nports;
3313 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3314 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3315 p->cm_size = t3_mc7_size(&adapter->cm);
3316 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3317 p->chan_tx_size = p->pmtx_size / p->nchan;
3318 p->rx_pg_size = 64 * 1024;
3319 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3320 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3321 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3322 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3323 adapter->params.rev > 0 ? 12 : 6;
3324
3325 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3326 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3327 DEFAULT_NFILTERS : 0;
3328 adapter->params.mc5.nroutes = 0;
3329 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3330
3331 init_mtus(adapter->params.mtus);
3332 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3333 }
3334
3335 early_hw_init(adapter, ai);
3336
3337 for_each_port(adapter, i) {
3338 u8 hw_addr[6];
3339 struct port_info *p = adap2pinfo(adapter, i);
3340
3341 while (!adapter->params.vpd.port_type[j])
3342 ++j;
3343
3344 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3345 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3346 ai->mdio_ops);
3347 mac_prep(&p->mac, adapter, j);
3348 ++j;
3349
3350 /*
3351 * The VPD EEPROM stores the base Ethernet address for the
3352 * card. A port's address is derived from the base by adding
3353 * the port's index to the base's low octet.
3354 */
3355 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3356 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3357
3358 memcpy(adapter->port[i]->dev_addr, hw_addr,
3359 ETH_ALEN);
3360 memcpy(adapter->port[i]->perm_addr, hw_addr,
3361 ETH_ALEN);
3362 init_link_config(&p->link_config, p->port_type->caps);
3363 p->phy.ops->power_down(&p->phy, 1);
3364 if (!(p->port_type->caps & SUPPORTED_IRQ))
3365 adapter->params.linkpoll_period = 10;
3366 }
3367
3368 return 0;
3369}
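
/*
 * Illustrative bring-up order (sketch only; the real sequencing lives in
 * the driver's probe and open paths): SW state is prepared once at probe
 * time, while t3_init_hw() runs after each full reset.
 */
#if 0
static int example_bring_up(struct adapter *adapter,
			    const struct adapter_info *ai)
{
	int err = t3_prep_adapter(adapter, ai, 1);

	return err ? err : t3_init_hw(adapter, 0);
}
#endif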
3370
3371void t3_led_ready(struct adapter *adapter)
3372{
3373 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3374 F_GPIO0_OUT_VAL);
3375}