Merge branch 'fix/hda' into for-linus
[deliverable/linux.git] / drivers / net / cxgb3 / t3_hw.c
CommitLineData
4d22de3e 1/*
a02d44a0 2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
1d68e93d
DLR
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
1d68e93d
DLR
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
4d22de3e
DLR
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
f2c6879e
DLR
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
4d22de3e
DLR
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
b881955b 65 return -EAGAIN;
4d22de3e
DLR
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
9265fabf
SH
122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
4d22de3e
DLR
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.  Each 64-bit word is assembled from one or more narrower
 *	backdoor reads depending on the memory interface width (mc7->width).
 *	Returns 0 on success, -EINVAL for an out-of-range request, or -EIO
 *	if a backdoor operation stays busy.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/* per-width data extraction parameters, indexed by mc7->width */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* reject reads that start or run past the end of the memory */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* convert the 64-bit word index into a backdoor byte address;
	 * narrower interfaces need more (smaller) accesses per word */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* issue a backdoor read and poll until not busy */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full-width interface: DATA0/DATA1 hold the
				 * whole 64-bit word in one access */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrower interface: extract the valid bits
				 * and merge them at this access's position */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
04497982 197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
4d22de3e 198
4d22de3e
DLR
199 t3_write_reg(adap, A_MI1_CFG, val);
200}
201
/* max polls of A_MI1_OP for an MDIO operation to complete */
#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
/*
 * Read a clause 22 PHY register through MI1.  @mmd_addr must be 0 since
 * clause 22 has no MMDs.  Returns 0 on success or a negative error.
 */
static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	/* select clause 22 (ST = 1) framing */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));	/* op 2 = read */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
226
/*
 * Write a clause 22 PHY register through MI1.  @mmd_addr must be 0 since
 * clause 22 has no MMDs.  Returns 0 on success or a negative error.
 */
static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	/* select clause 22 (ST = 1) framing */
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));	/* op 1 = write */
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
245
/* MDIO operations for directly-addressed (clause 22) PHYs */
static const struct mdio_ops mi1_mdio_ops = {
	t3_mi1_read,
	t3_mi1_write
};
250
/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 *
 * Switches MI1 to clause 45 (ST = 0) framing, then issues an address
 * operation (op 0) carrying @reg_addr for the given PHY/MMD.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}
267
4d22de3e
DLR
/*
 * MI1 read operation for indirect-addressed (clause 45) PHYs: performs
 * the address cycle, then a read (op 3) and returns the data register.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
288
/*
 * MI1 write operation for indirect-addressed (clause 45) PHYs: performs
 * the address cycle, then writes the data register (op 1).
 */
static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}
305
/* MDIO operations for indirect-addressed (clause 45) PHYs */
static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};
310
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Read-modify-write of a PHY register: clears the @clear bits of the
 *	current value and ORs in @set, then writes the result back.
 *	Returns 0 on success or the first MDIO error encountered.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int ret = mdio_read(phy, mmd, reg, &val);

	if (ret)
		return ret;
	return mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
335
336/**
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
341 *
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
344 * for 10G PHYs.
345 */
346int t3_phy_reset(struct cphy *phy, int mmd, int wait)
347{
348 int err;
349 unsigned int ctl;
350
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
352 if (err || !wait)
353 return err;
354
355 do {
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
357 if (err)
358 return err;
359 ctl &= BMCR_RESET;
360 if (ctl)
361 msleep(1);
362 } while (ctl && --wait);
363
364 return ctl ? -1 : 0;
365}
366
367/**
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
371 *
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
374 */
375int t3_phy_advertise(struct cphy *phy, unsigned int advert)
376{
377 int err;
378 unsigned int val = 0;
379
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
381 if (err)
382 return err;
383
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
389
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
391 if (err)
392 return err;
393
394 val = 1;
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
408}
409
0ce2f03b
DLR
410/**
411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
414 *
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
417 */
418int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
419{
420 unsigned int val = 0;
421
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
431}
432
4d22de3e
DLR
433/**
434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
438 *
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
441 */
442int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
443{
444 int err;
445 unsigned int ctl;
446
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
448 if (err)
449 return err;
450
451 if (speed >= 0) {
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
457 }
458 if (duplex >= 0) {
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
462 }
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
466}
467
/* Enable the PHY's LASI (link alarm) interrupt. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
472
/* Disable the PHY's LASI (link alarm) interrupt. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
477
/* Clear a pending LASI interrupt; reading LASI_STAT clears it. */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
484
/*
 * Handle a LASI interrupt: read the status register and report a link
 * change if bit 0 is set.  Returns cphy_cause_link_change, 0, or a
 * negative MDIO error.
 */
int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);

	if (err)
		return err;
	return (status & 1) ? cphy_cause_link_change : 0;
}
494
/*
 * Per-board configuration: port count, GPIO setup, supported link modes,
 * MDIO operations, and the product name.  Indexed by board id; see
 * t3_get_adapter_info().  Field order follows struct adapter_info.
 */
static const struct adapter_info t3_adap_info[] = {
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};
516
517/*
518 * Return the adapter_info structure with a given index. Out-of-range indices
519 * return NULL.
520 */
521const struct adapter_info *t3_get_adapter_info(unsigned int id)
522{
523 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
524}
525
/* Per-PHY-type setup hook; indexed by the VPD port-type code. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

/* NULL entries are port-type codes with no supported PHY. */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
};
542
/*
 * Lays out one VPD-R keyword entry: 2-byte keyword, 1-byte length,
 * then the data bytes.
 */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
574
#define EEPROM_MAX_POLL   40	/* max polls of PCI_VPD_ADDR flag bit */
#define EEPROM_STAT_ADDR  0x4000	/* write-protect status word */
#define VPD_BASE          0xc00	/* usual offset of VPD data in EEPROM */
578
/**
 *	t3_seeprom_read - read a VPD EEPROM location
 *	@adapter: adapter to read
 *	@addr: EEPROM address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  A zero is written to the flag bit when the
 *	address is written to the control register.  The hardware device will
 *	set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* addresses must be word-aligned and within the EEPROM (the
	 * write-protect status word is the one allowed exception) */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}
614
/**
 *	t3_seeprom_write - write a VPD EEPROM location
 *	@adapter: adapter to write
 *	@addr: EEPROM address
 *	@data: value to write
 *
 *	Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 *	VPD ROM capability.  The flag bit is set with the address and cleared
 *	by the hardware when the write completes; we poll for that with 1 ms
 *	sleeps up to EEPROM_MAX_POLL times.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* same address constraints as t3_seeprom_read() */
	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}
648
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM by
 *	writing the status word (0xc sets the protect bits).
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
660
/*
 * Convert a character holding a hex digit ('0'-'9', 'a'-'f', 'A'-'F')
 * to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	return toupper(c) - 'A' + 10;
}
668
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters (clocks, serial number, MAC address base,
 *	port types) stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the VPD ID-string tag; its presence selects VPD_BASE */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* read the whole structure, one 32-bit word at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clock values are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* MAC base address is stored as 12 hex ASCII digits */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
720
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
739
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read (1-4)
 *	@cont: whether another operation will be chained
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}
766
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write (1-4)
 *	@cont: whether another operation will be chained
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
790
791/**
792 * flash_wait_op - wait for a flash operation to complete
793 * @adapter: the adapter
794 * @attempts: max number of polls of the status register
795 * @delay: delay between polls in ms
796 *
797 * Wait for a flash operation to complete by polling the status register.
798 */
799static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
800{
801 int ret;
802 u32 status;
803
804 while (1) {
805 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
806 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
807 return ret;
808 if (!(status & 1))
809 return 0;
810 if (--attempts == 0)
811 return -EAGAIN;
812 if (delay)
813 msleep(delay);
814 }
815}
816
/**
 *	t3_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	/* command byte goes in the low byte, address is byte-swapped */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* issue the fast-read command; the first 1-byte read consumes the
	 * dummy byte the SF_RD_DATA_FAST opcode requires */
	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		/* keep CONT set until the final word to stay in one burst */
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
853
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address, then reads the page back to verify the write.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* writes may not span a 256-byte page boundary */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the data 1-4 bytes at a time, big-endian within each word */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* data now points one past the written bytes; back up to compare */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
901
/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram via the embedded-op
 *	register pair.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
924
925/**
926 * t3_check_tpsram_version - read the tp sram version
927 * @adapter: the adapter
47330077
DLR
928 *
929 * Reads the protocol sram version from flash.
930 */
8207befa 931int t3_check_tpsram_version(struct adapter *adapter)
47330077
DLR
932{
933 int ret;
934 u32 vers;
935 unsigned int major, minor;
936
937 if (adapter->params.rev == T3_REV_A)
938 return 0;
939
47330077
DLR
940
941 ret = t3_get_tp_version(adapter, &vers);
942 if (ret)
943 return ret;
480fe1a3
DLR
944
945 major = G_TP_VERSION_MAJOR(vers);
946 minor = G_TP_VERSION_MINOR(vers);
947
2eab17ab 948 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
480fe1a3 949 return 0;
47330077 950 else {
47330077
DLR
951 CH_ERR(adapter, "found wrong TP version (%u.%u), "
952 "driver compiled for version %d.%d\n", major, minor,
953 TP_VERSION_MAJOR, TP_VERSION_MINOR);
954 }
480fe1a3
DLR
955 return -EINVAL;
956}
957
/**
 *	t3_check_tpsram - check if provided protocol SRAM
 *	is compatible with this driver
 *	@adapter: the adapter
 *	@tp_sram: the firmware image to write
 *	@size: image size
 *
 *	Checks if an adapter's tp sram is compatible with the driver.
 *	Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum: the big-endian words of a valid image sum
	 * (mod 2^32) to 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
986
/* firmware flavor encoded in the version word's type field */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
991
4d22de3e
DLR
992/**
993 * t3_get_fw_version - read the firmware version
994 * @adapter: the adapter
995 * @vers: where to place the version
996 *
997 * Reads the FW version from flash.
998 */
999int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1000{
1001 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1002}
1003
/**
 *	t3_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	/* older FW (or major mismatch): warn and fail */
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		/* newer minor FW is tolerated: warn but accept */
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}
1040
1041/**
1042 * t3_flash_erase_sectors - erase a range of flash sectors
1043 * @adapter: the adapter
1044 * @start: the first sector to erase
1045 * @end: the last sector to erase
1046 *
1047 * Erases the sectors in the given range.
1048 */
1049static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1050{
1051 while (start <= end) {
1052 int ret;
1053
1054 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1055 (ret = sf1_write(adapter, 4, 0,
1056 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1057 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1058 return ret;
1059 start++;
1060 }
1061 return 0;
1062}
1063
/*
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and large enough to hold the trailer. */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* Whole image (including trailer) must checksum to all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	/* Program the body in 256-byte chunks (flash page size). */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* Write the version word last so an interrupted download leaves an
	 * image that fails the version check rather than a half-valid one.
	 */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1118
#define CIM_CTL_BASE 0x2000	/* offset of the CIM control region */

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	/* Bail out if a previous host access is still in flight. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Posting the address starts the read; poll the busy flag,
		 * then collect the data word.
		 */
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1148
1149
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* With autonegotiated pause keep only the directions the admin
	 * requested; otherwise force the requested setting outright.
	 */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;                            /* nothing changed */

	/* On rev > 0 XAUI ports, reset the PCS on link-up and gate the XAUI
	 * Tx/Rx lanes to match the new link state.
	 */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
1197
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Rebuild the pause advertisement from the requested FC:
		 * Tx-only pause maps to Asym_Pause, Rx adds symmetric Pause.
		 */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* Non-autoneg PHY: program FC only and reset the PHY. */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1242
1243/**
1244 * t3_set_vlan_accel - control HW VLAN extraction
1245 * @adapter: the adapter
1246 * @ports: bitmap of adapter ports to operate on
1247 * @on: enable (1) or disable (0) HW VLAN extraction
1248 *
1249 * Enables or disables HW extraction of VLAN tags for the given port.
1250 */
1251void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1252{
1253 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1254 ports << S_VLANEXTRACTIONENABLE,
1255 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1256}
1257
/* One entry of a table-driven interrupt descriptor (see
 * t3_handle_intr_status()); a zero mask terminates the table.
 */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1264
1265/**
1266 * t3_handle_intr_status - table driven interrupt handler
1267 * @adapter: the adapter that generated the interrupt
1268 * @reg: the interrupt status register to process
1269 * @mask: a mask to apply to the interrupt status
1270 * @acts: table of interrupt actions
1271 * @stats: statistics counters tracking interrupt occurences
1272 *
1273 * A table driven interrupt handler that applies a set of masks to an
1274 * interrupt status word and performs the corresponding actions if the
1275 * interrupts described by the mask have occured. The actions include
1276 * optionally printing a warning or alert message, and optionally
1277 * incrementing a stat counter. The table is terminated by an entry
1278 * specifying mask 0. Returns the number of fatal interrupt conditions.
1279 */
1280static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1281 unsigned int mask,
1282 const struct intr_info *acts,
1283 unsigned long *stats)
1284{
1285 int fatal = 0;
1286 unsigned int status = t3_read_reg(adapter, reg) & mask;
1287
1288 for (; acts->mask; ++acts) {
1289 if (!(status & acts->mask))
1290 continue;
1291 if (acts->fatal) {
1292 fatal++;
1293 CH_ALERT(adapter, "%s (0x%x)\n",
1294 acts->msg, status & acts->mask);
1295 } else if (acts->msg)
1296 CH_WARN(adapter, "%s (0x%x)\n",
1297 acts->msg, status & acts->mask);
1298 if (acts->stat_idx >= 0)
1299 stats[acts->stat_idx]++;
1300 }
1301 if (status) /* clear processed interrupts */
1302 t3_write_reg(adapter, reg, status);
1303 return fatal;
1304}
1305
/* Per-module interrupt-enable masks.  Bits commented out inside a mask are
 * deliberately not enabled (e.g. MSI-X parity, handled elsewhere).
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc	/* ULP TX parity-error bits */
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level (PL) interrupt concentrator: one bit per HW module. */
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
1360
/*
 * Interrupt handler for the PCIX1 module.  Only correctable ECC errors are
 * non-fatal (they are counted); everything else escalates to t3_fatal_err().
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1396
/*
 * Interrupt handler for the PCIE module.  All reported conditions are fatal.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* On a PEX error also dump the detailed error code register. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1430
1431/*
1432 * TP interrupt handler.
1433 */
1434static void tp_intr_handler(struct adapter *adapter)
1435{
1436 static const struct intr_info tp_intr_info[] = {
1437 {0xffffff, "TP parity error", -1, 1},
1438 {0x1000000, "TP out of Rx pages", -1, 1},
1439 {0x2000000, "TP out of Tx pages", -1, 1},
1440 {0}
1441 };
1442
a2604be5 1443 static struct intr_info tp_intr_info_t3c[] = {
b881955b
DLR
1444 {0x1fffffff, "TP parity error", -1, 1},
1445 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1446 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1447 {0}
a2604be5
DLR
1448 };
1449
4d22de3e 1450 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
a2604be5 1451 adapter->params.rev < T3_REV_C ?
b881955b 1452 tp_intr_info : tp_intr_info_t3c, NULL))
4d22de3e
DLR
1453 t3_fatal_err(adapter);
1454}
1455
/*
 * CIM interrupt handler.  All listed conditions are fatal: illegal host
 * accesses to protected address ranges plus internal parity errors.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1493
/*
 * ULP RX interrupt handler.  All conditions are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1515
1516/*
1517 * ULP TX interrupt handler.
1518 */
1519static void ulptx_intr_handler(struct adapter *adapter)
1520{
1521 static const struct intr_info ulptx_intr_info[] = {
1522 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1523 STAT_ULP_CH0_PBL_OOB, 0},
1524 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1525 STAT_ULP_CH1_PBL_OOB, 0},
b881955b 1526 {0xfc, "ULP TX parity error", -1, 1},
4d22de3e
DLR
1527 {0}
1528 };
1529
1530 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1531 ulptx_intr_info, adapter->irq_stats))
1532 t3_fatal_err(adapter);
1533}
1534
/* Aggregate framing-error bits for the PM TX input/output SPI interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1543
/*
 * PM TX interrupt handler.  All conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1564
/* Aggregate framing-error bits for the PM RX input/output SPI interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1573
/*
 * PM RX interrupt handler.  All conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1594
/*
 * CPL switch interrupt handler.  All conditions are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1614
/*
 * MPS interrupt handler.  Any of the nine parity-error bits is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1629
/* Uncorrectable, parity, and address errors are fatal for MC7 memories. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.  Counts each error class into the MC7 instance's
 * stats and escalates fatal conditions.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* The error address register exists only on rev > 0 parts. */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1682
/* MAC FIFO parity errors are the only fatal XGMAC conditions. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Updates the port's MAC stats and returns
 * nonzero if any interrupt bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	/* Acknowledge everything we saw before deciding on fatality. */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1717
/*
 * Interrupt handler for PHY events.  Each port's PHY interrupt arrives via
 * a per-port GPIO line reflected in the T3DBG cause register.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip PHYs that cannot raise interrupts. */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			continue;

		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);

			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
		}
	}

	/* Acknowledge all GPIO interrupt bits we observed. */
	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
	return 0;
}
1746
/*
 * T3 slow path (non-data) interrupt handler.  Demultiplexes the top-level
 * cause register to the per-module handlers.  Returns 0 if no enabled slow
 * interrupt was pending, 1 otherwise.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

	/* Consider only the interrupts we actually enabled. */
	cause &= adapter->slow_intr_mask;
	if (!cause)
		return 0;
	if (cause & F_PCIM0) {
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		else
			pci_intr_handler(adapter);
	}
	if (cause & F_SGE3)
		t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	if (cause & F_CIM)
		cim_intr_handler(adapter);
	if (cause & F_TP1)
		tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & F_MPS0)
		mps_intr_handler(adapter);
	if (cause & F_MC5A)
		t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
	return 1;
}
1801
f231e0a5
DLR
1802static unsigned int calc_gpio_intr(struct adapter *adap)
1803{
1804 unsigned int i, gpi_intr = 0;
1805
1806 for_each_port(adap, i)
1807 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1808 adapter_info(adap)->gpio_intr[i])
1809 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1810 return gpi_intr;
1811}
1812
/**
 *	t3_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable interrupts by setting the interrupt enable registers of the
 *	various HW modules and then enabling the top-level interrupt
 *	concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between T3C and earlier revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	/* rev > 0 parts support the extra CIM overflow and ULPTX PBL
	 * bound-error interrupts.
	 */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}
1864
/**
 *	t3_intr_disable - disable a card's interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	/* Remember that nothing is enabled so the slow handler is a no-op. */
	adapter->slow_intr_mask = 0;
}
1878
/**
 *	t3_intr_clear - clear all interrupts
 *	@adapter: the adapter whose interrupts should be cleared
 *
 *	Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	/* Cause registers of every module; writing all-ones clears them. */
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	/* PCIe parts additionally latch PEX error details. */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}
1919
/**
 *	t3_port_intr_enable - enable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be enabled
 *
 *	Enable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.  The MAC side is enabled directly via the per-port
 *	XGMAC register block; the PHY side is delegated to the PHY driver.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}
1936
/**
 *	t3_port_intr_disable - disable port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts should be disabled
 *
 *	Disable port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.  Mirrors t3_port_intr_enable: the MAC interrupt mask
 *	is cleared directly, the PHY mask via the PHY driver.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}
1953
/**
 *	t3_port_intr_clear - clear port-specific interrupts
 *	@adapter: associated adapter
 *	@idx: index of port whose interrupts to clear
 *
 *	Clear port-specific (i.e., MAC and PHY) interrupts for the given
 *	adapter port.  The XGMAC cause register is write-1-to-clear, so
 *	writing all-ones acks every pending MAC cause.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}
1970
/* Maximum number of times to poll for an SGE context command to complete */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 *	t3_sge_write_context - write an SGE context
 *	@adapter: the adapter
 *	@id: the context id
 *	@type: the context type
 *
 *	Program an SGE context with the values already loaded in the
 *	CONTEXT_DATA? registers.  Returns 0 on success, or the (negative)
 *	error from t3_wait_op_done() if the command does not complete.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	/* All-ones masks select every bit of the DATA registers for write */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	/* opcode 1 = write context */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
1994
/*
 * Zero all four CONTEXT_DATA registers and write them out as context
 * @id of the given @type, i.e. reset that SGE context to all-zeroes.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	return t3_sge_write_context(adap, id, type);
}
2004
4d22de3e
DLR
/**
 *	t3_sge_init_ecntxt - initialize an SGE egress context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@type: the egress context type
 *	@respq: associated response queue
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@token: uP token
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE egress context and make it ready for use.  If the
 *	platform allows concurrent context operations, the caller is
 *	responsible for appropriate locking.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a
 *	context command is already in flight, else the result of the
 *	context write.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	/* Offload queues manage their own credits; others start at FW_WR_NUM */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* The 4K-aligned base address is stored in pieces across DATA1-3:
	 * low 16 bits, middle 32 bits, top 4 bits. */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
2048
/**
 *	t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@gts_enable: whether to enable GTS for the context
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@bsize: size of each buffer for this queue
 *	@cong_thres: threshold to signal congestion to upstream producers
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE free list context and make it ready for use.  The
 *	caller is responsible for ensuring only one context operation occurs
 *	at a time.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a
 *	context command is in flight, else the result of the context write.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* base address split low-32/high bits; cidx and bsize are likewise
	 * split across register boundaries with explicit LO/HI fields */
	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}
2089
/**
 *	t3_sge_init_rspcntxt - initialize an SGE response queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@fl_thres: threshold for selecting the normal or jumbo free list
 *	@gen: initial generation value for the context
 *	@cidx: consumer pointer
 *
 *	Initialize an SGE response queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a
 *	context command is in flight, else the result of the context write.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* irq_vec_idx < 0 means this queue raises no interrupt at all */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}
2128
/**
 *	t3_sge_init_cqcntxt - initialize an SGE completion queue context
 *	@adapter: the adapter to configure
 *	@id: the context id
 *	@base_addr: base address of queue
 *	@size: number of queue entries
 *	@rspq: response queue for async notifications
 *	@ovfl_mode: CQ overflow mode
 *	@credits: completion queue credits
 *	@credit_thres: the credit threshold
 *
 *	Initialize an SGE completion queue context and make it ready for use.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 *
 *	Returns -EINVAL if @base_addr is not 4K aligned, -EBUSY if a
 *	context command is in flight, else the result of the context write.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	/* ovfl_mode doubles as the initial CQ_ERR flag value */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}
2165
2166/**
2167 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2168 * @adapter: the adapter
2169 * @id: the egress context id
2170 * @enable: enable (1) or disable (0) the context
2171 *
2172 * Enable or disable an SGE egress context. The caller is responsible for
2173 * ensuring only one context operation occurs at a time.
2174 */
2175int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2176{
2177 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2178 return -EBUSY;
2179
2180 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2181 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2182 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2183 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2184 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2185 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2186 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2187 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2188 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2189}
2190
/**
 *	t3_sge_disable_fl - disable an SGE free-buffer list
 *	@adapter: the adapter
 *	@id: the free list context id
 *
 *	Disable an SGE free-buffer list by zeroing its size field (only the
 *	FL_SIZE bits are selected by the write masks).  The caller is
 *	responsible for ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2214
/**
 *	t3_sge_disable_rspcntxt - disable an SGE response queue
 *	@adapter: the adapter
 *	@id: the response queue context id
 *
 *	Disable an SGE response queue by zeroing its size field (only the
 *	CQ_SIZE bits are selected by the write masks).  The caller is
 *	responsible for ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2238
/**
 *	t3_sge_disable_cqcntxt - disable an SGE completion queue
 *	@adapter: the adapter
 *	@id: the completion queue context id
 *
 *	Disable an SGE completion queue by zeroing its size field (only the
 *	CQ_SIZE bits are selected by the write masks).  The caller is
 *	responsible for ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
2262
/**
 *	t3_sge_cqcntxt_op - perform an operation on a completion queue context
 *	@adapter: the adapter
 *	@id: the context id
 *	@op: the operation to perform
 *	@credits: credit count for credit-return operations (written to DATA0)
 *
 *	Perform the selected operation on an SGE completion queue context.
 *	The caller is responsible for ensuring only one context operation
 *	occurs at a time.
 *
 *	For opcodes 2..6 returns the CQ index on success; for other opcodes
 *	returns 0.  Returns -EBUSY/-EIO on failure.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		/* Rev > 0 parts return the index directly in the command
		 * completion value. */
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		/* Rev 0 workaround: issue an explicit context read (opcode 0)
		 * and pull the index out of DATA0. */
		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}
2302
/**
 *	t3_sge_read_context - read an SGE context
 *	@type: the context type
 *	@adapter: the adapter
 *	@id: the context id
 *	@data: holds the retrieved context
 *
 *	Read an SGE context of the given type into @data[0..3].  The caller
 *	is responsible for ensuring only one context operation occurs at a
 *	time.  Returns -EBUSY/-EIO on failure, 0 on success.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	/* opcode 0 = read context */
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}
2330
2331/**
2332 * t3_sge_read_ecntxt - read an SGE egress context
2333 * @adapter: the adapter
2334 * @id: the context id
2335 * @data: holds the retrieved context
2336 *
2337 * Read an SGE egress context. The caller is responsible for ensuring
2338 * only one context operation occurs at a time.
2339 */
2340int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2341{
2342 if (id >= 65536)
2343 return -EINVAL;
2344 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2345}
2346
2347/**
2348 * t3_sge_read_cq - read an SGE CQ context
2349 * @adapter: the adapter
2350 * @id: the context id
2351 * @data: holds the retrieved context
2352 *
2353 * Read an SGE CQ context. The caller is responsible for ensuring
2354 * only one context operation occurs at a time.
2355 */
2356int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2357{
2358 if (id >= 65536)
2359 return -EINVAL;
2360 return t3_sge_read_context(F_CQ, adapter, id, data);
2361}
2362
2363/**
2364 * t3_sge_read_fl - read an SGE free-list context
2365 * @adapter: the adapter
2366 * @id: the context id
2367 * @data: holds the retrieved context
2368 *
2369 * Read an SGE free-list context. The caller is responsible for ensuring
2370 * only one context operation occurs at a time.
2371 */
2372int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2373{
2374 if (id >= SGE_QSETS * 2)
2375 return -EINVAL;
2376 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2377}
2378
2379/**
2380 * t3_sge_read_rspq - read an SGE response queue context
2381 * @adapter: the adapter
2382 * @id: the context id
2383 * @data: holds the retrieved context
2384 *
2385 * Read an SGE response queue context. The caller is responsible for
2386 * ensuring only one context operation occurs at a time.
2387 */
2388int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2389{
2390 if (id >= SGE_QSETS)
2391 return -EINVAL;
2392 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2393}
2394
/**
 *	t3_config_rss - configure Rx packet steering
 *	@adapter: the adapter
 *	@rss_config: RSS settings (written to TP_RSS_CONFIG)
 *	@cpus: values for the CPU lookup table (0xff terminated)
 *	@rspq: values for the response queue lookup table (0xffff terminated)
 *
 *	Programs the receive packet steering logic.  @cpus and @rspq provide
 *	the values for the CPU and response queue lookup tables.  If they
 *	provide fewer values than the size of the tables the supplied values
 *	are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 * cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;	/* table index in bits 16+ */

			/* two 6-bit CPU entries per table word; wrap back to
			 * the start of @cpus when the 0xff terminator is
			 * the next entry */
			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)	/* wrap on terminator */
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
2434
/**
 *	t3_read_rss - read the contents of the RSS tables
 *	@adapter: the adapter
 *	@lkup: holds the contents of the RSS lookup table
 *	@map: holds the contents of the RSS map table
 *
 *	Reads the contents of the receive packet steering tables.  Either
 *	destination may be NULL to skip that table.  Returns -EAGAIN if an
 *	entry reads back without its valid bit (0x80000000) set.
 */
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			/* 0xffff in the high bits selects read-back of
			 * entry i */
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			/* each word holds two 8-bit CPU entries */
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}
2470
/**
 *	t3_tp_set_offload_mode - put TP in NIC/offload mode
 *	@adap: the adapter
 *	@enable: 1 to select offload mode, 0 for regular NIC
 *
 *	Switches TP to NIC/offload mode.  Offload mode is only entered on
 *	offload-capable adapters; switching back to NIC mode is always
 *	allowed.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}
2484
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	/* truncate the raw page count down to a multiple of 24 */
	return (mem_size / pg_size) / 24 * 24;
}
2501
/*
 * Program base-address register A_<reg> with @start, then advance @start
 * past the region.  Wrapped in do { } while (0) so the two statements
 * expand as a single statement and the macro is safe inside an unbraced
 * if/else; arguments are parenthesized against operator-precedence
 * surprises.
 */
#define mem_region(adap, start, size, reg) do { \
	t3_write_reg((adap), A_ ## reg, (start)); \
	(start) += (size); \
} while (0)
2505
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.  Carves the context memory (CM) into, in order: TCBs,
 *	egress and CQ contexts, timers, and the TP modulation-manager
 *	regions, then hands the remainder to the CIM.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/* On rev > 0 the timer region can shrink with the TID count */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/* m walks the context memory as each region is carved off */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* Remaining CM, rounded up to a 4K boundary, goes to the CIM */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/* If the leftover CM supports fewer connections than the TCAM,
	 * give the surplus TCAM entries to the server region. */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2571
/* Write @val to indirect TP register @addr (address first, then data). */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2578
/*
 * tp_config - program TP's static configuration
 * @adap: the adapter
 * @p: TP parameters (not referenced here; memory layout is set by
 *     partition_mem())
 *
 * Sets TP's global, TCP-option, delayed-ACK, congestion, and scheduler
 * settings, with several fields depending on the chip revision.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/* Written twice; presumably intentional (settle then final value)
	 * -- NOTE(review): confirm against Chelsio reference code */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2628
/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN 250

/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.  Resolutions are expressed as powers of two of
 *	core-clock cycles (hence the fls() computations).
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* Exponential backoff multipliers, four 8-bit entries per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* Convert seconds to timer ticks for readability of the values below */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2679
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size (0 disables coalescing)
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.  Returns
 *	-EINVAL if @size exceeds MAX_RX_COALESCING_LEN.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/* redundant given the guard above, but harmless */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2709
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.  The same value is programmed for
 *	both PM transfer-length fields.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2723
/*
 * Fill @mtus with the default 16-entry MTU table.
 *
 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
 * it can accomodate max size TCP/IP headers when SACK and timestamps
 * are enabled and still have at least 8 bytes of payload.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int i;

	for (i = 0; i < sizeof(default_mtus) / sizeof(default_mtus[0]); i++)
		mtus[i] = default_mtus[i];
}
2748
/*
 * Initial congestion control parameters: additive-increment table @a and
 * backoff-shift table @b, one entry per congestion-control window.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short incr[] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short shift[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < sizeof(incr) / sizeof(incr[0]); i++) {
		a[i] = incr[i];
		b[i] = shift[i];
	}
}
2788
2789/* The minimum additive increment value for the congestion control table */
2790#define CC_MIN_INCR 2U
2791
/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/*
	 * Per-window average packet counts used to scale alpha into an
	 * additive increment for the congestion control table.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		/* Round log2(mtu) down unless the next lower bit is set. */
		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* Each table write encodes index, log2 size, and the MTU. */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/*
			 * mtu - 40 presumably removes TCP/IP header
			 * overhead to get the payload size -- TODO confirm.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
2836
/**
 *	t3_read_hw_mtus - returns the values in the HW MTU table
 *	@adap: the adapter
 *	@mtus: where to store the HW MTU values
 *
 *	Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		/* Select entry i for read-back (high byte all-ones selects
		 * read mode -- matches the write encoding used above). */
		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;		/* MTU is the low 14 bits */
	}
}
2856
/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the alpha values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* Select the (mtu, window) entry for read-back. */
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			/* The increment occupies the low 13 bits. */
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
2878
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	/* Bulk-read the MIB block as an array of 32-bit words into @tps. */
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
2891
/*
 * Program the lower/upper limit registers of a ULP RX memory region and
 * advance @start past the region.  NOTE: this macro expands to multiple
 * statements (no do { } while (0) wrapper) and modifies @start by name;
 * use only as a full statement, never inside an unbraced if/else.
 */
#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

/* Same for a ULP TX region, but does not advance @start. */
#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

/* Carve the Rx channel memory into the per-protocol ULP regions. */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;	/* running region offset */

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	/* PBL limits are programmed on both the TX and RX sides. */
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
2916
480fe1a3
DLR
2917/**
2918 * t3_set_proto_sram - set the contents of the protocol sram
2919 * @adapter: the adapter
2920 * @data: the protocol image
2921 *
2922 * Write the contents of the protocol SRAM.
2923 */
2c733a16 2924int t3_set_proto_sram(struct adapter *adap, const u8 *data)
480fe1a3
DLR
2925{
2926 int i;
2c733a16 2927 const __be32 *buf = (const __be32 *)data;
480fe1a3
DLR
2928
2929 for (i = 0; i < PROTO_SRAM_LINES; i++) {
05e5c116
AV
2930 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2931 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2932 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2933 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2934 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 2935
480fe1a3
DLR
2936 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2937 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2938 return -EIO;
2939 }
2940 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2941
2942 return 0;
2943}
2944
/*
 * Program one of the two TP trace filters (Tx for index 0, Rx otherwise)
 * from @tp.  Key and mask are packed into four 32-bit words covering
 * sport/sip, sip/dport, dip, and proto/vlan/intf; bits 28 and 29 of the
 * last key word act as enable and invert flags respectively.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	/* Key/mask words are written interleaved at consecutive addresses. */
	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush posted writes */
}
2977
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.  The rate is realized
 *	as "bytes per tick" (bpt) every "cycles per tick" (cpt) core clocks;
 *	the (cpt, bpt) pair minimizing the error against @kbps is chosen.
 *	Returns -EINVAL if no feasible pair exists, 0 otherwise.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;	/* core clock, Hz */
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		/* Exhaustive search over the 8-bit cpt/bpt fields. */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;	/* error only grows past here */
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* Two schedulers share each rate-limit register; sched's parity
	 * selects the high or low half-word. */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3021
/*
 * Configure and start the TP block.  For offload-capable adapters the
 * free-list initialization is kicked off and awaited before TP is taken
 * out of reset.  Returns non-zero if that initialization times out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* Only release TP from reset if initialization succeeded. */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3042
3043int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3044{
3045 if (port_mask & ~((1 << adap->params.nports) - 1))
3046 return -EINVAL;
3047 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3048 port_mask << S_PORT0ACTIVE);
3049 return 0;
3050}
3051
/*
 * Perform the bits of HW initialization that are dependent on the number
 * of available ports.
 */
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
{
	int i;

	if (nports == 1) {
		/* Single port: disable round-robin arbitration between
		 * ports and activate port 0 only. */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
			     F_PORT0ACTIVE | F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
	} else {
		/* Dual port: round-robin between ports, split the PM1 Tx
		 * configuration evenly, and map Tx mod queues across both. */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3083
/*
 * Calibrate the MAC's I/O impedance.  On XAUI adapters this retries the
 * automatic calibration up to 5 times and latches the result; on RGMII
 * adapters fixed pull-up/pull-down values are programmed instead.
 * Returns 0 on success, -1 if XAUI calibration never completes cleanly.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP); /* flush */
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				/* Latch the measured impedance value. */
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3110
/*
 * T3B-specific RGMII impedance calibration: pulse CALRESET, then the
 * impedance-set-update and cal-update bits in the required order.
 * XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3125
/* DDR timing parameters for one MC7 memory speed grade. */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* activate to precharge delay */
	unsigned char ActToRdWrDly;	/* activate to read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, per density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write to read delay */
	unsigned char RdToWrDly;	/* read to write delay */
};
3135
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3150
/*
 * Bring up one MC7 memory controller: calibrate, program DDR timing,
 * run the JEDEC-style mode-register initialization sequence, enable
 * periodic refresh, run BIST over the whole array, and finally enable
 * normal accesses.  The exact statement order follows the hardware's
 * required initialization sequence -- do not reorder.
 * Returns 0 on success, -1 on any timeout or calibration fault.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* Memory not present on this card -- nothing to initialize. */
	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* Single-shot I/O calibration. */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM mode-register initialization sequence. */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz; derive the periodic refresh divider.
	 * NOTE(review): 7812 presumably scales the refresh interval to
	 * core-clock units -- confirm against the MC7 documentation. */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* Run BIST with ECC over the entire memory. */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3263
/*
 * Tune PCIe ack latency and replay timer limits based on the negotiated
 * link width and max payload size, clear stale PEX errors, and enable
 * link-down reset behavior.  The ack_lat/rpl_tmr tables are indexed by
 * [log2(link width)][payload size code].
 */
static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	/* Rev 0 silicon has no separate Rx value; reuse the Tx one. */
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);	/* clear errors */
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3316
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO (or an internal negative code propagated
 * via the goto path) on any initialization failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* Impedance calibration differs between T3A (rev 0) and later. */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		/* External memory present: partition and bring up MC7s
		 * and the MC5 TCAM. */
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	init_hw_for_avail_ports(adapter, adapter->params.nports);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* Hand fw_params to the microprocessor and boot it from flash. */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3397
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		/* Negotiated link width field of the Link Status register. */
		p->width = (val >> 4) & 0x3f;
		return;
	}

	/* Not PCIe: decode PCI/PCI-X mode from the adapter's mode register. */
	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3436
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: the link's capability bitmap
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Autoneg-capable: advertise everything and negotiate
		 * flow control. */
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
3461
3462/**
3463 * mc7_calc_size - calculate MC7 memory size
3464 * @cfg: the MC7 configuration
3465 *
3466 * Calculates the size of an MC7 memory in bytes from the value of its
3467 * configuration register.
3468 */
7b9b0943 3469static unsigned int mc7_calc_size(u32 cfg)
4d22de3e
DLR
3470{
3471 unsigned int width = G_WIDTH(cfg);
3472 unsigned int banks = !!(cfg & F_BKS) + 1;
3473 unsigned int org = !!(cfg & F_ORG) + 1;
3474 unsigned int density = G_DEN(cfg);
3475 unsigned int MBs = ((256 << density) * banks) / (org << width);
3476
3477 return MBs << 20;
3478}
3479
7b9b0943
RD
3480static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3481 unsigned int base_addr, const char *name)
4d22de3e
DLR
3482{
3483 u32 cfg;
3484
3485 mc7->adapter = adapter;
3486 mc7->name = name;
3487 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3488 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3489 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4d22de3e
DLR
3490 mc7->width = G_WIDTH(cfg);
3491}
3492
/*
 * Initialize the SW state of one MAC and, on rev-0 XAUI adapters,
 * apply the required SERDES/port configuration workaround.
 */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	/* Register offset of MAC @index relative to MAC 0. */
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3506
/*
 * Early one-time HW setup: MDIO, I2C clock, GPIOs, and MAC clock
 * enablement so MAC registers become accessible.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3532
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 *
 * Returns 0 on success, -1 if the device does not reappear on the bus.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/* Poll the vendor ID until the device answers again. */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3566
/*
 * Initialize parity state: clear the SGE egress and response-queue
 * contexts, then zero every CIM IBQ debug location so subsequent parity
 * checking starts from known data.  Returns -EBUSY if the SGE context
 * unit is busy, or the first error from the clearing operations.
 */
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	/* Write zeros through every address of each of the 4 IBQs. */
	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}
3596
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 *
 * Returns 0 on success or a negative error code.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	/* j starts at (unsigned)-1 so the first ++j probes index 0 when
	 * scanning the VPD port-type array below. */
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports;
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	adapter->params.linkpoll_period = 0;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		/* External memory present: size the MC7s and derive the
		 * TP memory partitioning parameters. */
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = ai->nports;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload is possible only if all three memories are present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip empty slots in the VPD port-type array. */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);
		/* Fall back to periodic link polling if the PHY has no
		 * interrupt capability. */
		if (!(p->phy.caps & SUPPORTED_IRQ))
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3709
/* Drive GPIO0 low to light the "adapter ready" LED. */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
/*
 * Re-run the early HW and PHY preparation after a reset that preserved
 * SW state (e.g. EEH/PCI error recovery): repeat early_hw_init,
 * init_parity, and per-port phy_prep, reusing each PHY's existing
 * address and MDIO operations.  Returns 0 on success.
 */
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	/* See t3_prep_adapter: -1 so the first ++j probes index 0. */
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip empty slots in the VPD port-type array. */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}
3743
This page took 0.575193 seconds and 5 git commands to generate.