/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2007 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * Credits:
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 Kernel.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some Architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable, '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that can
 *     be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values '1' for enable and '0' for disable. Default is '1'.
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload(UFO).
 *     Possible values '1' for enable and '0' for disable. Default is '0'.
 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
 *     Possible values '1' for enable, '0' for disable.
 *     Default is '2' - which means disable in promisc mode
 *     and enable in non-promiscuous mode.
 ************************************************************************/
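
/*
 * Example (illustrative only, using the parameter names documented above):
 * loading the driver with two Tx FIFOs, MSI-X interrupts and NAPI enabled
 * might look like:
 *
 *	modprobe s2io tx_fifo_num=2 intr_type=2 napi=1
 */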

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <asm/irq.h>

/* local include */
#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.22.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
		(GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(dev_type == XFRAME_I_DEVICE) ?				\
		((((subid >= 0x600B) && (subid <= 0x600D)) ||	\
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
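
/*
 * Walk-through (illustrative): an XFRAME_I_DEVICE with subsystem id 0x600C
 * falls inside the 0x600B-0x600D range above, so the macro evaluates to 1;
 * any Xframe II device evaluates to 0 regardless of subid.
 */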

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
#define PANIC	1
#define LOW	2
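
/*
 * rx_buffer_level() below classifies how depleted a receive ring is: it
 * returns PANIC when no more than one Rx block's worth of RxDs remains,
 * LOW when the ring has drained by more than 16 buffers, and 0 otherwise
 * (descriptive note).
 */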
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
	struct mac_info *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}

/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
};

#define S2IO_XENA_STAT_LEN	(sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN	(sizeof(ethtool_enhanced_stats_keys) / \
				 ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN	(sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN	(S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN	(XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN	(XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN	(XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
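
/*
 * Illustrative arithmetic: with the five self-test strings above,
 * S2IO_TEST_LEN evaluates to 5 and S2IO_STRINGS_LEN to 5 * ETH_GSTRING_LEN.
 */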

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))		\

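/*
 * Illustrative use (names hypothetical):
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 * would arm a half-second timer via the init_timer()/mod_timer() calls
 * the macro expands to at the call site.
 */
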
/* Add the vlan */
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
static int vlan_strip_flag;

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	vlan_group_set_device(nic->vlgrp, vid, NULL);
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

#define	END_SIGN	0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
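
/*
 * The array parameters above take comma-separated values on the module
 * command line, e.g. (illustrative):
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=512,512
 * programs two Tx FIFOs of 512 descriptors each.
 */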

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)
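
/*
 * TXD_MEM_PAGE_CNT() is a round-up division; e.g. a 100-list FIFO at
 * 16 lists per page needs TXD_MEM_PAGE_CNT(100, 16) = 7 pages
 * (illustrative numbers).
 */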

/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					"%s: Zero DMA address for TxDL. ",
					dev->name);
				DBG_PRINT(INIT_DBG,
					"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
						blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						 (rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

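					/*
					 * kmalloc() guarantees only slab
					 * alignment, so over-allocate by
					 * ALIGN_SIZE and round the pointer
					 * up. This assumes ALIGN_SIZE has
					 * the form 2^n - 1, so the
					 * "+ ALIGN_SIZE then mask" idiom
					 * below yields the next
					 * (ALIGN_SIZE + 1)-byte boundary.
					 */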
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);

	return SUCCESS;
}

/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;

	if (!nic)
		return;

	dev = nic->dev;
	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}

/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8) GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS) {
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	} else {
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}

/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);


	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
				    ((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
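	/*
	 * Worked example (illustrative): with mem_size = 64 and three rings,
	 * ring 0 gets 64/3 + 64%3 = 22 blocks and rings 1 and 2 get 21 each,
	 * so the whole memory budget is consumed.
	 */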
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				  dev->name);
			DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
			return FAILURE;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);


	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125) / 2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
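	/*
	 * Illustrative arithmetic: a Herc part on a 266 MHz bus gets
	 * count = (266 * 125) / 2 = 16625 as its Tx timer value.
	 */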
1442 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1da177e4 1443 TTI_DATA1_MEM_TX_URNG_B(0x10) |
5e25b9dd 1444 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
541ae68f 1445 if (use_continuous_tx_intrs)
1446 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1da177e4
LT
1447 writeq(val64, &bar0->tti_data1_mem);
1448
1449 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1450 TTI_DATA2_MEM_TX_UFC_B(0x20) |
19a60522 1451 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1da177e4
LT
1452 writeq(val64, &bar0->tti_data2_mem);
1453
1454 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1455 writeq(val64, &bar0->tti_command_mem);
1456
20346722 1457 /*
1da177e4
LT
1458 * Once the operation completes, the Strobe bit of the command
1459 * register will be reset. We poll for this particular condition
1460 * We wait for a maximum of 500ms for the operation to complete,
1461 * if it's not complete by then we return error.
1462 */
1463 time = 0;
1464 while (TRUE) {
1465 val64 = readq(&bar0->tti_command_mem);
1466 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1467 break;
1468 }
1469 if (time > 10) {
1470 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1471 dev->name);
1472 return -1;
1473 }
1474 msleep(50);
1475 time++;
1476 }
1477
b6e3f982 1478 if (nic->config.bimodal) {
1479 int k = 0;
1480 for (k = 0; k < config->rx_ring_num; k++) {
1481 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1482 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1483 writeq(val64, &bar0->tti_command_mem);
541ae68f 1484
541ae68f 1485 /*
b6e3f982 1486 * Once the operation completes, the Strobe bit of the command
1487 * register will be reset. We poll for this particular condition
1488 * We wait for a maximum of 500ms for the operation to complete,
1489 * if it's not complete by then we return error.
1490 */
1491 time = 0;
1492 while (TRUE) {
1493 val64 = readq(&bar0->tti_command_mem);
1494 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1495 break;
1496 }
1497 if (time > 10) {
1498 DBG_PRINT(ERR_DBG,
1499 "%s: TTI init Failed\n",
1500 dev->name);
1501 return -1;
1502 }
1503 time++;
1504 msleep(50);
1505 }
1506 }
541ae68f 1507 } else {
1da177e4 1508
b6e3f982 1509 /* RTI Initialization */
1510 if (nic->device_type == XFRAME_II_DEVICE) {
1511 /*
1512 * Programmed to generate Apprx 500 Intrs per
1513 * second
1514 */
1515 int count = (nic->config.bus_speed * 125)/4;
1516 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1517 } else {
1518 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1519 }
1520 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1521 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1522 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1da177e4 1523
b6e3f982 1524 writeq(val64, &bar0->rti_data1_mem);
1da177e4 1525
b6e3f982 1526 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
cc6e7c44
RA
1527 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1528 if (nic->intr_type == MSI_X)
1529 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1530 RTI_DATA2_MEM_RX_UFC_D(0x40));
1531 else
1532 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1533 RTI_DATA2_MEM_RX_UFC_D(0x80));
b6e3f982 1534 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1535
b6e3f982 1536 for (i = 0; i < config->rx_ring_num; i++) {
1537 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1538 | RTI_CMD_MEM_OFFSET(i);
1539 writeq(val64, &bar0->rti_command_mem);
1540
1541 /*
1542 * Once the operation completes, the Strobe bit of the
1543 * command register will be reset. We poll for this
1544 * particular condition. We wait for a maximum of 500ms
1545 * for the operation to complete, if it's not complete
1546 * by then we return error.
1547 */
1548 time = 0;
1549 while (TRUE) {
1550 val64 = readq(&bar0->rti_command_mem);
1551 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1552 break;
1553 }
1554 if (time > 10) {
1555 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1556 dev->name);
1557 return -1;
1558 }
1559 time++;
1560 msleep(50);
1561 }
1da177e4 1562 }
1da177e4
LT
1563 }
1564
20346722 1565 /*
1566 * Initializing proper values as Pause threshold into all
1da177e4
LT
1567 * the 8 Queues on Rx side.
1568 */
1569 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1570 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1571
1572 /* Disable RMAC PAD STRIPPING */
509a2671 1573 add = &bar0->mac_cfg;
1da177e4
LT
1574 val64 = readq(&bar0->mac_cfg);
1575 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1576 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1577 writel((u32) (val64), add);
1578 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579 writel((u32) (val64 >> 32), (add + 4));
1580 val64 = readq(&bar0->mac_cfg);
1581
7d3d0439
RA
1582 /* Enable FCS stripping by adapter */
1583 add = &bar0->mac_cfg;
1584 val64 = readq(&bar0->mac_cfg);
1585 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1586 if (nic->device_type == XFRAME_II_DEVICE)
1587 writeq(val64, &bar0->mac_cfg);
1588 else {
1589 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1590 writel((u32) (val64), add);
1591 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592 writel((u32) (val64 >> 32), (add + 4));
1593 }
1594
20346722 1595 /*
1596 * Set the time value to be inserted in the pause frame
1da177e4
LT
1597 * generated by xena.
1598 */
1599 val64 = readq(&bar0->rmac_pause_cfg);
1600 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1601 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1602 writeq(val64, &bar0->rmac_pause_cfg);
1603
20346722 1604 /*
1da177e4
LT
1605 * Set the Threshold Limit for Generating the pause frame
1606 * If the amount of data in any Queue exceeds ratio of
1607 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1608 * pause frame is generated
1609 */
1610 val64 = 0;
1611 for (i = 0; i < 4; i++) {
1612 val64 |=
1613 (((u64) 0xFF00 | nic->mac_control.
1614 mc_pause_threshold_q0q3)
1615 << (i * 2 * 8));
1616 }
1617 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1618
1619 val64 = 0;
1620 for (i = 0; i < 4; i++) {
1621 val64 |=
1622 (((u64) 0xFF00 | nic->mac_control.
1623 mc_pause_threshold_q4q7)
1624 << (i * 2 * 8));
1625 }
1626 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1627
20346722 1628 /*
1629 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1630 * exceeded the limit pointed by shared_splits
1631 */
1632 val64 = readq(&bar0->pic_control);
1633 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1634 writeq(val64, &bar0->pic_control);
1635
863c11a9
AR
1636 if (nic->config.bus_speed == 266) {
1637 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1638 writeq(0x0, &bar0->read_retry_delay);
1639 writeq(0x0, &bar0->write_retry_delay);
1640 }
1641
541ae68f 1642 /*
1643 * Programming the Herc to split every write transaction
1644 * that does not start on an ADB to reduce disconnects.
1645 */
1646 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1647 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1648 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1649 writeq(val64, &bar0->misc_control);
1650 val64 = readq(&bar0->pic_control2);
1651 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1652 writeq(val64, &bar0->pic_control2);
541ae68f 1653 }
c92ca04b
AR
1654 if (strstr(nic->product_name, "CX4")) {
1655 val64 = TMAC_AVG_IPG(0x17);
1656 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d 1657 }
1658
1da177e4
LT
1659 return SUCCESS;
1660}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
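
/*
 * In other words, only an Xframe II running with legacy INTA interrupts
 * reports link faults through the GPIO link up/down interrupt; with MSI
 * or MSI-X, and on Xframe I generally, link state is instead tracked
 * through the RMAC error-timer path (see alarm_intr_handler() below).
 */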

/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If this is a Hercules adapter, enable the GPIO
			 * (link up/down) interrupt; otherwise disable all
			 * PCIX, Flash, MDIO, IIC and GPIO interrupts
			 * for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 Tx interrupt levels.
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 enables all 8 Rx interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}

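/*
 * The mask registers are "1 = masked" throughout: enabling a block means
 * clearing its bit in general_int_mask, disabling means setting it.
 * A typical caller looks like stop_nic() further below:
 *
 *	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR |
 *			TX_PIC_INTR | RX_PIC_INTR |
 *			TX_MAC_INTR | RX_MAC_INTR;
 *	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
 */
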
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 If PCC is quiescent
 *         0 If PCC is not quiescent
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}
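
/*
 * The condition "(!herc && rev >= 4) || herc" above simplifies to
 * "herc || rev >= 4": Hercules and Xena revisions 4 and later are
 * checked via the RMAC_PCC_IDLE summary bits, while older Xena
 * revisions are checked via the RMAC_PCC_FOUR_IDLE encoding instead.
 */
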
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not, by
 * checking the ready bits of the individual blocks reported in the
 * adapter_status register.
 * Return: 1 If xena is quiescent
 *         0 If Xena is not quiescent
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE && mode !=
	    PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}

/**
 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear mac address reading problems on Alpha platforms
 *
 */

static void fix_mac_address(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		udelay(10);
		val64 = readq(&bar0->gpio_control);
	}
}

/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some
		 * switches, so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *) ((unsigned long)
				  txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
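
/*
 * TxDL layout assumed by the walk above: with UFO in use the first TxD
 * of a list carries an in-band control buffer (recognized through
 * ufo_in_band_v) and is skipped; the next TxD maps the linear part of
 * the skb (skb->len - skb->data_len bytes) and any further TxDs map one
 * page fragment each. The final memset() hands the whole descriptor
 * list back in a known-empty state.
 */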

/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *) mac_control->fifos[i].
						list_info[j].list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}

/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}

static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp,
			 struct sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
		(nic->pdev, skb->data, l3l4hdr_size + 4,
		 PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n", dev->name);
		return -ENOMEM;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	skb_reset_tail_pointer(frag_list);

	/* Buffer-2 receives L4 data payload */
	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
						frag_list->data, dev->mtu,
						PCI_DMA_FROMDEVICE);
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
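
/*
 * Resulting 3-buffer RxD layout: Buffer0 (set up by the caller,
 * fill_rx_buffers()) holds the BUF0_LEN header strip from ba->ba_0,
 * Buffer1 points at skb->data for l3l4hdr_size + 4 bytes of L3/L4
 * headers, and Buffer2 points at an MTU-sized, (ALIGN_SIZE + 1)-byte
 * aligned frag_list skb that receives the L4 payload.
 */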

/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 header and
 * L4 payload in three buffer mode, and in 5 buffer mode the L4 payload
 * itself is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
		    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
			   block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
			rx_blocks[block_no].rxds[off].virt_addr;

		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
				block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					   block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
				   rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		if (!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		       HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			     PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provide 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation wherein
			 * skb->data will have L3/L4 headers whereas
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */

			/* save the buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
			Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync */
			((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
			((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;

			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
				((struct RxD3*)rxdp)->Buffer0_ptr =
				    pci_map_single(nic->pdev, ba->ba_0,
						   BUF0_LEN,
						   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				     PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
					((struct RxD3*)rxdp)->Buffer1_ptr =
					    pci_map_single(nic->pdev,
							   ba->ba_1, BUF1_LEN,
							   PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM;
				}
			}
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
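
/*
 * The alignment arithmetic above (tmp += ALIGN_SIZE; tmp &= ~ALIGN_SIZE)
 * relies on ALIGN_SIZE being of the form 2^k - 1: it rounds skb->data up
 * to the next (ALIGN_SIZE + 1)-byte boundary, which is how the two and
 * three buffer modes obtain their 128-byte aligned receive buffers. The
 * extra ALIGN_SIZE bytes requested from dev_alloc_skb() guarantee that
 * the rounded pointer still leaves room for the full frame.
 */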

static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;

	mac_control = &sp->mac_control;
	for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE
				 + HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer1_ptr,
				 BUF1_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		} else {
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
				 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}

/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp: device private variable.
 * Description:
 * This function will free all Rx buffers allocated by host.
 * Return Value:
 * NONE.
 */

static void free_rx_buffers(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	int i, blk = 0, buf_cnt = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (blk = 0; blk < rx_ring_sz[i]; blk++)
			free_rxd_blk(sp, i, blk);

		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
	}
}

/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context. Also, it will process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */

static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re-enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
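
/*
 * The sequence above follows the original netdev NAPI contract (the
 * pre-2.6.24 *budget/dev->quota API): acknowledge pending Rx interrupts
 * by writing all-ones to rx_traffic_int, consume up to
 * min(*budget, dev->quota) packets across all rings, decrement both
 * counters, and only call netif_rx_complete() and unmask
 * rx_traffic_mask once every ring has been drained; returning 1 keeps
 * the device on the poll list for another pass.
 */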

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/*
	 * We need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and fail, and eventually a netpoll application
	 * such as netdump will fail too.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packets and indicate them up the network stack */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s: Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
#endif

/**
 * rx_intr_handler - Rx interrupt handler
 * @nic: device private variable.
 * Description:
 * If the interrupt is because of a received frame, or if the
 * receive ring contains fresh, as yet un-processed frames, this function
 * is called. It picks out the RxD at which place the last Rx processing
 * had stopped, sends the skb to the OSM's Rx handler and then increments
 * the offset.
 * Return Value:
 * NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index, then it's
		 * a ring full condition.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n", dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer1_ptr,
				 l3l4hdr_size + 4,
				 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
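
/*
 * Ring traversal model: the "get" side above chases the "put" side
 * maintained by fill_rx_buffers(). RXD_IS_UP2DT() keys off descriptor
 * ownership, so the walk stops when the adapter still owns the next
 * RxD, when the get pointer would collide with the put pointer (the
 * "Ring Full" case), or when the NAPI quota or the indicate_max_pkts
 * module parameter is exhausted.
 */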

/**
 * tx_intr_handler - Transmit interrupt handler
 * @nic : device private variable
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb;
	struct TxD *txdlp;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
				list_virt_addr;
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
					parity_err_cnt++;
			}
			if ((err >> 48) == 0xA) {
				DBG_PRINT(TX_DBG, "TxD returned due "
					  "to loss of link\n");
			} else {
				DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
			}
		}

		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
				  __FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
					[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
			get_info.offset;
	}

	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
}

/**
 * s2io_mdio_write - Function to write in to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @value : data value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return Value:
 * NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
{
	u64 val64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_MDIO_DATA(value)
			| MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}

/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return Value:
 * The 16-bit value read from the MDIO register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = 0x0;
	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
			| MDIO_MMD_DEV_ADDR(mmd_type)
			| MDIO_MMS_PRT_ADDR(0x0)
			| MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
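
/*
 * Both MDIO helpers drive what looks like a clause-45 style two-frame
 * protocol through the mdio_control register: first an address frame
 * (MMD index plus device address), then a data frame tagged with
 * MDIO_OP for the read or write; each frame is kicked off by setting
 * MDIO_CTRL_START_TRANS(0xE) and given ~100us to complete, and a read
 * result comes back in bits 31:16 of mdio_control.
 */
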
/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : XPAK alarm status word
 * @index : index of the 2-bit field for this alarm
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function is to check the status of the xpak counters value
 * Return Value:
 * NONE
 */

static void s2io_chk_xpak_counter(u64 *counter, u64 *regs_stat, u32 index, u16 flag, u16 type)
{
	u64 mask = 0x3;
	u64 val64;
	int i;
	for (i = 0; i < index; i++)
		mask = mask << 0x2;

	if (flag > 0) {
		*counter = *counter + 1;
		val64 = *regs_stat & mask;
		val64 = val64 >> (index * 0x2);
		val64 = val64 + 1;
		if (val64 == 3) {
			switch (type) {
			case 1:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive temperatures may "
					  "result in premature transceiver "
					  "failure\n");
				break;
			case 2:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive bias currents may "
					  "indicate imminent laser diode "
					  "failure\n");
				break;
			case 3:
				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
					  "service. Excessive laser output "
					  "power may saturate far-end "
					  "receiver\n");
				break;
			default:
				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
					  "type\n");
			}
			val64 = 0x0;
		}
		val64 = val64 << (index * 0x2);
		*regs_stat = (*regs_stat & (~mask)) | (val64);

	} else {
		*regs_stat = *regs_stat & (~mask);
	}
}
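
/*
 * regs_stat thus acts as an array of 2-bit saturating counters, one per
 * alarm, at bit offset index * 2: each poll that sees the alarm asserted
 * bumps the field, the third consecutive hit (field value 3) triggers
 * the warning above and wraps the field back to zero, and a poll with
 * the alarm clear resets it.
 */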

/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * Return Value:
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
	if (val64 != 0x2040) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if (CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}

/**
 * alarm_intr_handler - Alarm Interrupt handler
 * @nic: device private variable
 * Description: If the interrupt was neither because of a received packet
 * nor a Tx completion, this function is called. If the interrupt was to
 * indicate a loss of link, the OSM link status handler is invoked; for
 * any other alarm interrupt the block that raised the interrupt is
 * displayed and a H/W reset is issued.
 * Return Value:
 * NONE
 */

static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
						soft_reset_cnt++;
				}
			}
		} else {
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			cnt = (val64 & vBIT(0xFFFF, (i*16), 16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now, TODO */
}

/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : address of the register to poll.
 * @busy_bit : bit which indicates whether the command is still executing.
 * @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 * S2IO_BIT_SET to wait for it to be asserted.
 * Description: Function that waits for a command to Write into RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command was complete or not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */

static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
				 int bit_state)
{
	int ret = FAILURE, cnt = 0, delay = 1;
	u64 val64;

	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
		return FAILURE;

	do {
		val64 = readq(addr);
		if (bit_state == S2IO_BIT_RESET) {
			if (!(val64 & busy_bit)) {
				ret = SUCCESS;
				break;
			}
		} else {
			/* S2IO_BIT_SET: wait for the bit to be asserted */
			if (val64 & busy_bit) {
				ret = SUCCESS;
				break;
			}
		}

		if (in_interrupt())
			mdelay(delay);
		else
			msleep(delay);

		if (++cnt >= 10)
			delay = 50;
	} while (cnt < 20);
	return ret;
}
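
/*
 * The polling schedule works out to ten 1 ms checks followed by up to
 * ten 50 ms checks (roughly half a second worst case), using mdelay()
 * only when called from interrupt context where sleeping is not
 * allowed. A typical caller polls the busy bit of rmac_addr_cmd_mem
 * with bit_state == S2IO_BIT_RESET, i.e. waits for the command-busy
 * flag to clear.
 */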
/*
 * check_pci_device_id - Checks if the device id is supported
 * @id : device id
 * Description: Function to check if the pci device id is supported by driver.
 * Return value: Actual device id if supported else PCI_ANY_ID
 */
static u16 check_pci_device_id(u16 id)
{
	switch (id) {
	case PCI_DEVICE_ID_HERC_WIN:
	case PCI_DEVICE_ID_HERC_UNI:
		return XFRAME_II_DEVICE;
	case PCI_DEVICE_ID_S2IO_UNI:
	case PCI_DEVICE_ID_S2IO_WIN:
		return XFRAME_I_DEVICE;
	default:
		return PCI_ANY_ID;
	}
}
1da177e4 3299
20346722 3300/**
3301 * s2io_reset - Resets the card.
1da177e4
LT
3302 * @sp : private member of the device structure.
3303 * Description: Function to reset the card. This function also
20346722 3304 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3305 * the card reset also resets the configuration space.
3306 * Return value:
3307 * void.
3308 */
3309
1ee6dd77 3310static void s2io_reset(struct s2io_nic * sp)
1da177e4 3311{
1ee6dd77 3312 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 3313 u64 val64;
5e25b9dd 3314 u16 subid, pci_cmd;
19a60522
SS
3315 int i;
3316 u16 val16;
363dc367 3317 unsigned long long reset_cnt = 0;
19a60522
SS
3318 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3319 __FUNCTION__, sp->dev->name);
1da177e4 3320
0b1f7ebe 3321 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
e960fc5c 3322 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
0b1f7ebe 3323
19a60522
SS
3324 if (sp->device_type == XFRAME_II_DEVICE) {
3325 int ret;
3326 ret = pci_set_power_state(sp->pdev, 3);
3327 if (!ret)
3328 ret = pci_set_power_state(sp->pdev, 0);
3329 else {
3330 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3331 __FUNCTION__);
3332 goto old_way;
3333 }
3334 msleep(20);
3335 goto new_way;
3336 }
3337old_way:
1da177e4
LT
3338 val64 = SW_RESET_ALL;
3339 writeq(val64, &bar0->sw_reset);
19a60522 3340new_way:
c92ca04b
AR
3341 if (strstr(sp->product_name, "CX4")) {
3342 msleep(750);
3343 }
19a60522
SS
3344 msleep(250);
3345 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
1da177e4 3346
19a60522
SS
3347 /* Restore the PCI state saved during initialization. */
3348 pci_restore_state(sp->pdev);
3349 pci_read_config_word(sp->pdev, 0x2, &val16);
3350 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3351 break;
3352 msleep(200);
3353 }
1da177e4 3354
19a60522
SS
3355 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3356 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3357 }
3358
3359 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3360
3361 s2io_init_pci(sp);
1da177e4 3362
20346722 3363 /* Set swapper to enable I/O register access */
3364 s2io_set_swapper(sp);
3365
cc6e7c44
RA
3366 /* Restore the MSIX table entries from local variables */
3367 restore_xmsi_data(sp);
3368
5e25b9dd 3369 /* Clear certain PCI/PCI-X fields after reset */
303bcb4b 3370 if (sp->device_type == XFRAME_II_DEVICE) {
b41477f3 3371 /* Clear "detected parity error" bit */
303bcb4b 3372 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
5e25b9dd 3373
303bcb4b 3374 /* Clearing PCIX Ecc status register */
3375 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
5e25b9dd 3376
303bcb4b 3377 /* Clearing PCI_STATUS error reflected here */
3378 writeq(BIT(62), &bar0->txpic_int_reg);
3379 }
5e25b9dd 3380
20346722 3381 /* Reset device statistics maintained by OS */
3382 memset(&sp->stats, 0, sizeof (struct net_device_stats));
363dc367
RV
3383 /* save reset count */
3384 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3385 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3386 /* restore reset count */
3387 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
20346722 3388
1da177e4
LT
3389 /* SXE-002: Configure link and activity LED to turn it off */
3390 subid = sp->pdev->subsystem_device;
541ae68f 3391 if (((subid & 0xFF) >= 0x07) &&
3392 (sp->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
3393 val64 = readq(&bar0->gpio_control);
3394 val64 |= 0x0000800000000000ULL;
3395 writeq(val64, &bar0->gpio_control);
3396 val64 = 0x0411040400000000ULL;
509a2671 3397 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
3398 }
3399
541ae68f 3400 /*
3401 * Clear spurious ECC interrupts that would have occurred on
3402 * XFRAME II cards after reset.
3403 */
3404 if (sp->device_type == XFRAME_II_DEVICE) {
3405 val64 = readq(&bar0->pcc_err_reg);
3406 writeq(val64, &bar0->pcc_err_reg);
3407 }
3408
d8d70caf
SS
3409 /* restore the previously assigned mac address */
3410 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3411
1da177e4
LT
3412 sp->device_enabled_once = FALSE;
3413}
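/*
 * Recovery-path recap (annotation, not part of the original
 * source): Xframe II is reset by bouncing the PCI power state
 * (D3hot -> D0) and falls back to the SW_RESET_ALL register
 * write that Xframe I always uses.  Either path wipes PCI config
 * space, so the loop above restores the saved state and re-reads
 * the device id up to S2IO_MAX_PCI_CONFIG_SPACE_REINIT times
 * before declaring the reset failed.
 */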
3414
3415/**
20346722 3416 * s2io_set_swapper - to set the swapper control on the card
3417 * @sp : private member of the device structure,
1da177e4 3418 * pointer to the s2io_nic structure.
20346722 3419 * Description: Function to set the swapper control on the card
1da177e4
LT
3420 * correctly depending on the 'endianness' of the system.
3421 * Return value:
3422 * SUCCESS on success and FAILURE on failure.
3423 */
3424
1ee6dd77 3425static int s2io_set_swapper(struct s2io_nic * sp)
1da177e4
LT
3426{
3427 struct net_device *dev = sp->dev;
1ee6dd77 3428 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
3429 u64 val64, valt, valr;
3430
20346722 3431 /*
1da177e4
LT
3432 * Set proper endian settings and verify the same by reading
3433 * the PIF Feed-back register.
3434 */
3435
3436 val64 = readq(&bar0->pif_rd_swapper_fb);
3437 if (val64 != 0x0123456789ABCDEFULL) {
3438 int i = 0;
3439 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3440 0x8100008181000081ULL, /* FE=1, SE=0 */
3441 0x4200004242000042ULL, /* FE=0, SE=1 */
3442 0}; /* FE=0, SE=0 */
3443
3444 while(i<4) {
3445 writeq(value[i], &bar0->swapper_ctrl);
3446 val64 = readq(&bar0->pif_rd_swapper_fb);
3447 if (val64 == 0x0123456789ABCDEFULL)
3448 break;
3449 i++;
3450 }
3451 if (i == 4) {
3452 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3453 dev->name);
3454 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3455 (unsigned long long) val64);
3456 return FAILURE;
3457 }
3458 valr = value[i];
3459 } else {
3460 valr = readq(&bar0->swapper_ctrl);
3461 }
3462
3463 valt = 0x0123456789ABCDEFULL;
3464 writeq(valt, &bar0->xmsi_address);
3465 val64 = readq(&bar0->xmsi_address);
3466
3467 if(val64 != valt) {
3468 int i = 0;
3469 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3470 0x0081810000818100ULL, /* FE=1, SE=0 */
3471 0x0042420000424200ULL, /* FE=0, SE=1 */
3472 0}; /* FE=0, SE=0 */
3473
3474 while(i<4) {
3475 writeq((value[i] | valr), &bar0->swapper_ctrl);
3476 writeq(valt, &bar0->xmsi_address);
3477 val64 = readq(&bar0->xmsi_address);
3478 if(val64 == valt)
3479 break;
3480 i++;
3481 }
3482 if(i == 4) {
20346722 3483 unsigned long long x = val64;
1da177e4 3484 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
20346722 3485 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
1da177e4
LT
3486 return FAILURE;
3487 }
3488 }
3489 val64 = readq(&bar0->swapper_ctrl);
3490 val64 &= 0xFFFF000000000000ULL;
3491
3492#ifdef __BIG_ENDIAN
20346722 3493 /*
3494 * The device by default set to a big endian format, so a
1da177e4
LT
3495 * big endian driver need not set anything.
3496 */
3497 val64 |= (SWAPPER_CTRL_TXP_FE |
3498 SWAPPER_CTRL_TXP_SE |
3499 SWAPPER_CTRL_TXD_R_FE |
3500 SWAPPER_CTRL_TXD_W_FE |
3501 SWAPPER_CTRL_TXF_R_FE |
3502 SWAPPER_CTRL_RXD_R_FE |
3503 SWAPPER_CTRL_RXD_W_FE |
3504 SWAPPER_CTRL_RXF_W_FE |
3505 SWAPPER_CTRL_XMSI_FE |
1da177e4 3506 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
92383340 3507 if (sp->intr_type == INTA)
cc6e7c44 3508 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3509 writeq(val64, &bar0->swapper_ctrl);
3510#else
20346722 3511 /*
1da177e4 3512 * Initially we enable all bits to make it accessible by the
20346722 3513 * driver, then we selectively enable only those bits that
1da177e4
LT
3514 * we want to set.
3515 */
3516 val64 |= (SWAPPER_CTRL_TXP_FE |
3517 SWAPPER_CTRL_TXP_SE |
3518 SWAPPER_CTRL_TXD_R_FE |
3519 SWAPPER_CTRL_TXD_R_SE |
3520 SWAPPER_CTRL_TXD_W_FE |
3521 SWAPPER_CTRL_TXD_W_SE |
3522 SWAPPER_CTRL_TXF_R_FE |
3523 SWAPPER_CTRL_RXD_R_FE |
3524 SWAPPER_CTRL_RXD_R_SE |
3525 SWAPPER_CTRL_RXD_W_FE |
3526 SWAPPER_CTRL_RXD_W_SE |
3527 SWAPPER_CTRL_RXF_W_FE |
3528 SWAPPER_CTRL_XMSI_FE |
1da177e4 3529 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
cc6e7c44
RA
3530 if (sp->intr_type == INTA)
3531 val64 |= SWAPPER_CTRL_XMSI_SE;
1da177e4
LT
3532 writeq(val64, &bar0->swapper_ctrl);
3533#endif
3534 val64 = readq(&bar0->swapper_ctrl);
3535
20346722 3536 /*
3537 * Verifying if endian settings are accurate by reading a
1da177e4
LT
3538 * feedback register.
3539 */
3540 val64 = readq(&bar0->pif_rd_swapper_fb);
3541 if (val64 != 0x0123456789ABCDEFULL) {
3542 /* Endian settings are incorrect, calls for another dekko. */
3543 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3544 dev->name);
3545 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3546 (unsigned long long) val64);
3547 return FAILURE;
3548 }
3549
3550 return SUCCESS;
3551}
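/*
 * Worked example (annotation, not part of the original source):
 * the PIF feed-back register always returns the pattern
 * 0x0123456789ABCDEF in the device's byte order.  A fully
 * byte-swapped path would read it back as 0xEFCDAB8967452301, so
 * the loops above try each FE/SE swapper_ctrl combination until
 * the readback matches; the read/write xmsi_address scratch
 * register is exercised the same way to verify the write path.
 */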
3552
1ee6dd77 3553static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3554{
1ee6dd77 3555 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3556 u64 val64;
3557 int ret = 0, cnt = 0;
3558
3559 do {
3560 val64 = readq(&bar0->xmsi_access);
3561 if (!(val64 & BIT(15)))
3562 break;
3563 mdelay(1);
3564 cnt++;
3565 } while(cnt < 5);
3566 if (cnt == 5) {
3567 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3568 ret = 1;
3569 }
3570
3571 return ret;
3572}
3573
1ee6dd77 3574static void restore_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3575{
1ee6dd77 3576 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3577 u64 val64;
3578 int i;
3579
75c30b13 3580 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3581 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3582 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3583 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3584 writeq(val64, &bar0->xmsi_access);
3585 if (wait_for_msix_trans(nic, i)) {
3586 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3587 continue;
3588 }
3589 }
3590}
3591
1ee6dd77 3592static void store_xmsi_data(struct s2io_nic *nic)
cc6e7c44 3593{
1ee6dd77 3594 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3595 u64 val64, addr, data;
3596 int i;
3597
3598 /* Store and display */
75c30b13 3599 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
cc6e7c44
RA
3600 val64 = (BIT(15) | vBIT(i, 26, 6));
3601 writeq(val64, &bar0->xmsi_access);
3602 if (wait_for_msix_trans(nic, i)) {
3603 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3604 continue;
3605 }
3606 addr = readq(&bar0->xmsi_address);
3607 data = readq(&bar0->xmsi_data);
3608 if (addr && data) {
3609 nic->msix_info[i].addr = addr;
3610 nic->msix_info[i].data = data;
3611 }
3612 }
3613}
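/*
 * Usage note (annotation, not part of the original source):
 * store_xmsi_data() and restore_xmsi_data() form a save/restore
 * pair around a chip reset, since the reset clears the MSI-X
 * address/data table:
 *
 * store_xmsi_data(sp); // snapshot addr/data per vector
 * s2io_reset(sp); // wipes the table and calls
 * // restore_xmsi_data() internally
 */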
3614
1ee6dd77 3615int s2io_enable_msi(struct s2io_nic *nic)
cc6e7c44 3616{
1ee6dd77 3617 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3618 u16 msi_ctrl, msg_val;
3619 struct config_param *config = &nic->config;
3620 struct net_device *dev = nic->dev;
3621 u64 val64, tx_mat, rx_mat;
3622 int i, err;
3623
3624 val64 = readq(&bar0->pic_control);
3625 val64 &= ~BIT(1);
3626 writeq(val64, &bar0->pic_control);
3627
3628 err = pci_enable_msi(nic->pdev);
3629 if (err) {
3630 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3631 nic->dev->name);
3632 return err;
3633 }
3634
3635 /*
3636 * Enable MSI and use MSI-1 instead of the standard MSI-0
3637 * for interrupt handling.
3638 */
3639 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3640 msg_val ^= 0x1;
3641 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3642 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3643
3644 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3645 msi_ctrl |= 0x10;
3646 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3647
3648 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3649 tx_mat = readq(&bar0->tx_mat0_n[0]);
3650 for (i=0; i<config->tx_fifo_num; i++) {
3651 tx_mat |= TX_MAT_SET(i, 1);
3652 }
3653 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3654
3655 rx_mat = readq(&bar0->rx_mat);
3656 for (i=0; i<config->rx_ring_num; i++) {
3657 rx_mat |= RX_MAT_SET(i, 1);
3658 }
3659 writeq(rx_mat, &bar0->rx_mat);
3660
3661 dev->irq = nic->pdev->irq;
3662 return 0;
3663}
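/*
 * Steering recap (annotation, not part of the original source):
 * TX_MAT_SET(fifo, v) / RX_MAT_SET(ring, v) bind an event source
 * to an MSI vector number, so the loops above route every Tx
 * FIFO and Rx ring to vector 1.  MSI-1 rather than MSI-0 is
 * selected by the bit flipped at config offset 0x4c, assumed
 * here to be the MSI message-data field.
 */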
3664
1ee6dd77 3665static int s2io_enable_msi_x(struct s2io_nic *nic)
cc6e7c44 3666{
1ee6dd77 3667 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3668 u64 tx_mat, rx_mat;
3669 u16 msi_control; /* Temp variable */
3670 int ret, i, j, msix_indx = 1;
3671
3672 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3673 GFP_KERNEL);
3674 if (nic->entries == NULL) {
0c61ed5f 3675 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
cc6e7c44
RA
3676 return -ENOMEM;
3677 }
3678 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3679
3680 nic->s2io_entries =
3681 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3682 GFP_KERNEL);
3683 if (nic->s2io_entries == NULL) {
0c61ed5f 3684 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
cc6e7c44
RA
3685 kfree(nic->entries);
3686 return -ENOMEM;
3687 }
3688 memset(nic->s2io_entries, 0,
3689 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3690
3691 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3692 nic->entries[i].entry = i;
3693 nic->s2io_entries[i].entry = i;
3694 nic->s2io_entries[i].arg = NULL;
3695 nic->s2io_entries[i].in_use = 0;
3696 }
3697
3698 tx_mat = readq(&bar0->tx_mat0_n[0]);
3699 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3700 tx_mat |= TX_MAT_SET(i, msix_indx);
3701 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3702 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3703 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3704 }
3705 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3706
3707 if (!nic->config.bimodal) {
3708 rx_mat = readq(&bar0->rx_mat);
3709 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3710 rx_mat |= RX_MAT_SET(j, msix_indx);
3711 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3712 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3713 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3714 }
3715 writeq(rx_mat, &bar0->rx_mat);
3716 } else {
3717 tx_mat = readq(&bar0->tx_mat0_n[7]);
3718 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3719 tx_mat |= TX_MAT_SET(i, msix_indx);
3720 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3721 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3722 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3723 }
3724 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3725 }
3726
c92ca04b 3727 nic->avail_msix_vectors = 0;
cc6e7c44 3728 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
c92ca04b
AR
3729 /* We fail init if error or we get less vectors than min required */
3730 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3731 nic->avail_msix_vectors = ret;
3732 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3733 }
cc6e7c44
RA
3734 if (ret) {
3735 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3736 kfree(nic->entries);
3737 kfree(nic->s2io_entries);
3738 nic->entries = NULL;
3739 nic->s2io_entries = NULL;
c92ca04b 3740 nic->avail_msix_vectors = 0;
cc6e7c44
RA
3741 return -ENOMEM;
3742 }
c92ca04b
AR
3743 if (!nic->avail_msix_vectors)
3744 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
cc6e7c44
RA
3745
3746 /*
3747 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3748 * in the herc NIC. (Temp change, needs to be removed later)
3749 */
3750 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3751 msi_control |= 0x1; /* Enable MSI */
3752 pci_write_config_word(nic->pdev, 0x42, msi_control);
3753
3754 return 0;
3755}
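/*
 * Negotiation sketch (annotation; min_needed is a made-up name):
 * pci_enable_msix() of this era returns 0 on success, a positive
 * count when fewer vectors are available, or a negative errno.
 * The code above therefore amounts to:
 *
 * ret = pci_enable_msix(pdev, entries, MAX_REQUESTED_MSI_X);
 * if (ret >= min_needed) // fifos + rings + 1 alarm vector
 * ret = pci_enable_msix(pdev, entries, ret);
 * if (ret) // still short, or an error
 * return -ENOMEM; // after freeing the entry tables
 */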
3756
1da177e4
LT
3757/* ********************************************************* *
3758 * Functions defined below concern the OS part of the driver *
3759 * ********************************************************* */
3760
20346722 3761/**
1da177e4
LT
3762 * s2io_open - open entry point of the driver
3763 * @dev : pointer to the device structure.
3764 * Description:
3765 * This function is the open entry point of the driver. It mainly calls a
3766 * function to allocate Rx buffers and inserts them into the buffer
20346722 3767 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3768 * Return value:
3769 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3770 * file on failure.
3771 */
3772
ac1f60db 3773static int s2io_open(struct net_device *dev)
1da177e4 3774{
1ee6dd77 3775 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3776 int err = 0;
3777
20346722 3778 /*
3779 * Make sure you have link off by default every time
1da177e4
LT
3780 * Nic is initialized
3781 */
3782 netif_carrier_off(dev);
0b1f7ebe 3783 sp->last_link_state = 0;
1da177e4
LT
3784
3785 /* Initialize H/W and enable interrupts */
c92ca04b
AR
3786 err = s2io_card_up(sp);
3787 if (err) {
1da177e4
LT
3788 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3789 dev->name);
e6a8fee2 3790 goto hw_init_failed;
1da177e4
LT
3791 }
3792
3793 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3794 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
e6a8fee2 3795 s2io_card_down(sp);
20346722 3796 err = -ENODEV;
e6a8fee2 3797 goto hw_init_failed;
1da177e4
LT
3798 }
3799
3800 netif_start_queue(dev);
3801 return 0;
20346722 3802
20346722 3803hw_init_failed:
cc6e7c44
RA
3804 if (sp->intr_type == MSI_X) {
3805 if (sp->entries)
3806 kfree(sp->entries);
3807 if (sp->s2io_entries)
3808 kfree(sp->s2io_entries);
3809 }
20346722 3810 return err;
1da177e4
LT
3811}
3812
3813/**
3814 * s2io_close - close entry point of the driver
3815 * @dev : device pointer.
3816 * Description:
3817 * This is the stop entry point of the driver. It needs to undo exactly
3818 * whatever was done by the open entry point, thus it's usually referred to
3819 * as the close function. Among other things this function mainly stops the
3820 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3821 * Return value:
3822 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3823 * file on failure.
3824 */
3825
ac1f60db 3826static int s2io_close(struct net_device *dev)
1da177e4 3827{
1ee6dd77 3828 struct s2io_nic *sp = dev->priv;
cc6e7c44 3829
1da177e4
LT
3830 netif_stop_queue(dev);
3831 /* Reset card, kill tasklet and free Tx and Rx buffers. */
e6a8fee2 3832 s2io_card_down(sp);
cc6e7c44 3833
1da177e4
LT
3834 sp->device_close_flag = TRUE; /* Device is shut down. */
3835 return 0;
3836}
3837
3838/**
3839 * s2io_xmit - Tx entry point of the driver
3840 * @skb : the socket buffer containing the Tx data.
3841 * @dev : device pointer.
3842 * Description :
3843 * This function is the Tx entry point of the driver. S2IO NIC supports
3844 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3845 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3846 * not be updated.
3847 * Return value:
3848 * 0 on success & 1 on failure.
3849 */
3850
ac1f60db 3851static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 3852{
1ee6dd77 3853 struct s2io_nic *sp = dev->priv;
1da177e4
LT
3854 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3855 register u64 val64;
1ee6dd77
RB
3856 struct TxD *txdp;
3857 struct TxFIFO_element __iomem *tx_fifo;
1da177e4 3858 unsigned long flags;
be3a6b02 3859 u16 vlan_tag = 0;
3860 int vlan_priority = 0;
1ee6dd77 3861 struct mac_info *mac_control;
1da177e4 3862 struct config_param *config;
75c30b13 3863 int offload_type;
1da177e4
LT
3864
3865 mac_control = &sp->mac_control;
3866 config = &sp->config;
3867
20346722 3868 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
1da177e4 3869 spin_lock_irqsave(&sp->tx_lock, flags);
1da177e4 3870 if (atomic_read(&sp->card_state) == CARD_DOWN) {
20346722 3871 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4
LT
3872 dev->name);
3873 spin_unlock_irqrestore(&sp->tx_lock, flags);
20346722 3874 dev_kfree_skb(skb);
3875 return 0;
1da177e4
LT
3876 }
3877
3878 queue = 0;
1da177e4 3879
be3a6b02 3880 /* Get Fifo number to Transmit based on vlan priority */
3881 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3882 vlan_tag = vlan_tx_tag_get(skb);
3883 vlan_priority = vlan_tag >> 13;
3884 queue = config->fifo_mapping[vlan_priority];
3885 }
3886
20346722 3887 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3888 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
1ee6dd77 3889 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
20346722 3890 list_virt_addr;
3891
3892 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
1da177e4 3893 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9
AR
3894 if (txdp->Host_Control ||
3895 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 3896 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
1da177e4
LT
3897 netif_stop_queue(dev);
3898 dev_kfree_skb(skb);
3899 spin_unlock_irqrestore(&sp->tx_lock, flags);
3900 return 0;
3901 }
0b1f7ebe 3902
3903 /* A buffer with no data will be dropped */
3904 if (!skb->len) {
3905 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3906 dev_kfree_skb(skb);
3907 spin_unlock_irqrestore(&sp->tx_lock, flags);
3908 return 0;
3909 }
3910
75c30b13 3911 offload_type = s2io_offload_type(skb);
75c30b13 3912 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 3913 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 3914 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 3915 }
84fa7933 3916 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4
LT
3917 txdp->Control_2 |=
3918 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3919 TXD_TX_CKO_UDP_EN);
3920 }
fed5eccd
AR
3921 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3922 txdp->Control_1 |= TXD_LIST_OWN_XENA;
1da177e4 3923 txdp->Control_2 |= config->tx_intr_type;
d8892c6e 3924
be3a6b02 3925 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3926 txdp->Control_2 |= TXD_VLAN_ENABLE;
3927 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3928 }
3929
fed5eccd 3930 frg_len = skb->len - skb->data_len;
75c30b13 3931 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
3932 int ufo_size;
3933
75c30b13 3934 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
3935 ufo_size &= ~7;
3936 txdp->Control_1 |= TXD_UFO_EN;
3937 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3938 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3939#ifdef __BIG_ENDIAN
3940 sp->ufo_in_band_v[put_off] =
3941 (u64)skb_shinfo(skb)->ip6_frag_id;
3942#else
3943 sp->ufo_in_band_v[put_off] =
3944 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3945#endif
3946 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3947 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3948 sp->ufo_in_band_v,
3949 sizeof(u64), PCI_DMA_TODEVICE);
3950 txdp++;
fed5eccd 3951 }
1da177e4 3952
fed5eccd
AR
3953 txdp->Buffer_Pointer = pci_map_single
3954 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3955 txdp->Host_Control = (unsigned long) skb;
3956 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 3957 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3958 txdp->Control_1 |= TXD_UFO_EN;
3959
3960 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
3961 /* For fragmented SKB. */
3962 for (i = 0; i < frg_cnt; i++) {
3963 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe 3964 /* A '0' length fragment will be ignored */
3965 if (!frag->size)
3966 continue;
1da177e4
LT
3967 txdp++;
3968 txdp->Buffer_Pointer = (u64) pci_map_page
3969 (sp->pdev, frag->page, frag->page_offset,
3970 frag->size, PCI_DMA_TODEVICE);
efd51b5c 3971 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 3972 if (offload_type == SKB_GSO_UDP)
fed5eccd 3973 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
3974 }
3975 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3976
75c30b13 3977 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
3978 frg_cnt++; /* as Txd0 was used for inband header */
3979
1da177e4 3980 tx_fifo = mac_control->tx_FIFO_start[queue];
20346722 3981 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
1da177e4
LT
3982 writeq(val64, &tx_fifo->TxDL_Pointer);
3983
3984 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3985 TX_FIFO_LAST_LIST);
75c30b13 3986 if (offload_type)
fed5eccd 3987 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 3988
1da177e4
LT
3989 writeq(val64, &tx_fifo->List_Control);
3990
303bcb4b 3991 mmiowb();
3992
1da177e4 3993 put_off++;
863c11a9
AR
3994 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3995 put_off = 0;
20346722 3996 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
1da177e4
LT
3997
3998 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 3999 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
bd1034f0 4000 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
1da177e4
LT
4001 DBG_PRINT(TX_DBG,
4002 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4003 put_off, get_off);
4004 netif_stop_queue(dev);
4005 }
4006
4007 dev->trans_start = jiffies;
4008 spin_unlock_irqrestore(&sp->tx_lock, flags);
4009
4010 return 0;
4011}
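/*
 * Worked example (annotation, not part of the original source) of
 * the put/get full test used twice above, with queue_len == 8,
 * put_off == 7, get_off == 0:
 *
 * next = (put_off + 1 == queue_len) ? 0 : put_off + 1; // 0
 * if (next == get_off) // 0 == 0 -> FIFO is full
 * netif_stop_queue(dev);
 *
 * One descriptor is deliberately kept unused so that put_off ==
 * get_off always means "empty" and never "full".
 */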
4012
25fff88e 4013static void
4014s2io_alarm_handle(unsigned long data)
4015{
1ee6dd77 4016 struct s2io_nic *sp = (struct s2io_nic *)data;
25fff88e 4017
4018 alarm_intr_handler(sp);
4019 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4020}
4021
1ee6dd77 4022static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
75c30b13
AR
4023{
4024 int rxb_size, level;
4025
4026 if (!sp->lro) {
4027 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4028 level = rx_buffer_level(sp, rxb_size, rng_n);
4029
4030 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4031 int ret;
4032 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4033 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4034 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
0c61ed5f 4035 DBG_PRINT(INFO_DBG, "Out of memory in %s",
75c30b13
AR
4036 __FUNCTION__);
4037 clear_bit(0, (&sp->tasklet_status));
4038 return -1;
4039 }
4040 clear_bit(0, (&sp->tasklet_status));
4041 } else if (level == LOW)
4042 tasklet_schedule(&sp->task);
4043
4044 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
0c61ed5f
RV
4045 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4046 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
75c30b13
AR
4047 }
4048 return 0;
4049}
4050
7d12e780 4051static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
cc6e7c44
RA
4052{
4053 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77 4054 struct s2io_nic *sp = dev->priv;
cc6e7c44 4055 int i;
1ee6dd77 4056 struct mac_info *mac_control;
cc6e7c44
RA
4057 struct config_param *config;
4058
4059 atomic_inc(&sp->isr_cnt);
4060 mac_control = &sp->mac_control;
4061 config = &sp->config;
4062 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4063
4064 /* If Intr is because of Rx Traffic */
4065 for (i = 0; i < config->rx_ring_num; i++)
4066 rx_intr_handler(&mac_control->rings[i]);
4067
4068 /* If Intr is because of Tx Traffic */
4069 for (i = 0; i < config->tx_fifo_num; i++)
4070 tx_intr_handler(&mac_control->fifos[i]);
4071
4072 /*
4073 * If the Rx buffer count is below the panic threshold then
4074 * reallocate the buffers from the interrupt handler itself,
4075 * else schedule a tasklet to reallocate the buffers.
4076 */
75c30b13
AR
4077 for (i = 0; i < config->rx_ring_num; i++)
4078 s2io_chk_rx_buffers(sp, i);
cc6e7c44
RA
4079
4080 atomic_dec(&sp->isr_cnt);
4081 return IRQ_HANDLED;
4082}
4083
7d12e780 4084static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
cc6e7c44 4085{
1ee6dd77
RB
4086 struct ring_info *ring = (struct ring_info *)dev_id;
4087 struct s2io_nic *sp = ring->nic;
cc6e7c44
RA
4088
4089 atomic_inc(&sp->isr_cnt);
cc6e7c44 4090
75c30b13
AR
4091 rx_intr_handler(ring);
4092 s2io_chk_rx_buffers(sp, ring->ring_no);
7d3d0439 4093
cc6e7c44 4094 atomic_dec(&sp->isr_cnt);
cc6e7c44
RA
4095 return IRQ_HANDLED;
4096}
4097
7d12e780 4098static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
cc6e7c44 4099{
1ee6dd77
RB
4100 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4101 struct s2io_nic *sp = fifo->nic;
cc6e7c44
RA
4102
4103 atomic_inc(&sp->isr_cnt);
4104 tx_intr_handler(fifo);
4105 atomic_dec(&sp->isr_cnt);
4106 return IRQ_HANDLED;
4107}
1ee6dd77 4108static void s2io_txpic_intr_handle(struct s2io_nic *sp)
a371a07d 4109{
1ee6dd77 4110 struct XENA_dev_config __iomem *bar0 = sp->bar0;
a371a07d 4111 u64 val64;
4112
4113 val64 = readq(&bar0->pic_int_status);
4114 if (val64 & PIC_INT_GPIO) {
4115 val64 = readq(&bar0->gpio_int_reg);
4116 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4117 (val64 & GPIO_INT_REG_LINK_UP)) {
c92ca04b
AR
4118 /*
4119 * This is an unstable state, so clear both up/down
4120 * interrupts and let the adapter re-evaluate the link state.
4121 */
a371a07d 4122 val64 |= GPIO_INT_REG_LINK_DOWN;
4123 val64 |= GPIO_INT_REG_LINK_UP;
4124 writeq(val64, &bar0->gpio_int_reg);
a371a07d 4125 val64 = readq(&bar0->gpio_int_mask);
c92ca04b
AR
4126 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4127 GPIO_INT_MASK_LINK_DOWN);
a371a07d 4128 writeq(val64, &bar0->gpio_int_mask);
a371a07d 4129 }
c92ca04b
AR
4130 else if (val64 & GPIO_INT_REG_LINK_UP) {
4131 val64 = readq(&bar0->adapter_status);
c92ca04b 4132 /* Enable Adapter */
19a60522
SS
4133 val64 = readq(&bar0->adapter_control);
4134 val64 |= ADAPTER_CNTL_EN;
4135 writeq(val64, &bar0->adapter_control);
4136 val64 |= ADAPTER_LED_ON;
4137 writeq(val64, &bar0->adapter_control);
4138 if (!sp->device_enabled_once)
4139 sp->device_enabled_once = 1;
c92ca04b 4140
19a60522
SS
4141 s2io_link(sp, LINK_UP);
4142 /*
4143 * unmask link down interrupt and mask link-up
4144 * intr
4145 */
4146 val64 = readq(&bar0->gpio_int_mask);
4147 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4148 val64 |= GPIO_INT_MASK_LINK_UP;
4149 writeq(val64, &bar0->gpio_int_mask);
c92ca04b 4150
c92ca04b
AR
4151 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4152 val64 = readq(&bar0->adapter_status);
19a60522
SS
4153 s2io_link(sp, LINK_DOWN);
4154 /* Link is down so unmask link up interrupt */
4155 val64 = readq(&bar0->gpio_int_mask);
4156 val64 &= ~GPIO_INT_MASK_LINK_UP;
4157 val64 |= GPIO_INT_MASK_LINK_DOWN;
4158 writeq(val64, &bar0->gpio_int_mask);
ac1f90d6
SS
4159
4160 /* turn off LED */
4161 val64 = readq(&bar0->adapter_control);
4162 val64 = val64 &(~ADAPTER_LED_ON);
4163 writeq(val64, &bar0->adapter_control);
a371a07d 4164 }
4165 }
c92ca04b 4166 val64 = readq(&bar0->gpio_int_mask);
a371a07d 4167}
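/*
 * State recap (annotation, not part of the original source) of
 * the GPIO link handling above:
 *
 * UP and DOWN both pending -> clear and unmask both, let the
 *                             next interrupt decide the state
 * UP only                  -> enable adapter + LED, report
 *                             LINK_UP, mask further UP, unmask DOWN
 * DOWN only                -> report LINK_DOWN, LED off, mask
 *                             further DOWN, unmask UP
 */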
4168
1da177e4
LT
4169/**
4170 * s2io_isr - ISR handler of the device.
4171 * @irq: the irq of the device.
4172 * @dev_id: a void pointer to the dev structure of the NIC.
20346722 4173 * Description: This function is the ISR handler of the device. It
4174 * identifies the reason for the interrupt and calls the relevant
4175 * service routines. As a contingency measure, this ISR allocates the
1da177e4
LT
4176 * recv buffers, if their numbers are below the panic value which is
4177 * presently set to 25% of the original number of rcv buffers allocated.
4178 * Return value:
20346722 4179 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4180 * IRQ_NONE: will be returned if interrupt is not from our device
4181 */
7d12e780 4182static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4
LT
4183{
4184 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77
RB
4185 struct s2io_nic *sp = dev->priv;
4186 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4187 int i;
19a60522 4188 u64 reason = 0;
1ee6dd77 4189 struct mac_info *mac_control;
1da177e4
LT
4190 struct config_param *config;
4191
7ba013ac 4192 atomic_inc(&sp->isr_cnt);
1da177e4
LT
4193 mac_control = &sp->mac_control;
4194 config = &sp->config;
4195
20346722 4196 /*
1da177e4
LT
4197 * Identify the cause for interrupt and call the appropriate
4198 * interrupt handler. Causes for the interrupt could be;
4199 * 1. Rx of packet.
4200 * 2. Tx complete.
4201 * 3. Link down.
20346722 4202 * 4. Error in any functional blocks of the NIC.
1da177e4
LT
4203 */
4204 reason = readq(&bar0->general_int_status);
4205
4206 if (!reason) {
19a60522
SS
4207 /* The interrupt was not raised by us. */
4208 atomic_dec(&sp->isr_cnt);
4209 return IRQ_NONE;
4210 }
4211 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4212 /* Disable device and get out */
7ba013ac 4213 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4214 return IRQ_NONE;
4215 }
5d3213cc 4216
db874e65
SS
4217 if (napi) {
4218 if (reason & GEN_INTR_RXTRAFFIC) {
19a60522 4219 if ( likely ( netif_rx_schedule_prep(dev)) ) {
db874e65 4220 __netif_rx_schedule(dev);
19a60522 4221 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
db874e65 4222 }
19a60522
SS
4223 else
4224 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
db874e65
SS
4225 }
4226 } else {
4227 /*
4228 * Rx handler is called by default, without checking for the
4229 * cause of interrupt.
4230 * rx_traffic_int reg is an R1 register, writing all 1's
4231 * will ensure that the actual interrupt-causing bit gets
4232 * cleared and hence a read can be avoided.
4233 */
19a60522
SS
4234 if (reason & GEN_INTR_RXTRAFFIC)
4235 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4236
db874e65
SS
4237 for (i = 0; i < config->rx_ring_num; i++) {
4238 rx_intr_handler(&mac_control->rings[i]);
1da177e4
LT
4239 }
4240 }
1da177e4 4241
863c11a9
AR
4242 /*
4243 * tx_traffic_int reg is an R1 register, writing all 1's
4244 * will ensure that the actual interrupt-causing bit gets
4245 * cleared and hence a read can be avoided.
4246 */
19a60522
SS
4247 if (reason & GEN_INTR_TXTRAFFIC)
4248 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
fe113638 4249
863c11a9
AR
4250 for (i = 0; i < config->tx_fifo_num; i++)
4251 tx_intr_handler(&mac_control->fifos[i]);
20346722 4252
a371a07d 4253 if (reason & GEN_INTR_TXPIC)
4254 s2io_txpic_intr_handle(sp);
20346722 4255 /*
4256 * If the Rx buffer count is below the panic threshold then
4257 * reallocate the buffers from the interrupt handler itself,
1da177e4
LT
4258 * else schedule a tasklet to reallocate the buffers.
4259 */
db874e65
SS
4260 if (!napi) {
4261 for (i = 0; i < config->rx_ring_num; i++)
4262 s2io_chk_rx_buffers(sp, i);
4263 }
4264
4265 writeq(0, &bar0->general_int_mask);
4266 readl(&bar0->general_int_status);
4267
7ba013ac 4268 atomic_dec(&sp->isr_cnt);
1da177e4
LT
4269 return IRQ_HANDLED;
4270}
4271
7ba013ac 4272/**
4273 * s2io_updt_stats - requests an immediate one-shot update of the statistics block.
4274 */
1ee6dd77 4275static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4276{
1ee6dd77 4277 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac 4278 u64 val64;
4279 int cnt = 0;
4280
4281 if (atomic_read(&sp->card_state) == CARD_UP) {
4282 /* Apprx 30us on a 133 MHz bus */
4283 val64 = SET_UPDT_CLICKS(10) |
4284 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4285 writeq(val64, &bar0->stat_cfg);
4286 do {
4287 udelay(100);
4288 val64 = readq(&bar0->stat_cfg);
4289 if (!(val64 & BIT(0)))
4290 break;
4291 cnt++;
4292 if (cnt == 5)
4293 break; /* Updt failed */
4294 } while(1);
363dc367 4295 }
7ba013ac 4296}
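/*
 * Sketch (annotation, not part of the original source) of the
 * one-shot statistics refresh above: writing STAT_CFG_ONE_SHOT_EN
 * | STAT_CFG_STAT_EN asks the adapter to DMA its hardware
 * counters into the stats block once, and the polled flag in
 * stat_cfg (BIT(0), MSB-first per this driver's BIT() macro, an
 * assumption worth checking against s2io.h) stays set while the
 * transfer is in flight:
 *
 * writeq(SET_UPDT_CLICKS(10) | STAT_CFG_ONE_SHOT_EN |
 *        STAT_CFG_STAT_EN, &bar0->stat_cfg);
 * while (readq(&bar0->stat_cfg) & BIT(0))
 *        udelay(100); // bounded to 5 polls above
 */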
4297
1da177e4 4298/**
20346722 4299 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4300 * @dev : pointer to the device structure.
4301 * Description:
20346722 4302 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4303 * structure and returns a pointer to the same.
4304 * Return value:
4305 * pointer to the updated net_device_stats structure.
4306 */
4307
ac1f60db 4308static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4309{
1ee6dd77
RB
4310 struct s2io_nic *sp = dev->priv;
4311 struct mac_info *mac_control;
1da177e4
LT
4312 struct config_param *config;
4313
20346722 4314
1da177e4
LT
4315 mac_control = &sp->mac_control;
4316 config = &sp->config;
4317
7ba013ac 4318 /* Configure Stats for immediate updt */
4319 s2io_updt_stats(sp);
4320
4321 sp->stats.tx_packets =
4322 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722 4323 sp->stats.tx_errors =
4324 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4325 sp->stats.rx_errors =
ee705dba 4326 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722 4327 sp->stats.multicast =
4328 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4329 sp->stats.rx_length_errors =
ee705dba 4330 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
4331
4332 return (&sp->stats);
4333}
4334
4335/**
4336 * s2io_set_multicast - entry point for multicast address enable/disable.
4337 * @dev : pointer to the device structure
4338 * Description:
20346722 4339 * This function is a driver entry point which gets called by the kernel
4340 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4341 * called to set/reset promiscuous mode. Depending on the device flags, we
4342 * determine if multicast addresses must be enabled or if promiscuous mode
4343 * is to be disabled etc.
4344 * Return value:
4345 * void.
4346 */
4347
4348static void s2io_set_multicast(struct net_device *dev)
4349{
4350 int i, j, prev_cnt;
4351 struct dev_mc_list *mclist;
1ee6dd77
RB
4352 struct s2io_nic *sp = dev->priv;
4353 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4354 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4355 0xfeffffffffffULL;
4356 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4357 void __iomem *add;
4358
4359 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4360 /* Enable all Multicast addresses */
4361 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4362 &bar0->rmac_addr_data0_mem);
4363 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4364 &bar0->rmac_addr_data1_mem);
4365 val64 = RMAC_ADDR_CMD_MEM_WE |
4366 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4367 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4368 writeq(val64, &bar0->rmac_addr_cmd_mem);
4369 /* Wait till command completes */
c92ca04b 4370 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4371 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4372 S2IO_BIT_RESET);
1da177e4
LT
4373
4374 sp->m_cast_flg = 1;
4375 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4376 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4377 /* Disable all Multicast addresses */
4378 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4379 &bar0->rmac_addr_data0_mem);
5e25b9dd 4380 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4381 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4382 val64 = RMAC_ADDR_CMD_MEM_WE |
4383 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4384 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4385 writeq(val64, &bar0->rmac_addr_cmd_mem);
4386 /* Wait till command completes */
c92ca04b 4387 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4388 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4389 S2IO_BIT_RESET);
1da177e4
LT
4390
4391 sp->m_cast_flg = 0;
4392 sp->all_multi_pos = 0;
4393 }
4394
4395 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4396 /* Put the NIC into promiscuous mode */
4397 add = &bar0->mac_cfg;
4398 val64 = readq(&bar0->mac_cfg);
4399 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4400
4401 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4402 writel((u32) val64, add);
4403 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4404 writel((u32) (val64 >> 32), (add + 4));
4405
926930b2
SS
4406 if (vlan_tag_strip != 1) {
4407 val64 = readq(&bar0->rx_pa_cfg);
4408 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4409 writeq(val64, &bar0->rx_pa_cfg);
4410 vlan_strip_flag = 0;
4411 }
4412
1da177e4
LT
4413 val64 = readq(&bar0->mac_cfg);
4414 sp->promisc_flg = 1;
776bd20f 4415 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
4416 dev->name);
4417 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4418 /* Remove the NIC from promiscuous mode */
4419 add = &bar0->mac_cfg;
4420 val64 = readq(&bar0->mac_cfg);
4421 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4422
4423 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4424 writel((u32) val64, add);
4425 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4426 writel((u32) (val64 >> 32), (add + 4));
4427
926930b2
SS
4428 if (vlan_tag_strip != 0) {
4429 val64 = readq(&bar0->rx_pa_cfg);
4430 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4431 writeq(val64, &bar0->rx_pa_cfg);
4432 vlan_strip_flag = 1;
4433 }
4434
1da177e4
LT
4435 val64 = readq(&bar0->mac_cfg);
4436 sp->promisc_flg = 0;
776bd20f 4437 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
4438 dev->name);
4439 }
4440
4441 /* Update individual M_CAST address list */
4442 if ((!sp->m_cast_flg) && dev->mc_count) {
4443 if (dev->mc_count >
4444 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4445 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4446 dev->name);
4447 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4448 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4449 return;
4450 }
4451
4452 prev_cnt = sp->mc_addr_count;
4453 sp->mc_addr_count = dev->mc_count;
4454
4455 /* Clear out the previous list of Mc in the H/W. */
4456 for (i = 0; i < prev_cnt; i++) {
4457 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4458 &bar0->rmac_addr_data0_mem);
4459 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4460 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4461 val64 = RMAC_ADDR_CMD_MEM_WE |
4462 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4463 RMAC_ADDR_CMD_MEM_OFFSET
4464 (MAC_MC_ADDR_START_OFFSET + i);
4465 writeq(val64, &bar0->rmac_addr_cmd_mem);
4466
4467 /* Wait till command completes */
c92ca04b 4468 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4469 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4470 S2IO_BIT_RESET)) {
1da177e4
LT
4471 DBG_PRINT(ERR_DBG, "%s: Adding ",
4472 dev->name);
4473 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4474 return;
4475 }
4476 }
4477
4478 /* Create the new Rx filter list and update the same in H/W. */
4479 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4480 i++, mclist = mclist->next) {
4481 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4482 ETH_ALEN);
a7a80d5a 4483 mac_addr = 0;
1da177e4
LT
4484 for (j = 0; j < ETH_ALEN; j++) {
4485 mac_addr |= mclist->dmi_addr[j];
4486 mac_addr <<= 8;
4487 }
4488 mac_addr >>= 8;
4489 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4490 &bar0->rmac_addr_data0_mem);
4491 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 4492 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4493 val64 = RMAC_ADDR_CMD_MEM_WE |
4494 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4495 RMAC_ADDR_CMD_MEM_OFFSET
4496 (i + MAC_MC_ADDR_START_OFFSET);
4497 writeq(val64, &bar0->rmac_addr_cmd_mem);
4498
4499 /* Wait till command completes */
c92ca04b 4500 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4501 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4502 S2IO_BIT_RESET)) {
1da177e4
LT
4503 DBG_PRINT(ERR_DBG, "%s: Adding ",
4504 dev->name);
4505 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4506 return;
4507 }
4508 }
4509 }
4510}
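/*
 * Worked example (annotation, not part of the original source) of
 * the address-packing loop above: for the multicast address
 * 01:00:5e:00:00:01 the shift/OR sequence builds
 * mac_addr = 0x01005e000001ULL, i.e. the first octet lands in the
 * most significant of the six used bytes, which is the layout
 * RMAC_ADDR_DATA0_MEM_ADDR() expects.
 */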
4511
4512/**
20346722 4513 * s2io_set_mac_addr - Programs the Xframe mac address
1da177e4
LT
4514 * @dev : pointer to the device structure.
4515 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 4516 * Description : This procedure will program the Xframe to receive
1da177e4 4517 * frames with the new MAC address.
20346722 4518 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
4519 * as defined in errno.h file on failure.
4520 */
4521
26df54bf 4522static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
1da177e4 4523{
1ee6dd77
RB
4524 struct s2io_nic *sp = dev->priv;
4525 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4526 register u64 val64, mac_addr = 0;
4527 int i;
d8d70caf 4528 u64 old_mac_addr = 0;
1da177e4 4529
20346722 4530 /*
1da177e4
LT
4531 * Set the new MAC address as the new unicast filter and reflect this
4532 * change on the device address registered with the OS. It will be
20346722 4533 * at offset 0.
1da177e4
LT
4534 */
4535 for (i = 0; i < ETH_ALEN; i++) {
4536 mac_addr <<= 8;
4537 mac_addr |= addr[i];
d8d70caf
SS
4538 old_mac_addr <<= 8;
4539 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4540 }
4541
4542 if(0 == mac_addr)
4543 return SUCCESS;
4544
4545 /* Update the internal structure with this new mac address */
4546 if(mac_addr != old_mac_addr) {
4547 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4548 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4549 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4550 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4551 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4552 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4553 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
1da177e4
LT
4554 }
4555
4556 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4557 &bar0->rmac_addr_data0_mem);
4558
4559 val64 =
4560 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4561 RMAC_ADDR_CMD_MEM_OFFSET(0);
4562 writeq(val64, &bar0->rmac_addr_cmd_mem);
4563 /* Wait till command completes */
c92ca04b 4564 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 4565 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
1da177e4
LT
4566 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4567 return FAILURE;
4568 }
4569
4570 return SUCCESS;
4571}
4572
4573/**
20346722 4574 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
4575 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4576 * @info: pointer to the structure with parameters given by ethtool to set
4577 * link information.
4578 * Description:
20346722 4579 * The function sets different link parameters provided by the user onto
1da177e4
LT
4580 * the NIC.
4581 * Return value:
4582 * 0 on success.
4583*/
4584
4585static int s2io_ethtool_sset(struct net_device *dev,
4586 struct ethtool_cmd *info)
4587{
1ee6dd77 4588 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4589 if ((info->autoneg == AUTONEG_ENABLE) ||
4590 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4591 return -EINVAL;
4592 else {
4593 s2io_close(sp->dev);
4594 s2io_open(sp->dev);
4595 }
4596
4597 return 0;
4598}
4599
4600/**
20346722 4601 * s2io_ethtool_gset - Return link specific information.
1da177e4
LT
4602 * @sp : private member of the device structure, pointer to the
4603 * s2io_nic structure.
4604 * @info : pointer to the structure with parameters given by ethtool
4605 * to return link information.
4606 * Description:
4607 * Returns link specific information like speed, duplex etc.. to ethtool.
4608 * Return value :
4609 * return 0 on success.
4610 */
4611
4612static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4613{
1ee6dd77 4614 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4615 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4616 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4617 info->port = PORT_FIBRE;
4618 /* info->transceiver?? TODO */
4619
4620 if (netif_carrier_ok(sp->dev)) {
4621 info->speed = 10000;
4622 info->duplex = DUPLEX_FULL;
4623 } else {
4624 info->speed = -1;
4625 info->duplex = -1;
4626 }
4627
4628 info->autoneg = AUTONEG_DISABLE;
4629 return 0;
4630}
4631
4632/**
20346722 4633 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4634 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4635 * s2io_nic structure.
4636 * @info : pointer to the structure with parameters given by ethtool to
4637 * return driver information.
4638 * Description:
4639 * Returns driver specific information like name, version etc. to ethtool.
4640 * Return value:
4641 * void
4642 */
4643
4644static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4645 struct ethtool_drvinfo *info)
4646{
1ee6dd77 4647 struct s2io_nic *sp = dev->priv;
1da177e4 4648
dbc2309d
JL
4649 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4650 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4651 strncpy(info->fw_version, "", sizeof(info->fw_version));
4652 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
4653 info->regdump_len = XENA_REG_SPACE;
4654 info->eedump_len = XENA_EEPROM_SPACE;
4655 info->testinfo_len = S2IO_TEST_LEN;
fa1f0cb3
SS
4656
4657 if (sp->device_type == XFRAME_I_DEVICE)
4658 info->n_stats = XFRAME_I_STAT_LEN;
4659 else
4660 info->n_stats = XFRAME_II_STAT_LEN;
1da177e4
LT
4661}
4662
4663/**
4664 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
20346722 4665 * @sp: private member of the device structure, which is a pointer to the
1da177e4 4666 * s2io_nic structure.
20346722 4667 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
4668 * dumping the registers.
4669 * @reg_space: The output buffer into which all the registers are dumped.
4670 * Description:
4671 * Dumps the entire register space of xFrame NIC into the user given
4672 * buffer area.
4673 * Return value :
4674 * void .
4675*/
4676
4677static void s2io_ethtool_gregs(struct net_device *dev,
4678 struct ethtool_regs *regs, void *space)
4679{
4680 int i;
4681 u64 reg;
4682 u8 *reg_space = (u8 *) space;
1ee6dd77 4683 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4684
4685 regs->len = XENA_REG_SPACE;
4686 regs->version = sp->pdev->subsystem_device;
4687
4688 for (i = 0; i < regs->len; i += 8) {
4689 reg = readq(sp->bar0 + i);
4690 memcpy((reg_space + i), &reg, 8);
4691 }
4692}
4693
4694/**
4695 * s2io_phy_id - timer function that alternates adapter LED.
20346722 4696 * @data : address of the private member of the device structure, which
1da177e4 4697 * is a pointer to the s2io_nic structure, provided as an unsigned long.
20346722 4698 * Description: This is actually the timer function that alternates the
4699 * adapter LED bit of the adapter control bit to set/reset every time on
4700 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
1da177e4
LT
4701 * once every second.
4702*/
4703static void s2io_phy_id(unsigned long data)
4704{
1ee6dd77
RB
4705 struct s2io_nic *sp = (struct s2io_nic *) data;
4706 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4707 u64 val64 = 0;
4708 u16 subid;
4709
4710 subid = sp->pdev->subsystem_device;
541ae68f 4711 if ((sp->device_type == XFRAME_II_DEVICE) ||
4712 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
4713 val64 = readq(&bar0->gpio_control);
4714 val64 ^= GPIO_CTRL_GPIO_0;
4715 writeq(val64, &bar0->gpio_control);
4716 } else {
4717 val64 = readq(&bar0->adapter_control);
4718 val64 ^= ADAPTER_LED_ON;
4719 writeq(val64, &bar0->adapter_control);
4720 }
4721
4722 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4723}
4724
4725/**
4726 * s2io_ethtool_idnic - To physically identify the nic on the system.
4727 * @sp : private member of the device structure, which is a pointer to the
4728 * s2io_nic structure.
20346722 4729 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
4730 * ethtool.
4731 * Description: Used to physically identify the NIC on the system.
20346722 4732 * The Link LED will blink for a time specified by the user for
1da177e4 4733 * identification.
20346722 4734 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
4735 * identification is possible only if its link is up.
4736 * Return value:
4737 * int , returns 0 on success
4738 */
4739
4740static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4741{
4742 u64 val64 = 0, last_gpio_ctrl_val;
1ee6dd77
RB
4743 struct s2io_nic *sp = dev->priv;
4744 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4745 u16 subid;
4746
4747 subid = sp->pdev->subsystem_device;
4748 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f 4749 if ((sp->device_type == XFRAME_I_DEVICE) &&
4750 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
4751 val64 = readq(&bar0->adapter_control);
4752 if (!(val64 & ADAPTER_CNTL_EN)) {
4753 printk(KERN_ERR
4754 "Adapter Link down, cannot blink LED\n");
4755 return -EFAULT;
4756 }
4757 }
4758 if (sp->id_timer.function == NULL) {
4759 init_timer(&sp->id_timer);
4760 sp->id_timer.function = s2io_phy_id;
4761 sp->id_timer.data = (unsigned long) sp;
4762 }
4763 mod_timer(&sp->id_timer, jiffies);
4764 if (data)
20346722 4765 msleep_interruptible(data * HZ);
1da177e4 4766 else
20346722 4767 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
4768 del_timer_sync(&sp->id_timer);
4769
541ae68f 4770 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
4771 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4772 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4773 }
4774
4775 return 0;
4776}
4777
0cec35eb
SH
4778static void s2io_ethtool_gringparam(struct net_device *dev,
4779 struct ethtool_ringparam *ering)
4780{
4781 struct s2io_nic *sp = dev->priv;
4782 int i,tx_desc_count=0,rx_desc_count=0;
4783
4784 if (sp->rxd_mode == RXD_MODE_1)
4785 ering->rx_max_pending = MAX_RX_DESC_1;
4786 else if (sp->rxd_mode == RXD_MODE_3B)
4787 ering->rx_max_pending = MAX_RX_DESC_2;
4788 else if (sp->rxd_mode == RXD_MODE_3A)
4789 ering->rx_max_pending = MAX_RX_DESC_3;
4790
4791 ering->tx_max_pending = MAX_TX_DESC;
4792 for (i = 0 ; i < sp->config.tx_fifo_num ; i++) {
4793 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
4794 }
4795 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
4796 ering->tx_pending = tx_desc_count;
4797 rx_desc_count = 0;
4798 for (i = 0 ; i < sp->config.rx_ring_num ; i++) {
4799 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
4800 }
4801 ering->rx_pending = rx_desc_count;
4802
4803 ering->rx_mini_max_pending = 0;
4804 ering->rx_mini_pending = 0;
4805 if(sp->rxd_mode == RXD_MODE_1)
4806 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
4807 else if (sp->rxd_mode == RXD_MODE_3B)
4808 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
4809 ering->rx_jumbo_pending = rx_desc_count;
4810}
4811
1da177e4
LT
4812/**
4813 * s2io_ethtool_getpause_data - Pause frame generation and reception.
20346722 4814 * @sp : private member of the device structure, which is a pointer to the
4815 * s2io_nic structure.
1da177e4
LT
4816 * @ep : pointer to the structure with pause parameters given by ethtool.
4817 * Description:
4818 * Returns the Pause frame generation and reception capability of the NIC.
4819 * Return value:
4820 * void
4821 */
4822static void s2io_ethtool_getpause_data(struct net_device *dev,
4823 struct ethtool_pauseparam *ep)
4824{
4825 u64 val64;
1ee6dd77
RB
4826 struct s2io_nic *sp = dev->priv;
4827 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4828
4829 val64 = readq(&bar0->rmac_pause_cfg);
4830 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4831 ep->tx_pause = TRUE;
4832 if (val64 & RMAC_PAUSE_RX_ENABLE)
4833 ep->rx_pause = TRUE;
4834 ep->autoneg = FALSE;
4835}
4836
4837/**
4838 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 4839 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4840 * s2io_nic structure.
4841 * @ep : pointer to the structure with pause parameters given by ethtool.
4842 * Description:
4843 * It can be used to set or reset Pause frame generation or reception
4844 * support of the NIC.
4845 * Return value:
4846 * int, returns 0 on Success
4847 */
4848
4849static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 4850 struct ethtool_pauseparam *ep)
1da177e4
LT
4851{
4852 u64 val64;
1ee6dd77
RB
4853 struct s2io_nic *sp = dev->priv;
4854 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4855
4856 val64 = readq(&bar0->rmac_pause_cfg);
4857 if (ep->tx_pause)
4858 val64 |= RMAC_PAUSE_GEN_ENABLE;
4859 else
4860 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4861 if (ep->rx_pause)
4862 val64 |= RMAC_PAUSE_RX_ENABLE;
4863 else
4864 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4865 writeq(val64, &bar0->rmac_pause_cfg);
4866 return 0;
4867}
4868
4869/**
4870 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 4871 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
4872 * s2io_nic structure.
4873 * @off : offset from which the data must be read
4874 * @data : It's an output parameter where the data read at the given
20346722 4875 * offset is stored.
1da177e4 4876 * Description:
20346722 4877 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
4878 * read data.
4879 * NOTE: Will allow reading only the part of the EEPROM visible through the
4880 * I2C bus.
4881 * Return value:
4882 * -1 on failure and 0 on success.
4883 */
4884
4885#define S2IO_DEV_ID 5
1ee6dd77 4886static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
1da177e4
LT
4887{
4888 int ret = -1;
4889 u32 exit_cnt = 0;
4890 u64 val64;
1ee6dd77 4891 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4892
ad4ebed0 4893 if (sp->device_type == XFRAME_I_DEVICE) {
4894 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4895 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4896 I2C_CONTROL_CNTL_START;
4897 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
1da177e4 4898
ad4ebed0 4899 while (exit_cnt < 5) {
4900 val64 = readq(&bar0->i2c_control);
4901 if (I2C_CONTROL_CNTL_END(val64)) {
4902 *data = I2C_CONTROL_GET_DATA(val64);
4903 ret = 0;
4904 break;
4905 }
4906 msleep(50);
4907 exit_cnt++;
1da177e4 4908 }
1da177e4
LT
4909 }
4910
ad4ebed0 4911 if (sp->device_type == XFRAME_II_DEVICE) {
4912 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4913 SPI_CONTROL_BYTECNT(0x3) |
ad4ebed0 4914 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4915 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4916 val64 |= SPI_CONTROL_REQ;
4917 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4918 while (exit_cnt < 5) {
4919 val64 = readq(&bar0->spi_control);
4920 if (val64 & SPI_CONTROL_NACK) {
4921 ret = 1;
4922 break;
4923 } else if (val64 & SPI_CONTROL_DONE) {
4924 *data = readq(&bar0->spi_data);
4925 *data &= 0xffffff;
4926 ret = 0;
4927 break;
4928 }
4929 msleep(50);
4930 exit_cnt++;
4931 }
4932 }
1da177e4
LT
4933 return ret;
4934}
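
/*
 * Editor's sketch: both the I2C (Xframe I) and SPI (Xframe II) paths above
 * share one pattern -- issue a command, then poll a status register at most
 * 5 times with a 50 ms sleep, i.e. a ~250 ms timeout. A generic form of
 * that loop (hypothetical helper, assuming a predicate over the register
 * value) would be:
 */
static int s2io_poll_for_completion(u64 __iomem *reg, int (*done)(u64 val))
{
	int exit_cnt;

	for (exit_cnt = 0; exit_cnt < 5; exit_cnt++) {
		if (done(readq(reg)))
			return 0;	/* command completed */
		msleep(50);
	}
	return -1;			/* timed out */
}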
4935
4936/**
4937 * write_eeprom - actually writes the relevant part of the data value.
4938 * @sp : private member of the device structure, which is a pointer to the
4939 * s2io_nic structure.
4940 * @off : offset at which the data must be written
4941 * @data : The data that is to be written
20346722 4942 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
4943 * the Eeprom. (max of 3)
4944 * Description:
4945 * Actually writes the relevant part of the data value into the Eeprom
4946 * through the I2C bus.
4947 * Return value:
4948 * 0 on success, -1 on failure.
4949 */
4950
1ee6dd77 4951static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
1da177e4
LT
4952{
4953 int exit_cnt = 0, ret = -1;
4954 u64 val64;
1ee6dd77 4955 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 4956
ad4ebed0 4957 if (sp->device_type == XFRAME_I_DEVICE) {
4958 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4959 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4960 I2C_CONTROL_CNTL_START;
4961 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4962
4963 while (exit_cnt < 5) {
4964 val64 = readq(&bar0->i2c_control);
4965 if (I2C_CONTROL_CNTL_END(val64)) {
4966 if (!(val64 & I2C_CONTROL_NACK))
4967 ret = 0;
4968 break;
4969 }
4970 msleep(50);
4971 exit_cnt++;
4972 }
4973 }
1da177e4 4974
ad4ebed0 4975 if (sp->device_type == XFRAME_II_DEVICE) {
4976 int write_cnt = (cnt == 8) ? 0 : cnt;
4977 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4978
4979 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 4980 SPI_CONTROL_BYTECNT(write_cnt) |
ad4ebed0 4981 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4982 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4983 val64 |= SPI_CONTROL_REQ;
4984 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4985 while (exit_cnt < 5) {
4986 val64 = readq(&bar0->spi_control);
4987 if (val64 & SPI_CONTROL_NACK) {
4988 ret = 1;
4989 break;
4990 } else if (val64 & SPI_CONTROL_DONE) {
1da177e4 4991 ret = 0;
ad4ebed0 4992 break;
4993 }
4994 msleep(50);
4995 exit_cnt++;
1da177e4 4996 }
1da177e4 4997 }
1da177e4
LT
4998 return ret;
4999}
1ee6dd77 5000static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5001{
b41477f3
AR
5002 u8 *vpd_data;
5003 u8 data;
9dc737a7
AR
5004 int i=0, cnt, fail = 0;
5005 int vpd_addr = 0x80;
5006
5007 if (nic->device_type == XFRAME_II_DEVICE) {
5008 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5009 vpd_addr = 0x80;
5010 }
5011 else {
5012 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5013 vpd_addr = 0x50;
5014 }
19a60522 5015 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5016
b41477f3
AR
5017 vpd_data = kmalloc(256, GFP_KERNEL);
5018 if (!vpd_data)
5019 return;
5020
9dc737a7
AR
5021 for (i = 0; i < 256; i += 4) {
5022 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5023 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5024 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5025 for (cnt = 0; cnt < 5; cnt++) {
5026 msleep(2);
5027 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5028 if (data == 0x80)
5029 break;
5030 }
5031 if (cnt >= 5) {
5032 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5033 fail = 1;
5034 break;
5035 }
5036 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5037 (u32 *)&vpd_data[i]);
5038 }
19a60522
SS
5039
5040 if(!fail) {
5041 /* read serial number of adapter */
5042 for (cnt = 0; cnt < 256; cnt++) {
5043 if ((vpd_data[cnt] == 'S') &&
5044 (vpd_data[cnt+1] == 'N') &&
5045 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5046 memset(nic->serial_num, 0, VPD_STRING_LEN);
5047 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5048 vpd_data[cnt+2]);
5049 break;
5050 }
5051 }
5052 }
5053
5054 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
5055 memset(nic->product_name, 0, vpd_data[1]);
5056 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5057 }
b41477f3 5058 kfree(vpd_data);
9dc737a7
AR
5059}
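
/*
 * Editor's note: the serial-number scan above relies on the PCI VPD
 * read-only field layout -- a two-character keyword, a one-byte length,
 * then that many bytes of data. A generic keyword lookup over the raw
 * 256-byte buffer (hypothetical helper) could look like:
 */
static int s2io_vpd_find_field(const u8 *vpd, int size, char k0, char k1)
{
	int i;

	for (i = 0; i + 2 < size; i++)
		if (vpd[i] == k0 && vpd[i + 1] == k1)
			return i + 3;	/* field data; vpd[i+2] is its length */
	return -1;
}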
5060
1da177e4
LT
5061/**
5062 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5063 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
20346722 5064 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5065 * containing all relevant information.
5066 * @data_buf : buffer in which the values read from the Eeprom are returned.
5067 * Description: Reads the values stored in the Eeprom at given offset
5068 * for a given length. Stores these values into the input argument data
5069 * buffer 'data_buf' and returns these to the caller (ethtool.)
5070 * Return value:
5071 * int 0 on success
5072 */
5073
5074static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5075 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5076{
ad4ebed0 5077 u32 i, valid;
5078 u64 data;
1ee6dd77 5079 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5080
5081 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5082
5083 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5084 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5085
5086 for (i = 0; i < eeprom->len; i += 4) {
5087 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5088 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5089 return -EFAULT;
5090 }
5091 valid = INV(data);
5092 memcpy((data_buf + i), &valid, 4);
5093 }
5094 return 0;
5095}
5096
5097/**
5098 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5099 * @sp : private member of the device structure, which is a pointer to the
5100 * s2io_nic structure.
20346722 5101 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5102 * containing all relevant information.
5103 * @data_buf : user defined value to be written into Eeprom.
5104 * Description:
5105 * Tries to write the user provided value in the Eeprom, at the offset
5106 * given by the user.
5107 * Return value:
5108 * 0 on success, -EFAULT on failure.
5109 */
5110
5111static int s2io_ethtool_seeprom(struct net_device *dev,
5112 struct ethtool_eeprom *eeprom,
5113 u8 * data_buf)
5114{
5115 int len = eeprom->len, cnt = 0;
ad4ebed0 5116 u64 valid = 0, data;
1ee6dd77 5117 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5118
5119 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5120 DBG_PRINT(ERR_DBG,
5121 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5122 DBG_PRINT(ERR_DBG, "is wrong, it's not 0x%x\n",
5123 eeprom->magic);
5124 return -EFAULT;
5125 }
5126
5127 while (len) {
5128 data = (u32) data_buf[cnt] & 0x000000FF;
5129 if (data) {
5130 valid = (u32) (data << 24);
5131 } else
5132 valid = data;
5133
5134 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5135 DBG_PRINT(ERR_DBG,
5136 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5137 DBG_PRINT(ERR_DBG,
5138 "write into the specified offset\n");
5139 return -EFAULT;
5140 }
5141 cnt++;
5142 len--;
5143 }
5144
5145 return 0;
5146}
5147
5148/**
20346722 5149 * s2io_register_test - reads and writes into all clock domains.
5150 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5151 * s2io_nic structure.
5152 * @data : variable that returns the result of each of the tests conducted
5153 * by the driver.
5154 * Description:
5155 * Read and write into all clock domains. The NIC has 3 clock domains;
5156 * the test verifies that registers in all three regions are accessible.
5157 * Return value:
5158 * 0 on success.
5159 */
5160
1ee6dd77 5161static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5162{
1ee6dd77 5163 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5164 u64 val64 = 0, exp_val;
1da177e4
LT
5165 int fail = 0;
5166
20346722 5167 val64 = readq(&bar0->pif_rd_swapper_fb);
5168 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
5169 fail = 1;
5170 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5171 }
5172
5173 val64 = readq(&bar0->rmac_pause_cfg);
5174 if (val64 != 0xc000ffff00000000ULL) {
5175 fail = 1;
5176 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5177 }
5178
5179 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5180 if (sp->device_type == XFRAME_II_DEVICE)
5181 exp_val = 0x0404040404040404ULL;
5182 else
5183 exp_val = 0x0808080808080808ULL;
5184 if (val64 != exp_val) {
1da177e4
LT
5185 fail = 1;
5186 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5187 }
5188
5189 val64 = readq(&bar0->xgxs_efifo_cfg);
5190 if (val64 != 0x000000001923141EULL) {
5191 fail = 1;
5192 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5193 }
5194
5195 val64 = 0x5A5A5A5A5A5A5A5AULL;
5196 writeq(val64, &bar0->xmsi_data);
5197 val64 = readq(&bar0->xmsi_data);
5198 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5199 fail = 1;
5200 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5201 }
5202
5203 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5204 writeq(val64, &bar0->xmsi_data);
5205 val64 = readq(&bar0->xmsi_data);
5206 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5207 fail = 1;
5208 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5209 }
5210
5211 *data = fail;
ad4ebed0 5212 return fail;
1da177e4
LT
5213}
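
/*
 * Editor's note: the write test above uses the complementary patterns
 * 0x5A5A... and 0xA5A5... so that every data bit is driven both high and
 * low. Factored out (hypothetical helper, returning 1 on mismatch):
 */
static int s2io_pattern_test(u64 __iomem *reg, u64 pattern)
{
	writeq(pattern, reg);
	return (readq(reg) != pattern);
}
/* e.g.: fail |= s2io_pattern_test(&bar0->xmsi_data, 0x5A5A5A5A5A5A5A5AULL); */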
5214
5215/**
20346722 5216 * s2io_eeprom_test - verifies that the EEPROM in the Xena can be programmed.
1da177e4
LT
5217 * @sp : private member of the device structure, which is a pointer to the
5218 * s2io_nic structure.
5219 * @data: variable that returns the result of each of the tests conducted by
5220 * the driver.
5221 * Description:
20346722 5222 * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
1da177e4
LT
5223 * register.
5224 * Return value:
5225 * 0 on success.
5226 */
5227
1ee6dd77 5228static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5229{
5230 int fail = 0;
ad4ebed0 5231 u64 ret_data, org_4F0, org_7F0;
5232 u8 saved_4F0 = 0, saved_7F0 = 0;
5233 struct net_device *dev = sp->dev;
1da177e4
LT
5234
5235 /* Test Write Error at offset 0 */
ad4ebed0 5236 /* Note that SPI interface allows write access to all areas
5237 * of EEPROM. Hence doing all negative testing only for Xframe I.
5238 */
5239 if (sp->device_type == XFRAME_I_DEVICE)
5240 if (!write_eeprom(sp, 0, 0, 3))
5241 fail = 1;
5242
5243 /* Save current values at offsets 0x4F0 and 0x7F0 */
5244 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5245 saved_4F0 = 1;
5246 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5247 saved_7F0 = 1;
1da177e4
LT
5248
5249 /* Test Write at offset 4f0 */
ad4ebed0 5250 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
5251 fail = 1;
5252 if (read_eeprom(sp, 0x4F0, &ret_data))
5253 fail = 1;
5254
ad4ebed0 5255 if (ret_data != 0x012345) {
26b7625c
AM
5256 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5257 "Data written %llx Data read %llx\n",
5258 dev->name, (unsigned long long)0x12345,
5259 (unsigned long long)ret_data);
1da177e4 5260 fail = 1;
ad4ebed0 5261 }
1da177e4
LT
5262
5263 /* Reset the EEPROM data to FFFF */
ad4ebed0 5264 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
5265
5266 /* Test Write Request Error at offset 0x7c */
ad4ebed0 5267 if (sp->device_type == XFRAME_I_DEVICE)
5268 if (!write_eeprom(sp, 0x07C, 0, 3))
5269 fail = 1;
1da177e4 5270
ad4ebed0 5271 /* Test Write Request at offset 0x7f0 */
5272 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 5273 fail = 1;
ad4ebed0 5274 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
5275 fail = 1;
5276
ad4ebed0 5277 if (ret_data != 0x012345) {
26b7625c
AM
5278 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5279 "Data written %llx Data read %llx\n",
5280 dev->name, (unsigned long long)0x12345,
5281 (unsigned long long)ret_data);
1da177e4 5282 fail = 1;
ad4ebed0 5283 }
1da177e4
LT
5284
5285 /* Reset the EEPROM data to FFFF */
ad4ebed0 5286 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 5287
ad4ebed0 5288 if (sp->device_type == XFRAME_I_DEVICE) {
5289 /* Test Write Error at offset 0x80 */
5290 if (!write_eeprom(sp, 0x080, 0, 3))
5291 fail = 1;
1da177e4 5292
ad4ebed0 5293 /* Test Write Error at offset 0xfc */
5294 if (!write_eeprom(sp, 0x0FC, 0, 3))
5295 fail = 1;
1da177e4 5296
ad4ebed0 5297 /* Test Write Error at offset 0x100 */
5298 if (!write_eeprom(sp, 0x100, 0, 3))
5299 fail = 1;
1da177e4 5300
ad4ebed0 5301 /* Test Write Error at offset 4ec */
5302 if (!write_eeprom(sp, 0x4EC, 0, 3))
5303 fail = 1;
5304 }
5305
5306 /* Restore values at offsets 0x4F0 and 0x7F0 */
5307 if (saved_4F0)
5308 write_eeprom(sp, 0x4F0, org_4F0, 3);
5309 if (saved_7F0)
5310 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
5311
5312 *data = fail;
ad4ebed0 5313 return fail;
1da177e4
LT
5314}
5315
5316/**
5317 * s2io_bist_test - invokes the MemBist test of the card.
20346722 5318 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5319 * s2io_nic structure.
20346722 5320 * @data: variable that returns the result of each of the tests conducted by
1da177e4
LT
5321 * the driver.
5322 * Description:
5323 * This invokes the MemBist test of the card. We give around
5324 * 2 seconds for the test to complete. If it's still not complete
20346722 5325 * within this period, we consider that the test failed.
1da177e4
LT
5326 * Return value:
5327 * 0 on success and -1 on failure.
5328 */
5329
1ee6dd77 5330static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5331{
5332 u8 bist = 0;
5333 int cnt = 0, ret = -1;
5334
5335 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5336 bist |= PCI_BIST_START;
5337 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5338
5339 while (cnt < 20) {
5340 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5341 if (!(bist & PCI_BIST_START)) {
5342 *data = (bist & PCI_BIST_CODE_MASK);
5343 ret = 0;
5344 break;
5345 }
5346 msleep(100);
5347 cnt++;
5348 }
5349
5350 return ret;
5351}
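
/*
 * Editor's note: per the PCI specification, BIST is a one-byte register at
 * config offset 0x0f. Software sets PCI_BIST_START (bit 6), the device
 * clears it when the self-test finishes, and the low four bits
 * (PCI_BIST_CODE_MASK) carry the completion code, 0 meaning pass -- which
 * is the value the function above hands back through *data.
 */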
5352
5353/**
20346722 5354 * s2io_link_test - verifies the link state of the NIC
5355 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5356 * s2io_nic structure.
5357 * @data: variable that returns the result of each of the tests conducted by
5358 * the driver.
5359 * Description:
20346722 5360 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
5361 * argument 'data' appropriately.
5362 * Return value:
5363 * 0 on success.
5364 */
5365
1ee6dd77 5366static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5367{
1ee6dd77 5368 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5369 u64 val64;
5370
5371 val64 = readq(&bar0->adapter_status);
c92ca04b 5372 if(!(LINK_IS_UP(val64)))
1da177e4 5373 *data = 1;
c92ca04b
AR
5374 else
5375 *data = 0;
1da177e4 5376
b41477f3 5377 return *data;
1da177e4
LT
5378}
5379
5380/**
20346722 5381 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5382 * @sp : private member of the device structure, which is a pointer to the
1da177e4 5383 * s2io_nic structure.
20346722 5384 * @data : variable that returns the result of each of the tests
1da177e4
LT
5385 * conducted by the driver.
5386 * Description:
20346722 5387 * This is one of the offline tests; it checks the read and write
1da177e4
LT
5388 * access to the RldRam chip on the NIC.
5389 * Return value:
5390 * 0 on success.
5391 */
5392
1ee6dd77 5393static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5394{
1ee6dd77 5395 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5396 u64 val64;
ad4ebed0 5397 int cnt, iteration = 0, test_fail = 0;
1da177e4
LT
5398
5399 val64 = readq(&bar0->adapter_control);
5400 val64 &= ~ADAPTER_ECC_EN;
5401 writeq(val64, &bar0->adapter_control);
5402
5403 val64 = readq(&bar0->mc_rldram_test_ctrl);
5404 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 5405 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5406
5407 val64 = readq(&bar0->mc_rldram_mrs);
5408 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5409 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5410
5411 val64 |= MC_RLDRAM_MRS_ENABLE;
5412 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5413
5414 while (iteration < 2) {
5415 val64 = 0x55555555aaaa0000ULL;
5416 if (iteration == 1) {
5417 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5418 }
5419 writeq(val64, &bar0->mc_rldram_test_d0);
5420
5421 val64 = 0xaaaa5a5555550000ULL;
5422 if (iteration == 1) {
5423 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5424 }
5425 writeq(val64, &bar0->mc_rldram_test_d1);
5426
5427 val64 = 0x55aaaaaaaa5a0000ULL;
5428 if (iteration == 1) {
5429 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5430 }
5431 writeq(val64, &bar0->mc_rldram_test_d2);
5432
ad4ebed0 5433 val64 = (u64) (0x0000003ffffe0100ULL);
1da177e4
LT
5434 writeq(val64, &bar0->mc_rldram_test_add);
5435
ad4ebed0 5436 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5437 MC_RLDRAM_TEST_GO;
5438 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5439
5440 for (cnt = 0; cnt < 5; cnt++) {
5441 val64 = readq(&bar0->mc_rldram_test_ctrl);
5442 if (val64 & MC_RLDRAM_TEST_DONE)
5443 break;
5444 msleep(200);
5445 }
5446
5447 if (cnt == 5)
5448 break;
5449
ad4ebed0 5450 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5451 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
5452
5453 for (cnt = 0; cnt < 5; cnt++) {
5454 val64 = readq(&bar0->mc_rldram_test_ctrl);
5455 if (val64 & MC_RLDRAM_TEST_DONE)
5456 break;
5457 msleep(500);
5458 }
5459
5460 if (cnt == 5)
5461 break;
5462
5463 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 5464 if (!(val64 & MC_RLDRAM_TEST_PASS))
5465 test_fail = 1;
1da177e4
LT
5466
5467 iteration++;
5468 }
5469
ad4ebed0 5470 *data = test_fail;
1da177e4 5471
ad4ebed0 5472 /* Bring the adapter out of test mode */
5473 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5474
5475 return test_fail;
1da177e4
LT
5476}
5477
5478/**
5479 * s2io_ethtool_test - conducts 5 tests to determine the health of the card.
5480 * @sp : private member of the device structure, which is a pointer to the
5481 * s2io_nic structure.
5482 * @ethtest : pointer to an ethtool command specific structure that will be
5483 * returned to the user.
20346722 5484 * @data : variable that returns the result of each of the tests
1da177e4
LT
5485 * conducted by the driver.
5486 * Description:
5487 * This function conducts 5 tests (4 offline and 1 online) to determine
5488 * the health of the card.
5489 * Return value:
5490 * void
5491 */
5492
5493static void s2io_ethtool_test(struct net_device *dev,
5494 struct ethtool_test *ethtest,
5495 uint64_t * data)
5496{
1ee6dd77 5497 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5498 int orig_state = netif_running(sp->dev);
5499
5500 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5501 /* Offline Tests. */
20346722 5502 if (orig_state)
1da177e4 5503 s2io_close(sp->dev);
1da177e4
LT
5504
5505 if (s2io_register_test(sp, &data[0]))
5506 ethtest->flags |= ETH_TEST_FL_FAILED;
5507
5508 s2io_reset(sp);
1da177e4
LT
5509
5510 if (s2io_rldram_test(sp, &data[3]))
5511 ethtest->flags |= ETH_TEST_FL_FAILED;
5512
5513 s2io_reset(sp);
1da177e4
LT
5514
5515 if (s2io_eeprom_test(sp, &data[1]))
5516 ethtest->flags |= ETH_TEST_FL_FAILED;
5517
5518 if (s2io_bist_test(sp, &data[4]))
5519 ethtest->flags |= ETH_TEST_FL_FAILED;
5520
5521 if (orig_state)
5522 s2io_open(sp->dev);
5523
5524 data[2] = 0;
5525 } else {
5526 /* Online Tests. */
5527 if (!orig_state) {
5528 DBG_PRINT(ERR_DBG,
5529 "%s: is not up, cannot run test\n",
5530 dev->name);
5531 data[0] = -1;
5532 data[1] = -1;
5533 data[2] = -1;
5534 data[3] = -1;
5535 data[4] = -1;
5536 }
5537
5538 if (s2io_link_test(sp, &data[2]))
5539 ethtest->flags |= ETH_TEST_FL_FAILED;
5540
5541 data[0] = 0;
5542 data[1] = 0;
5543 data[3] = 0;
5544 data[4] = 0;
5545 }
5546}
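
/*
 * Editor's note: this handler is reached through the ETHTOOL_TEST ioctl;
 * from user space that is "ethtool -t <ifname> offline" for the full set
 * (the interface is closed and reopened around the offline tests above) or
 * "ethtool -t <ifname> online" for just the non-intrusive link test.
 */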
5547
5548static void s2io_get_ethtool_stats(struct net_device *dev,
5549 struct ethtool_stats *estats,
5550 u64 * tmp_stats)
5551{
5552 int i = 0;
1ee6dd77
RB
5553 struct s2io_nic *sp = dev->priv;
5554 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 5555
7ba013ac 5556 s2io_updt_stats(sp);
541ae68f 5557 tmp_stats[i++] =
5558 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5559 le32_to_cpu(stat_info->tmac_frms);
5560 tmp_stats[i++] =
5561 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5562 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 5563 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f 5564 tmp_stats[i++] =
5565 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5566 le32_to_cpu(stat_info->tmac_mcst_frms);
5567 tmp_stats[i++] =
5568 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5569 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 5570 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
5571 tmp_stats[i++] =
5572 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5573 le32_to_cpu(stat_info->tmac_ttl_octets);
5574 tmp_stats[i++] =
5575 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5576 le32_to_cpu(stat_info->tmac_ucst_frms);
5577 tmp_stats[i++] =
5578 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5579 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f 5580 tmp_stats[i++] =
5581 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5582 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 5583 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 5584 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f 5585 tmp_stats[i++] =
5586 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5587 le32_to_cpu(stat_info->tmac_vld_ip);
5588 tmp_stats[i++] =
5589 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5590 le32_to_cpu(stat_info->tmac_drop_ip);
5591 tmp_stats[i++] =
5592 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5593 le32_to_cpu(stat_info->tmac_icmp);
5594 tmp_stats[i++] =
5595 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5596 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 5597 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f 5598 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5599 le32_to_cpu(stat_info->tmac_udp);
5600 tmp_stats[i++] =
5601 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5602 le32_to_cpu(stat_info->rmac_vld_frms);
5603 tmp_stats[i++] =
5604 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5605 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
5606 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5607 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f 5608 tmp_stats[i++] =
5609 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5610 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5611 tmp_stats[i++] =
5612 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5613 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 5614 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 5615 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
5616 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5617 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
5618 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5619 tmp_stats[i++] =
5620 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5621 le32_to_cpu(stat_info->rmac_ttl_octets);
5622 tmp_stats[i++] =
5623 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5624 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5625 tmp_stats[i++] =
5626 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5627 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f 5628 tmp_stats[i++] =
5629 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5630 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
5631 tmp_stats[i++] =
5632 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5633 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5634 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5635 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f 5636 tmp_stats[i++] =
5637 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5638 le32_to_cpu(stat_info->rmac_usized_frms);
5639 tmp_stats[i++] =
5640 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5641 le32_to_cpu(stat_info->rmac_osized_frms);
5642 tmp_stats[i++] =
5643 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5644 le32_to_cpu(stat_info->rmac_frag_frms);
5645 tmp_stats[i++] =
5646 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5647 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
5648 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5649 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5650 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5651 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5652 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5653 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5654 tmp_stats[i++] =
5655 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 5656 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
5657 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5658 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
5659 tmp_stats[i++] =
5660 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 5661 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
5662 tmp_stats[i++] =
5663 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 5664 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 5665 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
5666 tmp_stats[i++] =
5667 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f 5668 le32_to_cpu(stat_info->rmac_udp);
5669 tmp_stats[i++] =
5670 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5671 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
5672 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5673 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5674 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5675 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5676 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5677 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5678 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5679 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5680 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5681 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5682 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5683 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5684 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5685 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5686 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5687 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5688 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f 5689 tmp_stats[i++] =
5690 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5691 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
5692 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5693 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f 5694 tmp_stats[i++] =
5695 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5696 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 5697 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
5698 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5699 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5700 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5701 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5702 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5703 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5704 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5705 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5706 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5707 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5708 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5709 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5710 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5711 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5712 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5713 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5714 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5715 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
fa1f0cb3
SS
5716
5717 /* Enhanced statistics exist only for Hercules */
5718 if(sp->device_type == XFRAME_II_DEVICE) {
5719 tmp_stats[i++] =
5720 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5721 tmp_stats[i++] =
5722 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5723 tmp_stats[i++] =
5724 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5725 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5726 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5727 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5728 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5729 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5730 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5731 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5732 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5733 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5734 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5735 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5736 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5737 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5738 }
5739
7ba013ac 5740 tmp_stats[i++] = 0;
5741 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5742 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
5743 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5744 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5745 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5746 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5747 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5748 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5749 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5750 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5751 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5752 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5753 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5754 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5755 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5756 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5757 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5758 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5759 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
5760 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5761 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5762 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5763 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 5764 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
5765 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5766 int count = 0;
6aa20a22 5767 /*
bd1034f0
AR
5768 * Since 64-bit divide does not work on all platforms,
5769 * do repeated subtraction.
5770 */
5771 while (tmp >= stat_info->sw_stat.num_aggregations) {
5772 tmp -= stat_info->sw_stat.num_aggregations;
5773 count++;
5774 }
5775 tmp_stats[i++] = count;
fe931395 5776 }
bd1034f0
AR
5777 else
5778 tmp_stats[i++] = 0;
1da177e4
LT
5779}
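
/*
 * Editor's note: the repeated-subtraction loop above is a 64-bit integer
 * division written without '/', which is unavailable for 64-bit operands
 * on some 32-bit platforms. The kernel's do_div() from <asm/div64.h> does
 * the same job; assuming the aggregation count fits in 32 bits, the loop
 * could be replaced by (sketch, not the driver's code):
 *
 *	u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
 *	do_div(tmp, stat_info->sw_stat.num_aggregations);
 *	tmp_stats[i++] = tmp;
 */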
5780
ac1f60db 5781static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
5782{
5783 return (XENA_REG_SPACE);
5784}
5785
5786
ac1f60db 5787static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 5788{
1ee6dd77 5789 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5790
5791 return (sp->rx_csum);
5792}
ac1f60db
AB
5793
5794static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 5795{
1ee6dd77 5796 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5797
5798 if (data)
5799 sp->rx_csum = 1;
5800 else
5801 sp->rx_csum = 0;
5802
5803 return 0;
5804}
ac1f60db
AB
5805
5806static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
5807{
5808 return (XENA_EEPROM_SPACE);
5809}
5810
ac1f60db 5811static int s2io_ethtool_self_test_count(struct net_device *dev)
1da177e4
LT
5812{
5813 return (S2IO_TEST_LEN);
5814}
ac1f60db
AB
5815
5816static void s2io_ethtool_get_strings(struct net_device *dev,
5817 u32 stringset, u8 * data)
1da177e4 5818{
fa1f0cb3
SS
5819 int stat_size = 0;
5820 struct s2io_nic *sp = dev->priv;
5821
1da177e4
LT
5822 switch (stringset) {
5823 case ETH_SS_TEST:
5824 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5825 break;
5826 case ETH_SS_STATS:
fa1f0cb3
SS
5827 stat_size = sizeof(ethtool_xena_stats_keys);
5828 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5829 if(sp->device_type == XFRAME_II_DEVICE) {
5830 memcpy(data + stat_size,
5831 &ethtool_enhanced_stats_keys,
5832 sizeof(ethtool_enhanced_stats_keys));
5833 stat_size += sizeof(ethtool_enhanced_stats_keys);
5834 }
5835
5836 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5837 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
5838 }
5839}
1da177e4
LT
5840static int s2io_ethtool_get_stats_count(struct net_device *dev)
5841{
fa1f0cb3
SS
5842 struct s2io_nic *sp = dev->priv;
5843 int stat_count = 0;
5844 switch(sp->device_type) {
5845 case XFRAME_I_DEVICE:
5846 stat_count = XFRAME_I_STAT_LEN;
5847 break;
5848
5849 case XFRAME_II_DEVICE:
5850 stat_count = XFRAME_II_STAT_LEN;
5851 break;
5852 }
5853
5854 return stat_count;
1da177e4
LT
5855}
5856
ac1f60db 5857static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
5858{
5859 if (data)
5860 dev->features |= NETIF_F_IP_CSUM;
5861 else
5862 dev->features &= ~NETIF_F_IP_CSUM;
5863
5864 return 0;
5865}
5866
75c30b13
AR
5867static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5868{
5869 return (dev->features & NETIF_F_TSO) != 0;
5870}
5871static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5872{
5873 if (data)
5874 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5875 else
5876 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5877
5878 return 0;
5879}
1da177e4 5880
7282d491 5881static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
5882 .get_settings = s2io_ethtool_gset,
5883 .set_settings = s2io_ethtool_sset,
5884 .get_drvinfo = s2io_ethtool_gdrvinfo,
5885 .get_regs_len = s2io_ethtool_get_regs_len,
5886 .get_regs = s2io_ethtool_gregs,
5887 .get_link = ethtool_op_get_link,
5888 .get_eeprom_len = s2io_get_eeprom_len,
5889 .get_eeprom = s2io_ethtool_geeprom,
5890 .set_eeprom = s2io_ethtool_seeprom,
0cec35eb 5891 .get_ringparam = s2io_ethtool_gringparam,
1da177e4
LT
5892 .get_pauseparam = s2io_ethtool_getpause_data,
5893 .set_pauseparam = s2io_ethtool_setpause_data,
5894 .get_rx_csum = s2io_ethtool_get_rx_csum,
5895 .set_rx_csum = s2io_ethtool_set_rx_csum,
5896 .get_tx_csum = ethtool_op_get_tx_csum,
5897 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5898 .get_sg = ethtool_op_get_sg,
5899 .set_sg = ethtool_op_set_sg,
75c30b13
AR
5900 .get_tso = s2io_ethtool_op_get_tso,
5901 .set_tso = s2io_ethtool_op_set_tso,
fed5eccd
AR
5902 .get_ufo = ethtool_op_get_ufo,
5903 .set_ufo = ethtool_op_set_ufo,
1da177e4
LT
5904 .self_test_count = s2io_ethtool_self_test_count,
5905 .self_test = s2io_ethtool_test,
5906 .get_strings = s2io_ethtool_get_strings,
5907 .phys_id = s2io_ethtool_idnic,
5908 .get_stats_count = s2io_ethtool_get_stats_count,
5909 .get_ethtool_stats = s2io_get_ethtool_stats
5910};
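
/*
 * Editor's note: this table only takes effect once it is attached to the
 * net_device during probe, which in this kernel generation is done with
 * SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); the attach itself happens
 * elsewhere in this file.
 */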
5911
5912/**
20346722 5913 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
5914 * @dev : Device pointer.
5915 * @ifr : An IOCTL specific structure that can contain a pointer to
5916 * a proprietary structure used to pass information to the driver.
5917 * @cmd : This is used to distinguish between the different commands that
5918 * can be passed to the IOCTL functions.
5919 * Description:
20346722 5920 * Currently no special functionality is supported in IOCTL, hence the
5921 * function always returns -EOPNOTSUPP.
1da177e4
LT
5922 */
5923
ac1f60db 5924static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
5925{
5926 return -EOPNOTSUPP;
5927}
5928
5929/**
5930 * s2io_change_mtu - entry point to change MTU size for the device.
5931 * @dev : device pointer.
5932 * @new_mtu : the new MTU size for the device.
5933 * Description: A driver entry point to change MTU size for the device.
5934 * Before changing the MTU the device must be stopped.
5935 * Return value:
5936 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5937 * file on failure.
5938 */
5939
ac1f60db 5940static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 5941{
1ee6dd77 5942 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5943
5944 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5945 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5946 dev->name);
5947 return -EPERM;
5948 }
5949
1da177e4 5950 dev->mtu = new_mtu;
d8892c6e 5951 if (netif_running(dev)) {
e6a8fee2 5952 s2io_card_down(sp);
d8892c6e 5953 netif_stop_queue(dev);
5954 if (s2io_card_up(sp)) {
5955 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5956 __FUNCTION__);
5957 }
5958 if (netif_queue_stopped(dev))
5959 netif_wake_queue(dev);
5960 } else { /* Device is down */
1ee6dd77 5961 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e 5962 u64 val64 = new_mtu;
5963
5964 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5965 }
1da177e4
LT
5966
5967 return 0;
5968}
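
/*
 * Editor's note: vBIT(val, loc, sz) above comes from s2io.h and places an
 * sz-bit value at big-endian bit offset 'loc' within the 64-bit register,
 * roughly ((u64)(val)) << (64 - loc - sz); so vBIT(new_mtu, 2, 14) lands
 * the MTU in bits 2..15 of rmac_max_pyld_len. (Reconstruction from usage;
 * s2io.h holds the authoritative definition.)
 */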
5969
5970/**
5971 * s2io_tasklet - Bottom half of the ISR.
5972 * @dev_addr : address of the device structure in dma_addr_t format.
5973 * Description:
5974 * This is the tasklet or the bottom half of the ISR. This is
20346722 5975 * an extension of the ISR which is scheduled by the scheduler to be run
1da177e4 5976 * when the load on the CPU is low. All low priority tasks of the ISR can
20346722 5977 * be pushed into the tasklet. For now the tasklet is used only to
1da177e4
LT
5978 * replenish the Rx buffers in the Rx buffer descriptors.
5979 * Return value:
5980 * void.
5981 */
5982
5983static void s2io_tasklet(unsigned long dev_addr)
5984{
5985 struct net_device *dev = (struct net_device *) dev_addr;
1ee6dd77 5986 struct s2io_nic *sp = dev->priv;
1da177e4 5987 int i, ret;
1ee6dd77 5988 struct mac_info *mac_control;
1da177e4
LT
5989 struct config_param *config;
5990
5991 mac_control = &sp->mac_control;
5992 config = &sp->config;
5993
5994 if (!TASKLET_IN_USE) {
5995 for (i = 0; i < config->rx_ring_num; i++) {
5996 ret = fill_rx_buffers(sp, i);
5997 if (ret == -ENOMEM) {
0c61ed5f 5998 DBG_PRINT(INFO_DBG, "%s: Out of ",
1da177e4
LT
5999 dev->name);
6000 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
6001 break;
6002 } else if (ret == -EFILL) {
0c61ed5f 6003 DBG_PRINT(INFO_DBG,
1da177e4
LT
6004 "%s: Rx Ring %d is full\n",
6005 dev->name, i);
6006 break;
6007 }
6008 }
6009 clear_bit(0, (&sp->tasklet_status));
6010 }
6011}
6012
6013/**
6014 * s2io_set_link - Set the Link status
6015 * @data: long pointer to device private structure
6016 * Description: Sets the link status for the adapter
6017 */
6018
c4028958 6019static void s2io_set_link(struct work_struct *work)
1da177e4 6020{
1ee6dd77 6021 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
1da177e4 6022 struct net_device *dev = nic->dev;
1ee6dd77 6023 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
6024 register u64 val64;
6025 u16 subid;
6026
22747d6b
FR
6027 rtnl_lock();
6028
6029 if (!netif_running(dev))
6030 goto out_unlock;
6031
1da177e4
LT
6032 if (test_and_set_bit(0, &(nic->link_state))) {
6033 /* The card is being reset, no point doing anything */
22747d6b 6034 goto out_unlock;
1da177e4
LT
6035 }
6036
6037 subid = nic->pdev->subsystem_device;
a371a07d 6038 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6039 /*
6040 * Allow a small delay for the NICs self initiated
6041 * cleanup to complete.
6042 */
6043 msleep(100);
6044 }
1da177e4
LT
6045
6046 val64 = readq(&bar0->adapter_status);
19a60522
SS
6047 if (LINK_IS_UP(val64)) {
6048 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6049 if (verify_xena_quiescence(nic)) {
6050 val64 = readq(&bar0->adapter_control);
6051 val64 |= ADAPTER_CNTL_EN;
1da177e4 6052 writeq(val64, &bar0->adapter_control);
19a60522
SS
6053 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6054 nic->device_type, subid)) {
6055 val64 = readq(&bar0->gpio_control);
6056 val64 |= GPIO_CTRL_GPIO_0;
6057 writeq(val64, &bar0->gpio_control);
6058 val64 = readq(&bar0->gpio_control);
6059 } else {
6060 val64 |= ADAPTER_LED_ON;
6061 writeq(val64, &bar0->adapter_control);
a371a07d 6062 }
1da177e4 6063 nic->device_enabled_once = TRUE;
19a60522
SS
6064 } else {
6065 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6066 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6067 netif_stop_queue(dev);
1da177e4 6068 }
19a60522
SS
6069 }
6070 val64 = readq(&bar0->adapter_status);
6071 if (!LINK_IS_UP(val64)) {
6072 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6073 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6074 DBG_PRINT(ERR_DBG, "device \n");
6075 } else
1da177e4 6076 s2io_link(nic, LINK_UP);
19a60522
SS
6077 } else {
6078 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6079 subid)) {
6080 val64 = readq(&bar0->gpio_control);
6081 val64 &= ~GPIO_CTRL_GPIO_0;
6082 writeq(val64, &bar0->gpio_control);
6083 val64 = readq(&bar0->gpio_control);
1da177e4 6084 }
19a60522 6085 s2io_link(nic, LINK_DOWN);
1da177e4
LT
6086 }
6087 clear_bit(0, &(nic->link_state));
22747d6b
FR
6088
6089out_unlock:
d8d70caf 6090 rtnl_unlock();
1da177e4
LT
6091}
6092
1ee6dd77
RB
6093static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6094 struct buffAdd *ba,
6095 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6096 u64 *temp2, int size)
5d3213cc
AR
6097{
6098 struct net_device *dev = sp->dev;
6099 struct sk_buff *frag_list;
6100
6101 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6102 /* allocate skb */
6103 if (*skb) {
6104 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6105 /*
6106 * As Rx frames are not going to be processed,
6107 * use the same mapped address for the RxD
6108 * buffer pointer
6109 */
1ee6dd77 6110 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5d3213cc
AR
6111 } else {
6112 *skb = dev_alloc_skb(size);
6113 if (!(*skb)) {
0c61ed5f
RV
6114 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6115 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
5d3213cc
AR
6116 return -ENOMEM ;
6117 }
6118 /* Store the mapped addr in a temp variable
6119 * so that it can be used for the next rxd whose
6120 * Host_Control is NULL
6121 */
1ee6dd77 6122 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
6123 pci_map_single( sp->pdev, (*skb)->data,
6124 size - NET_IP_ALIGN,
6125 PCI_DMA_FROMDEVICE);
6126 rxdp->Host_Control = (unsigned long) (*skb);
6127 }
6128 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6129 /* Two buffer Mode */
6130 if (*skb) {
1ee6dd77
RB
6131 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6132 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6133 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
5d3213cc
AR
6134 } else {
6135 *skb = dev_alloc_skb(size);
2ceaac75 6136 if (!(*skb)) {
0c61ed5f 6137 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n",
19a60522 6138 dev->name);
2ceaac75
DR
6139 return -ENOMEM;
6140 }
1ee6dd77 6141 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5d3213cc
AR
6142 pci_map_single(sp->pdev, (*skb)->data,
6143 dev->mtu + 4,
6144 PCI_DMA_FROMDEVICE);
1ee6dd77 6145 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
6146 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6147 PCI_DMA_FROMDEVICE);
6148 rxdp->Host_Control = (unsigned long) (*skb);
6149
6150 /* Buffer-1 will be dummy buffer not used */
1ee6dd77 6151 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
5d3213cc
AR
6152 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6153 PCI_DMA_FROMDEVICE);
6154 }
6155 } else if (rxdp->Host_Control == 0) {
6156 /* Three buffer mode */
6157 if (*skb) {
1ee6dd77
RB
6158 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6159 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6160 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
5d3213cc
AR
6161 } else {
6162 *skb = dev_alloc_skb(size);
2ceaac75 6163 if (!(*skb)) {
0c61ed5f 6164 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n",
2ceaac75
DR
6165 dev->name);
6166 return -ENOMEM;
6167 }
1ee6dd77 6168 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
5d3213cc
AR
6169 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6170 PCI_DMA_FROMDEVICE);
6171 /* Buffer-1 receives L3/L4 headers */
1ee6dd77 6172 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
5d3213cc
AR
6173 pci_map_single( sp->pdev, (*skb)->data,
6174 l3l4hdr_size + 4,
6175 PCI_DMA_FROMDEVICE);
6176 /*
6177 * skb_shinfo(skb)->frag_list will have L4
6178 * data payload
6179 */
6180 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6181 ALIGN_SIZE);
6182 if (skb_shinfo(*skb)->frag_list == NULL) {
6183 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6184 dev->name);
6185 return -ENOMEM ;
6186 }
6187 frag_list = skb_shinfo(*skb)->frag_list;
6188 frag_list->next = NULL;
6189 /*
6190 * Buffer-2 receives L4 data payload
6191 */
1ee6dd77 6192 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
5d3213cc
AR
6193 pci_map_single( sp->pdev, frag_list->data,
6194 dev->mtu, PCI_DMA_FROMDEVICE);
6195 }
6196 }
6197 return 0;
6198}
1ee6dd77
RB
6199static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6200 int size)
5d3213cc
AR
6201{
6202 struct net_device *dev = sp->dev;
6203 if (sp->rxd_mode == RXD_MODE_1) {
6204 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6205 } else if (sp->rxd_mode == RXD_MODE_3B) {
6206 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6207 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6208 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6209 } else {
6210 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6211 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6212 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6213 }
6214}
6215
1ee6dd77 6216static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6217{
6218 int i, j, k, blk_cnt = 0, size;
1ee6dd77 6219 struct mac_info * mac_control = &sp->mac_control;
5d3213cc
AR
6220 struct config_param *config = &sp->config;
6221 struct net_device *dev = sp->dev;
1ee6dd77 6222 struct RxD_t *rxdp = NULL;
5d3213cc 6223 struct sk_buff *skb = NULL;
1ee6dd77 6224 struct buffAdd *ba = NULL;
5d3213cc
AR
6225 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6226
6227 /* Calculate the size based on ring mode */
6228 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6229 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6230 if (sp->rxd_mode == RXD_MODE_1)
6231 size += NET_IP_ALIGN;
6232 else if (sp->rxd_mode == RXD_MODE_3B)
6233 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6234 else
6235 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6236
6237 for (i = 0; i < config->rx_ring_num; i++) {
6238 blk_cnt = config->rx_cfg[i].num_rxd /
6239 (rxd_count[sp->rxd_mode] +1);
6240
6241 for (j = 0; j < blk_cnt; j++) {
6242 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6243 rxdp = mac_control->rings[i].
6244 rx_blocks[j].rxds[k].virt_addr;
6245 if(sp->rxd_mode >= RXD_MODE_3A)
6246 ba = &mac_control->rings[i].ba[j][k];
ac1f90d6 6247 if (set_rxd_buffer_pointer(sp, rxdp, ba,
5d3213cc
AR
6248 &skb,(u64 *)&temp0_64,
6249 (u64 *)&temp1_64,
ac1f90d6
SS
6250 (u64 *)&temp2_64,
6251 size) == -ENOMEM) {
6252 return 0;
6253 }
5d3213cc
AR
6254
6255 set_rxd_buffer_size(sp, rxdp, size);
6256 wmb();
6257 /* flip the Ownership bit to Hardware */
6258 rxdp->Control_1 |= RXD_OWN_XENA;
6259 }
6260 }
6261 }
6262 return 0;
6263
6264}
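
/*
 * Editor's note: the wmb() in the loop above is essential -- every buffer
 * pointer and size field of the descriptor must be globally visible
 * before the ownership bit is flipped, or the NIC could consume a
 * half-initialized RxD. The general pattern is:
 *
 *	<fill descriptor fields>
 *	wmb();                            <-- order fields before ownership
 *	rxdp->Control_1 |= RXD_OWN_XENA;  <-- hand the descriptor to HW
 */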
6265
1ee6dd77 6266static int s2io_add_isr(struct s2io_nic * sp)
1da177e4 6267{
e6a8fee2 6268 int ret = 0;
c92ca04b 6269 struct net_device *dev = sp->dev;
e6a8fee2 6270 int err = 0;
1da177e4 6271
e6a8fee2
AR
6272 if (sp->intr_type == MSI)
6273 ret = s2io_enable_msi(sp);
6274 else if (sp->intr_type == MSI_X)
6275 ret = s2io_enable_msi_x(sp);
6276 if (ret) {
6277 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6278 sp->intr_type = INTA;
20346722 6279 }
1da177e4 6280
1ee6dd77 6281 /* Store the values of the MSIX table in the struct s2io_nic structure */
e6a8fee2 6282 store_xmsi_data(sp);
c92ca04b 6283
e6a8fee2
AR
6284 /* After proper initialization of H/W, register ISR */
6285 if (sp->intr_type == MSI) {
6286 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6287 IRQF_SHARED, sp->name, dev);
6288 if (err) {
6289 pci_disable_msi(sp->pdev);
6290 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6291 dev->name);
6292 return -1;
6293 }
6294 }
6295 if (sp->intr_type == MSI_X) {
fb6a825b 6296 int i, msix_tx_cnt=0,msix_rx_cnt=0;
c92ca04b 6297
e6a8fee2
AR
6298 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6299 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6300 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6301 dev->name, i);
6302 err = request_irq(sp->entries[i].vector,
6303 s2io_msix_fifo_handle, 0, sp->desc[i],
6304 sp->s2io_entries[i].arg);
fb6a825b
SS
6305 /* If either data or addr is zero print it */
6306 if(!(sp->msix_info[i].addr &&
6307 sp->msix_info[i].data)) {
6308 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6309 "Data:0x%lx\n",sp->desc[i],
6310 (unsigned long long)
6311 sp->msix_info[i].addr,
6312 (unsigned long)
6313 ntohl(sp->msix_info[i].data));
6314 } else {
6315 msix_tx_cnt++;
6316 }
e6a8fee2
AR
6317 } else {
6318 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6319 dev->name, i);
6320 err = request_irq(sp->entries[i].vector,
6321 s2io_msix_ring_handle, 0, sp->desc[i],
6322 sp->s2io_entries[i].arg);
fb6a825b
SS
6323 /* If either data or addr is zero print it */
6324 if(!(sp->msix_info[i].addr &&
6325 sp->msix_info[i].data)) {
6326 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6327 "Data:0x%lx\n",sp->desc[i],
6328 (unsigned long long)
6329 sp->msix_info[i].addr,
6330 (unsigned long)
6331 ntohl(sp->msix_info[i].data));
6332 } else {
6333 msix_rx_cnt++;
6334 }
c92ca04b 6335 }
e6a8fee2
AR
6336 if (err) {
6337 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6338 "failed\n", dev->name, i);
6339 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6340 return -1;
6341 }
6342 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6343 }
fb6a825b
SS
6344 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6345 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
e6a8fee2
AR
6346 }
6347 if (sp->intr_type == INTA) {
6348 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6349 sp->name, dev);
6350 if (err) {
6351 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6352 dev->name);
6353 return -1;
6354 }
6355 }
6356 return 0;
6357}
1ee6dd77 6358static void s2io_rem_isr(struct s2io_nic * sp)
e6a8fee2
AR
6359{
6360 int cnt = 0;
6361 struct net_device *dev = sp->dev;
6362
6363 if (sp->intr_type == MSI_X) {
6364 int i;
6365 u16 msi_control;
6366
6367 for (i=1; (sp->s2io_entries[i].in_use ==
6368 MSIX_REGISTERED_SUCCESS); i++) {
6369 int vector = sp->entries[i].vector;
6370 void *arg = sp->s2io_entries[i].arg;
6371
6372 free_irq(vector, arg);
6373 }
6374 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6375 msi_control &= 0xFFFE; /* Disable MSI */
6376 pci_write_config_word(sp->pdev, 0x42, msi_control);
6377
6378 pci_disable_msix(sp->pdev);
6379 } else {
6380 free_irq(sp->pdev->irq, dev);
6381 if (sp->intr_type == MSI) {
6382 u16 val;
6383
6384 pci_disable_msi(sp->pdev);
6385 pci_read_config_word(sp->pdev, 0x4c, &val);
6386 val ^= 0x1;
6387 pci_write_config_word(sp->pdev, 0x4c, val);
c92ca04b
AR
6388 }
6389 }
6390 /* Waiting till all Interrupt handlers are complete */
6391 cnt = 0;
6392 do {
6393 msleep(10);
6394 if (!atomic_read(&sp->isr_cnt))
6395 break;
6396 cnt++;
6397 } while(cnt < 5);
e6a8fee2
AR
6398}
6399
1ee6dd77 6400static void s2io_card_down(struct s2io_nic * sp)
e6a8fee2
AR
6401{
6402 int cnt = 0;
1ee6dd77 6403 struct XENA_dev_config __iomem *bar0 = sp->bar0;
e6a8fee2
AR
6404 unsigned long flags;
6405 register u64 val64 = 0;
6406
6407 del_timer_sync(&sp->alarm_timer);
6408 /* If s2io_set_link task is executing, wait till it completes. */
6409 while (test_and_set_bit(0, &(sp->link_state))) {
6410 msleep(50);
6411 }
6412 atomic_set(&sp->card_state, CARD_DOWN);
6413
6414 /* disable Tx and Rx traffic on the NIC */
6415 stop_nic(sp);
6416
6417 s2io_rem_isr(sp);
1da177e4
LT
6418
6419 /* Kill tasklet. */
6420 tasklet_kill(&sp->task);
6421
6422 /* Check if the device is Quiescent and then Reset the NIC */
6423 do {
5d3213cc
AR
6424 /* As per the HW requirement we need to replenish the
6425 * receive buffers to avoid a ring bump. Since there is
6426 * no intention of processing the Rx frames at this point, we
6427 * just set the ownership bit of the RxDs in each Rx
6428 * ring to HW and set the appropriate buffer size
6429 * based on the ring mode
6430 */
6431 rxd_owner_bit_reset(sp);
6432
1da177e4 6433 val64 = readq(&bar0->adapter_status);
19a60522
SS
6434 if (verify_xena_quiescence(sp)) {
6435 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
1da177e4
LT
6436 break;
6437 }
6438
6439 msleep(50);
6440 cnt++;
6441 if (cnt == 10) {
6442 DBG_PRINT(ERR_DBG,
6443 "s2io_close:Device not Quiescent ");
6444 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6445 (unsigned long long) val64);
6446 break;
6447 }
6448 } while (1);
1da177e4
LT
6449 s2io_reset(sp);
6450
7ba013ac 6451 spin_lock_irqsave(&sp->tx_lock, flags);
6452 /* Free all Tx buffers */
1da177e4 6453 free_tx_buffers(sp);
7ba013ac 6454 spin_unlock_irqrestore(&sp->tx_lock, flags);
6455
6456 /* Free all Rx buffers */
6457 spin_lock_irqsave(&sp->rx_lock, flags);
1da177e4 6458 free_rx_buffers(sp);
7ba013ac 6459 spin_unlock_irqrestore(&sp->rx_lock, flags);
1da177e4 6460
1da177e4
LT
6461 clear_bit(0, &(sp->link_state));
6462}
6463
1ee6dd77 6464static int s2io_card_up(struct s2io_nic * sp)
1da177e4 6465{
cc6e7c44 6466 int i, ret = 0;
1ee6dd77 6467 struct mac_info *mac_control;
1da177e4
LT
6468 struct config_param *config;
6469 struct net_device *dev = (struct net_device *) sp->dev;
e6a8fee2 6470 u16 interruptible;
1da177e4
LT
6471
6472 /* Initialize the H/W I/O registers */
6473 if (init_nic(sp) != 0) {
6474 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6475 dev->name);
e6a8fee2 6476 s2io_reset(sp);
1da177e4
LT
6477 return -ENODEV;
6478 }
6479
20346722 6480 /*
6481 * Initializing the Rx buffers. Each of the configured Rx rings
1da177e4
LT
6482 * has its receive blocks filled with buffers here.
6483 */
6484 mac_control = &sp->mac_control;
6485 config = &sp->config;
6486
6487 for (i = 0; i < config->rx_ring_num; i++) {
6488 if ((ret = fill_rx_buffers(sp, i))) {
6489 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6490 dev->name);
6491 s2io_reset(sp);
6492 free_rx_buffers(sp);
6493 return -ENOMEM;
6494 }
6495 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6496 atomic_read(&sp->rx_bufs_left[i]));
6497 }
19a60522
SS
6498 /* Maintain the state prior to the open */
6499 if (sp->promisc_flg)
6500 sp->promisc_flg = 0;
6501 if (sp->m_cast_flg) {
6502 sp->m_cast_flg = 0;
6503 sp->all_multi_pos= 0;
6504 }
1da177e4
LT
6505
6506 /* Setting its receive mode */
6507 s2io_set_multicast(dev);
6508
7d3d0439 6509 if (sp->lro) {
b41477f3 6510 /* Initialize max aggregatable pkts per session based on MTU */
7d3d0439
RA
6511 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6512 /* Check if we can use (if specified) the user provided value */
6513 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6514 sp->lro_max_aggr_per_sess = lro_max_pkts;
6515 }
6516
1da177e4
LT
6517 /* Enable Rx Traffic and interrupts on the NIC */
6518 if (start_nic(sp)) {
6519 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
1da177e4 6520 s2io_reset(sp);
e6a8fee2
AR
6521 free_rx_buffers(sp);
6522 return -ENODEV;
6523 }
6524
6525 /* Add interrupt service routine */
6526 if (s2io_add_isr(sp) != 0) {
6527 if (sp->intr_type == MSI_X)
6528 s2io_rem_isr(sp);
6529 s2io_reset(sp);
1da177e4
LT
6530 free_rx_buffers(sp);
6531 return -ENODEV;
6532 }
6533
25fff88e 6534 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6535
e6a8fee2
AR
6536 /* Enable tasklet for the device */
6537 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6538
6539 /* Enable select interrupts */
6540 if (sp->intr_type != INTA)
6541 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6542 else {
6543 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6544 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6545 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6546 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6547 }
6548
6549
1da177e4
LT
6550 atomic_set(&sp->card_state, CARD_UP);
6551 return 0;
6552}
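
/*
 * Hedged sketch of what S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 * sp, (HZ/2)) plausibly expands to. The authoritative definition lives in
 * s2io.h; this assumes the classic init_timer()/mod_timer() idiom of this
 * kernel generation:
 *
 *	init_timer(&sp->alarm_timer);
 *	sp->alarm_timer.function = s2io_alarm_handle;
 *	sp->alarm_timer.data = (unsigned long)sp;
 *	mod_timer(&sp->alarm_timer, jiffies + (HZ / 2));
 */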
6553
20346722 6554/**
1da177e4
LT
6555 * s2io_restart_nic - Resets the NIC.
6556 * @data : long pointer to the device private structure
6557 * Description:
6558 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 6559 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
6560 * the run time of the watch dog routine which is run holding a
6561 * spin lock.
6562 */
6563
c4028958 6564static void s2io_restart_nic(struct work_struct *work)
1da177e4 6565{
1ee6dd77 6566 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 6567 struct net_device *dev = sp->dev;
1da177e4 6568
22747d6b
FR
6569 rtnl_lock();
6570
6571 if (!netif_running(dev))
6572 goto out_unlock;
6573
e6a8fee2 6574 s2io_card_down(sp);
1da177e4
LT
6575 if (s2io_card_up(sp)) {
6576 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6577 dev->name);
6578 }
6579 netif_wake_queue(dev);
6580 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6581 dev->name);
22747d6b
FR
6582out_unlock:
6583 rtnl_unlock();
1da177e4
LT
6584}
6585
20346722 6586/**
6587 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
6588 * @dev : Pointer to net device structure
6589 * Description:
6590 * This function is triggered if the Tx Queue is stopped
6591 * for a pre-defined amount of time when the Interface is still up.
6592 * If the Interface is jammed in such a situation, the hardware is
6593 * reset (by s2io_close) and restarted again (by s2io_open) to
6594 * overcome any problem that might have been caused in the hardware.
6595 * Return value:
6596 * void
6597 */
6598
6599static void s2io_tx_watchdog(struct net_device *dev)
6600{
1ee6dd77 6601 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6602
6603 if (netif_carrier_ok(dev)) {
6604 schedule_work(&sp->rst_timer_task);
bd1034f0 6605 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
6606 }
6607}
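
/*
 * Deferred-reset wiring at a glance (all three pieces appear elsewhere in
 * this file; collected here purely for illustration):
 *
 *	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);    probe time
 *	schedule_work(&sp->rst_timer_task);                  watchdog, atomic
 *	container_of(work, struct s2io_nic, rst_timer_task); handler, process
 *							     context, may sleep
 *
 * Keeping s2io_tx_watchdog() down to a schedule_work() call is what lets it
 * run safely in the context described above s2io_restart_nic().
 */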
6608
6609/**
6610 * rx_osm_handler - To perform some OS related operations on SKB.
 6611	 * @sp: private member of the device structure, pointer to s2io_nic structure.
6612 * @skb : the socket buffer pointer.
6613 * @len : length of the packet
6614 * @cksum : FCS checksum of the frame.
6615 * @ring_no : the ring from which this RxD was extracted.
20346722 6616 * Description:
b41477f3 6617	 * This function is called by the Rx interrupt service routine to perform
1da177e4
LT
6618 * some OS related operations on the SKB before passing it to the upper
6619 * layers. It mainly checks if the checksum is OK, if so adds it to the
6620 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6621 * to the upper layer. If the checksum is wrong, it increments the Rx
6622 * packet error count, frees the SKB and returns error.
6623 * Return value:
6624 * SUCCESS on success and -1 on failure.
6625 */
1ee6dd77 6626static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
1da177e4 6627{
1ee6dd77 6628 struct s2io_nic *sp = ring_data->nic;
1da177e4 6629 struct net_device *dev = (struct net_device *) sp->dev;
20346722 6630 struct sk_buff *skb = (struct sk_buff *)
6631 ((unsigned long) rxdp->Host_Control);
6632 int ring_no = ring_data->ring_no;
1da177e4 6633 u16 l3_csum, l4_csum;
863c11a9 6634 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
1ee6dd77 6635 struct lro *lro;
da6971d8 6636
20346722 6637 skb->dev = dev;
c92ca04b 6638
863c11a9 6639 if (err) {
bd1034f0
AR
6640 /* Check for parity error */
6641 if (err & 0x1) {
6642 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6643 }
6644
863c11a9
AR
6645 /*
6646 * Drop the packet if bad transfer code. Exception being
6647 * 0x5, which could be due to unsupported IPv6 extension header.
6648 * In this case, we let stack handle the packet.
6649 * Note that in this case, since checksum will be incorrect,
6650 * stack will validate the same.
6651 */
6652 if (err && ((err >> 48) != 0x5)) {
6653 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6654 dev->name, err);
6655 sp->stats.rx_crc_errors++;
6656 dev_kfree_skb(skb);
6657 atomic_dec(&sp->rx_bufs_left[ring_no]);
6658 rxdp->Host_Control = 0;
6659 return 0;
6660 }
20346722 6661 }
1da177e4 6662
20346722 6663 /* Updating statistics */
6664 rxdp->Host_Control = 0;
20346722 6665 sp->stats.rx_packets++;
da6971d8
AR
6666 if (sp->rxd_mode == RXD_MODE_1) {
6667 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
20346722 6668
da6971d8
AR
6669 sp->stats.rx_bytes += len;
6670 skb_put(skb, len);
6671
6672 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6673 int get_block = ring_data->rx_curr_get_info.block_index;
6674 int get_off = ring_data->rx_curr_get_info.offset;
6675 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6676 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6677 unsigned char *buff = skb_push(skb, buf0_len);
6678
1ee6dd77 6679 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
da6971d8
AR
6680 sp->stats.rx_bytes += buf0_len + buf2_len;
6681 memcpy(buff, ba->ba_0, buf0_len);
6682
6683 if (sp->rxd_mode == RXD_MODE_3A) {
6684 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6685
6686 skb_put(skb, buf1_len);
6687 skb->len += buf2_len;
6688 skb->data_len += buf2_len;
da6971d8
AR
6689 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6690 sp->stats.rx_bytes += buf1_len;
6691
6692 } else
6693 skb_put(skb, buf2_len);
6694 }
20346722 6695
7d3d0439
RA
6696 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6697 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
20346722 6698 (sp->rx_csum)) {
6699 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
1da177e4
LT
6700 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6701 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
20346722 6702 /*
1da177e4
LT
6703 * NIC verifies if the Checksum of the received
6704 * frame is Ok or not and accordingly returns
6705 * a flag in the RxD.
6706 */
6707 skb->ip_summed = CHECKSUM_UNNECESSARY;
7d3d0439
RA
6708 if (sp->lro) {
6709 u32 tcp_len;
6710 u8 *tcp;
6711 int ret = 0;
6712
6713 ret = s2io_club_tcp_session(skb->data, &tcp,
6714 &tcp_len, &lro, rxdp, sp);
6715 switch (ret) {
6716 case 3: /* Begin anew */
6717 lro->parent = skb;
6718 goto aggregate;
6719 case 1: /* Aggregate */
6720 {
6721 lro_append_pkt(sp, lro,
6722 skb, tcp_len);
6723 goto aggregate;
6724 }
6725 case 4: /* Flush session */
6726 {
6727 lro_append_pkt(sp, lro,
6728 skb, tcp_len);
6729 queue_rx_frame(lro->parent);
6730 clear_lro_session(lro);
6731 sp->mac_control.stats_info->
6732 sw_stat.flush_max_pkts++;
6733 goto aggregate;
6734 }
6735 case 2: /* Flush both */
6736 lro->parent->data_len =
6737 lro->frags_len;
6738 sp->mac_control.stats_info->
6739 sw_stat.sending_both++;
6740 queue_rx_frame(lro->parent);
6741 clear_lro_session(lro);
6742 goto send_up;
6743 case 0: /* sessions exceeded */
c92ca04b
AR
6744 case -1: /* non-TCP or not
6745 * L2 aggregatable
6746 */
7d3d0439
RA
6747 case 5: /*
6748 * First pkt in session not
6749 * L3/L4 aggregatable
6750 */
6751 break;
6752 default:
6753 DBG_PRINT(ERR_DBG,
6754 "%s: Samadhana!!\n",
6755 __FUNCTION__);
6756 BUG();
6757 }
6758 }
1da177e4 6759 } else {
20346722 6760 /*
6761 * Packet with erroneous checksum, let the
1da177e4
LT
6762 * upper layers deal with it.
6763 */
6764 skb->ip_summed = CHECKSUM_NONE;
6765 }
6766 } else {
6767 skb->ip_summed = CHECKSUM_NONE;
6768 }
6769
7d3d0439
RA
6770 if (!sp->lro) {
6771 skb->protocol = eth_type_trans(skb, dev);
926930b2
SS
6772 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6773 vlan_strip_flag)) {
7d3d0439 6774 /* Queueing the vlan frame to the upper layer */
db874e65
SS
6775 if (napi)
6776 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6777 RXD_GET_VLAN_TAG(rxdp->Control_2));
6778 else
6779 vlan_hwaccel_rx(skb, sp->vlgrp,
6780 RXD_GET_VLAN_TAG(rxdp->Control_2));
7d3d0439 6781 } else {
db874e65
SS
6782 if (napi)
6783 netif_receive_skb(skb);
6784 else
6785 netif_rx(skb);
7d3d0439 6786 }
7d3d0439
RA
6787 } else {
6788send_up:
6789 queue_rx_frame(skb);
6aa20a22 6790 }
1da177e4 6791 dev->last_rx = jiffies;
7d3d0439 6792aggregate:
1da177e4 6793 atomic_dec(&sp->rx_bufs_left[ring_no]);
1da177e4
LT
6794 return SUCCESS;
6795}
6796
6797/**
6798 * s2io_link - stops/starts the Tx queue.
6799 * @sp : private member of the device structure, which is a pointer to the
6800 * s2io_nic structure.
 6801	 * @link : indicates whether link is UP/DOWN.
6802 * Description:
6803 * This function stops/starts the Tx queue depending on whether the link
20346722 6804	 * status of the NIC is down or up. This is called by the Alarm
6805 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
6806 * Return value:
6807 * void.
6808 */
6809
1ee6dd77 6810static void s2io_link(struct s2io_nic * sp, int link)
1da177e4
LT
6811{
6812 struct net_device *dev = (struct net_device *) sp->dev;
6813
6814 if (link != sp->last_link_state) {
6815 if (link == LINK_DOWN) {
6816 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6817 netif_carrier_off(dev);
6818 } else {
6819 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6820 netif_carrier_on(dev);
6821 }
6822 }
6823 sp->last_link_state = link;
6824}
6825
6826/**
20346722 6827 * get_xena_rev_id - to identify revision ID of xena.
6828 * @pdev : PCI Dev structure
6829 * Description:
6830 * Function to identify the Revision ID of xena.
6831 * Return value:
6832 * returns the revision ID of the device.
6833 */
6834
26df54bf 6835static int get_xena_rev_id(struct pci_dev *pdev)
20346722 6836{
6837 u8 id = 0;
6838 int ret;
6839 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6840 return id;
6841}
6842
6843/**
6844 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6845 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
6846 * s2io_nic structure.
6847 * Description:
6848 * This function initializes a few of the PCI and PCI-X configuration registers
6849 * with recommended values.
6850 * Return value:
6851 * void
6852 */
6853
1ee6dd77 6854static void s2io_init_pci(struct s2io_nic * sp)
1da177e4 6855{
20346722 6856 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
6857
6858 /* Enable Data Parity Error Recovery in PCI-X command register. */
6859 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6860 &(pcix_cmd));
1da177e4 6861 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6862 (pcix_cmd | 1));
1da177e4 6863 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 6864 &(pcix_cmd));
1da177e4
LT
6865
6866 /* Set the PErr Response bit in PCI command register. */
6867 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6868 pci_write_config_word(sp->pdev, PCI_COMMAND,
6869 (pci_cmd | PCI_COMMAND_PARITY));
6870 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
6871}
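
/*
 * Illustrative only, not part of the driver: the read/modify/write/read-back
 * idiom used by s2io_init_pci() above, as a hypothetical helper. The
 * trailing read confirms the write actually reached configuration space.
 */
static void s2io_pci_set_word_bits(struct pci_dev *pdev, int where,
				   u16 set_bits)
{
	u16 val = 0;

	pci_read_config_word(pdev, where, &val);
	pci_write_config_word(pdev, where, val | set_bits);
	pci_read_config_word(pdev, where, &val);	/* read back */
}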
6872
9dc737a7
AR
6873static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6874{
 6875	 if (tx_fifo_num > 8) {
6876 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6877 "supported\n");
6878 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6879 tx_fifo_num = 8;
6880 }
 6881	 if (rx_ring_num > 8) {
6882 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6883 "supported\n");
6884 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6885 rx_ring_num = 8;
6886 }
db874e65
SS
6887 if (*dev_intr_type != INTA)
6888 napi = 0;
6889
9dc737a7
AR
6890#ifndef CONFIG_PCI_MSI
6891 if (*dev_intr_type != INTA) {
 6892	 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
6893 "MSI/MSI-X. Defaulting to INTA\n");
6894 *dev_intr_type = INTA;
6895 }
6896#else
6897 if (*dev_intr_type > MSI_X) {
6898 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6899 "Defaulting to INTA\n");
6900 *dev_intr_type = INTA;
6901 }
6902#endif
6903 if ((*dev_intr_type == MSI_X) &&
6904 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6905 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 6906 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
6907 "Defaulting to INTA\n");
6908 *dev_intr_type = INTA;
6909 }
fb6a825b 6910
9dc737a7
AR
6911 if (rx_ring_mode > 3) {
6912 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6913 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6914 rx_ring_mode = 3;
6915 }
6916 return SUCCESS;
6917}
6918
9fc93a41
SS
6919/**
6920 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6921 * or Traffic class respectively.
 6922	 * @nic: device private variable
 6923	 * Description: The function configures the receive steering to the
 6924	 * desired receive ring.
6925 * Return Value: SUCCESS on success and
6926 * '-1' on failure (endian settings incorrect).
6927 */
6928static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6929{
6930 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6931 register u64 val64 = 0;
6932
6933 if (ds_codepoint > 63)
6934 return FAILURE;
6935
6936 val64 = RTS_DS_MEM_DATA(ring);
6937 writeq(val64, &bar0->rts_ds_mem_data);
6938
6939 val64 = RTS_DS_MEM_CTRL_WE |
6940 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6941 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6942
6943 writeq(val64, &bar0->rts_ds_mem_ctrl);
6944
6945 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6946 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6947 S2IO_BIT_RESET);
6948}
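
/*
 * Usage sketch for rts_ds_steer(), illustrative only; the codepoint and
 * ring values are examples, not driver policy. Steering frames that carry
 * DSCP 46 (the common Expedited Forwarding codepoint) to receive ring 1:
 *
 *	if (rts_ds_steer(nic, 46, 1) != SUCCESS)
 *		DBG_PRINT(ERR_DBG, "DS steering to ring 1 failed\n");
 */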
6949
1da177e4 6950/**
20346722 6951 * s2io_init_nic - Initialization of the adapter .
1da177e4
LT
6952 * @pdev : structure containing the PCI related information of the device.
6953 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6954 * Description:
 6955	 * The function initializes an adapter identified by the pci_dev structure.
20346722 6956	 * All OS related initialization, including memory and device structures and
 6957	 * initialization of the device private variable, is done. Also the swapper
6958 * control register is initialized to enable read and write into the I/O
1da177e4
LT
6959 * registers of the device.
6960 * Return value:
6961 * returns 0 on success and negative on failure.
6962 */
6963
6964static int __devinit
6965s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6966{
1ee6dd77 6967 struct s2io_nic *sp;
1da177e4 6968 struct net_device *dev;
1da177e4
LT
6969 int i, j, ret;
6970 int dma_flag = FALSE;
6971 u32 mac_up, mac_down;
6972 u64 val64 = 0, tmp64 = 0;
1ee6dd77 6973 struct XENA_dev_config __iomem *bar0 = NULL;
1da177e4 6974 u16 subid;
1ee6dd77 6975 struct mac_info *mac_control;
1da177e4 6976 struct config_param *config;
541ae68f 6977 int mode;
cc6e7c44 6978 u8 dev_intr_type = intr_type;
1da177e4 6979
9dc737a7
AR
6980 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6981 return ret;
1da177e4
LT
6982
6983 if ((ret = pci_enable_device(pdev))) {
6984 DBG_PRINT(ERR_DBG,
6985 "s2io_init_nic: pci_enable_device failed\n");
6986 return ret;
6987 }
6988
1e7f0bd8 6989 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6990 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6991 dma_flag = TRUE;
1da177e4 6992 if (pci_set_consistent_dma_mask
1e7f0bd8 6993 (pdev, DMA_64BIT_MASK)) {
1da177e4
LT
6994 DBG_PRINT(ERR_DBG,
6995 "Unable to obtain 64bit DMA for \
6996 consistent allocations\n");
6997 pci_disable_device(pdev);
6998 return -ENOMEM;
6999 }
1e7f0bd8 7000 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
1da177e4
LT
7001 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7002 } else {
7003 pci_disable_device(pdev);
7004 return -ENOMEM;
7005 }
cc6e7c44
RA
7006 if (dev_intr_type != MSI_X) {
7007 if (pci_request_regions(pdev, s2io_driver_name)) {
b41477f3
AR
7008 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
7009 pci_disable_device(pdev);
cc6e7c44
RA
7010 return -ENODEV;
7011 }
7012 }
7013 else {
7014 if (!(request_mem_region(pci_resource_start(pdev, 0),
7015 pci_resource_len(pdev, 0), s2io_driver_name))) {
7016 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
7017 pci_disable_device(pdev);
7018 return -ENODEV;
7019 }
7020 if (!(request_mem_region(pci_resource_start(pdev, 2),
7021 pci_resource_len(pdev, 2), s2io_driver_name))) {
7022 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
7023 release_mem_region(pci_resource_start(pdev, 0),
7024 pci_resource_len(pdev, 0));
7025 pci_disable_device(pdev);
7026 return -ENODEV;
7027 }
1da177e4
LT
7028 }
7029
1ee6dd77 7030 dev = alloc_etherdev(sizeof(struct s2io_nic));
1da177e4
LT
7031 if (dev == NULL) {
7032 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7033 pci_disable_device(pdev);
7034 pci_release_regions(pdev);
7035 return -ENODEV;
7036 }
7037
7038 pci_set_master(pdev);
7039 pci_set_drvdata(pdev, dev);
7040 SET_MODULE_OWNER(dev);
7041 SET_NETDEV_DEV(dev, &pdev->dev);
7042
7043 /* Private member variable initialized to s2io NIC structure */
7044 sp = dev->priv;
1ee6dd77 7045 memset(sp, 0, sizeof(struct s2io_nic));
1da177e4
LT
7046 sp->dev = dev;
7047 sp->pdev = pdev;
1da177e4 7048 sp->high_dma_flag = dma_flag;
1da177e4 7049 sp->device_enabled_once = FALSE;
da6971d8
AR
7050 if (rx_ring_mode == 1)
7051 sp->rxd_mode = RXD_MODE_1;
7052 if (rx_ring_mode == 2)
7053 sp->rxd_mode = RXD_MODE_3B;
7054 if (rx_ring_mode == 3)
7055 sp->rxd_mode = RXD_MODE_3A;
7056
cc6e7c44 7057 sp->intr_type = dev_intr_type;
1da177e4 7058
541ae68f 7059 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7060 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7061 sp->device_type = XFRAME_II_DEVICE;
7062 else
7063 sp->device_type = XFRAME_I_DEVICE;
7064
7d3d0439 7065 sp->lro = lro;
6aa20a22 7066
1da177e4
LT
7067 /* Initialize some PCI/PCI-X fields of the NIC. */
7068 s2io_init_pci(sp);
7069
20346722 7070 /*
1da177e4 7071 * Setting the device configuration parameters.
20346722 7072 * Most of these parameters can be specified by the user during
7073 * module insertion as they are module loadable parameters. If
 7074	 * these parameters are not specified during load time, they
1da177e4
LT
7075 * are initialized with default values.
7076 */
7077 mac_control = &sp->mac_control;
7078 config = &sp->config;
7079
7080 /* Tx side parameters. */
1da177e4
LT
7081 config->tx_fifo_num = tx_fifo_num;
7082 for (i = 0; i < MAX_TX_FIFOS; i++) {
7083 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7084 config->tx_cfg[i].fifo_priority = i;
7085 }
7086
20346722 7087 /* mapping the QoS priority to the configured fifos */
7088 for (i = 0; i < MAX_TX_FIFOS; i++)
7089 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7090
1da177e4
LT
7091 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7092 for (i = 0; i < config->tx_fifo_num; i++) {
7093 config->tx_cfg[i].f_no_snoop =
7094 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7095 if (config->tx_cfg[i].fifo_len < 65) {
7096 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7097 break;
7098 }
7099 }
fed5eccd
AR
7100 /* + 2 because one Txd for skb->data and one Txd for UFO */
7101 config->max_txds = MAX_SKB_FRAGS + 2;
1da177e4
LT
7102
7103 /* Rx side parameters. */
1da177e4
LT
7104 config->rx_ring_num = rx_ring_num;
7105 for (i = 0; i < MAX_RX_RINGS; i++) {
7106 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
da6971d8 7107 (rxd_count[sp->rxd_mode] + 1);
1da177e4
LT
7108 config->rx_cfg[i].ring_priority = i;
7109 }
7110
7111 for (i = 0; i < rx_ring_num; i++) {
7112 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7113 config->rx_cfg[i].f_no_snoop =
7114 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7115 }
7116
7117 /* Setting Mac Control parameters */
7118 mac_control->rmac_pause_time = rmac_pause_time;
7119 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7120 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7121
7122
7123 /* Initialize Ring buffer parameters. */
7124 for (i = 0; i < config->rx_ring_num; i++)
7125 atomic_set(&sp->rx_bufs_left[i], 0);
7126
7ba013ac 7127 /* Initialize the number of ISRs currently running */
7128 atomic_set(&sp->isr_cnt, 0);
7129
1da177e4
LT
7130 /* initialize the shared memory used by the NIC and the host */
7131 if (init_shared_mem(sp)) {
7132 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
b41477f3 7133 dev->name);
1da177e4
LT
7134 ret = -ENOMEM;
7135 goto mem_alloc_failed;
7136 }
7137
7138 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7139 pci_resource_len(pdev, 0));
7140 if (!sp->bar0) {
19a60522 7141 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
1da177e4
LT
7142 dev->name);
7143 ret = -ENOMEM;
7144 goto bar0_remap_failed;
7145 }
7146
7147 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7148 pci_resource_len(pdev, 2));
7149 if (!sp->bar1) {
19a60522 7150 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
1da177e4
LT
7151 dev->name);
7152 ret = -ENOMEM;
7153 goto bar1_remap_failed;
7154 }
7155
7156 dev->irq = pdev->irq;
7157 dev->base_addr = (unsigned long) sp->bar0;
7158
7159 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7160 for (j = 0; j < MAX_TX_FIFOS; j++) {
1ee6dd77 7161 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
1da177e4
LT
7162 (sp->bar1 + (j * 0x00020000));
7163 }
7164
7165 /* Driver entry points */
7166 dev->open = &s2io_open;
7167 dev->stop = &s2io_close;
7168 dev->hard_start_xmit = &s2io_xmit;
7169 dev->get_stats = &s2io_get_stats;
7170 dev->set_multicast_list = &s2io_set_multicast;
7171 dev->do_ioctl = &s2io_ioctl;
7172 dev->change_mtu = &s2io_change_mtu;
7173 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
be3a6b02 7174 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7175 dev->vlan_rx_register = s2io_vlan_rx_register;
7176 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
20346722 7177
1da177e4
LT
7178 /*
7179 * will use eth_mac_addr() for dev->set_mac_address
7180 * mac address will be set every time dev->open() is called
7181 */
1da177e4 7182 dev->poll = s2io_poll;
20346722 7183 dev->weight = 32;
1da177e4 7184
612eff0e
BH
7185#ifdef CONFIG_NET_POLL_CONTROLLER
7186 dev->poll_controller = s2io_netpoll;
7187#endif
7188
1da177e4
LT
7189 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7190 if (sp->high_dma_flag == TRUE)
7191 dev->features |= NETIF_F_HIGHDMA;
1da177e4 7192 dev->features |= NETIF_F_TSO;
f83ef8c0 7193 dev->features |= NETIF_F_TSO6;
db874e65 7194 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
fed5eccd
AR
7195 dev->features |= NETIF_F_UFO;
7196 dev->features |= NETIF_F_HW_CSUM;
7197 }
1da177e4
LT
7198
7199 dev->tx_timeout = &s2io_tx_watchdog;
7200 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
c4028958
DH
7201 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7202 INIT_WORK(&sp->set_link_task, s2io_set_link);
1da177e4 7203
e960fc5c 7204 pci_save_state(sp->pdev);
1da177e4
LT
7205
7206 /* Setting swapper control on the NIC, for proper reset operation */
7207 if (s2io_set_swapper(sp)) {
7208 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7209 dev->name);
7210 ret = -EAGAIN;
7211 goto set_swap_failed;
7212 }
7213
541ae68f 7214 /* Verify if the Herc works on the slot its placed into */
7215 if (sp->device_type & XFRAME_II_DEVICE) {
7216 mode = s2io_verify_pci_mode(sp);
7217 if (mode < 0) {
7218 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7219 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7220 ret = -EBADSLT;
7221 goto set_swap_failed;
7222 }
7223 }
7224
7225 /* Not needed for Herc */
7226 if (sp->device_type & XFRAME_I_DEVICE) {
7227 /*
7228 * Fix for all "FFs" MAC address problems observed on
7229 * Alpha platforms
7230 */
7231 fix_mac_address(sp);
7232 s2io_reset(sp);
7233 }
1da177e4
LT
7234
7235 /*
1da177e4
LT
7236 * MAC address initialization.
7237 * For now only one mac address will be read and used.
7238 */
7239 bar0 = sp->bar0;
7240 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7241 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7242 writeq(val64, &bar0->rmac_addr_cmd_mem);
c92ca04b 7243 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41 7244 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
1da177e4
LT
7245 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7246 mac_down = (u32) tmp64;
7247 mac_up = (u32) (tmp64 >> 32);
7248
1da177e4
LT
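	/*
	 * Illustrative note: the factory MAC occupies the most-significant
	 * 48 bits of rmac_addr_data0_mem, so bytes 0-3 of the address unpack
	 * from the upper 32-bit word and bytes 4-5 from the top of the
	 * lower word.
	 */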
7249 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7250 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7251 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7252 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7253 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7254 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7255
1da177e4
LT
7256 /* Set the factory defined MAC address initially */
7257 dev->addr_len = ETH_ALEN;
7258 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7259
b41477f3
AR
7260 /* reset Nic and bring it to known state */
7261 s2io_reset(sp);
7262
1da177e4 7263 /*
20346722 7264 * Initialize the tasklet status and link state flags
541ae68f 7265 * and the card state parameter
1da177e4
LT
7266 */
7267 atomic_set(&(sp->card_state), 0);
7268 sp->tasklet_status = 0;
7269 sp->link_state = 0;
7270
1da177e4
LT
7271 /* Initialize spinlocks */
7272 spin_lock_init(&sp->tx_lock);
db874e65
SS
7273
7274 if (!napi)
7275 spin_lock_init(&sp->put_lock);
7ba013ac 7276 spin_lock_init(&sp->rx_lock);
1da177e4 7277
20346722 7278 /*
7279 * SXE-002: Configure link and activity LED to init state
7280 * on driver load.
1da177e4
LT
7281 */
7282 subid = sp->pdev->subsystem_device;
7283 if ((subid & 0xFF) >= 0x07) {
7284 val64 = readq(&bar0->gpio_control);
7285 val64 |= 0x0000800000000000ULL;
7286 writeq(val64, &bar0->gpio_control);
7287 val64 = 0x0411040400000000ULL;
7288 writeq(val64, (void __iomem *) bar0 + 0x2700);
7289 val64 = readq(&bar0->gpio_control);
7290 }
7291
7292 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7293
7294 if (register_netdev(dev)) {
7295 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7296 ret = -ENODEV;
7297 goto register_failed;
7298 }
9dc737a7 7299 s2io_vpd_read(sp);
0c61ed5f 7300 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
b41477f3
AR
7301 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7302 sp->product_name, get_xena_rev_id(sp->pdev));
7303 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7304 s2io_driver_version);
9dc737a7 7305 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
19a60522 7306 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
541ae68f 7307 sp->def_mac_addr[0].mac_addr[0],
7308 sp->def_mac_addr[0].mac_addr[1],
7309 sp->def_mac_addr[0].mac_addr[2],
7310 sp->def_mac_addr[0].mac_addr[3],
7311 sp->def_mac_addr[0].mac_addr[4],
7312 sp->def_mac_addr[0].mac_addr[5]);
19a60522 7313 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
9dc737a7 7314 if (sp->device_type & XFRAME_II_DEVICE) {
0b1f7ebe 7315 mode = s2io_print_pci_mode(sp);
541ae68f 7316 if (mode < 0) {
9dc737a7 7317 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
541ae68f 7318 ret = -EBADSLT;
9dc737a7 7319 unregister_netdev(dev);
541ae68f 7320 goto set_swap_failed;
7321 }
541ae68f 7322 }
9dc737a7
AR
7323 switch(sp->rxd_mode) {
7324 case RXD_MODE_1:
7325 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7326 dev->name);
7327 break;
7328 case RXD_MODE_3B:
7329 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7330 dev->name);
7331 break;
7332 case RXD_MODE_3A:
7333 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7334 dev->name);
7335 break;
7336 }
db874e65
SS
7337
7338 if (napi)
7339 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
9dc737a7
AR
7340 switch(sp->intr_type) {
7341 case INTA:
7342 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7343 break;
7344 case MSI:
7345 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7346 break;
7347 case MSI_X:
7348 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7349 break;
7350 }
7d3d0439
RA
7351 if (sp->lro)
7352 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
9dc737a7 7353 dev->name);
db874e65
SS
7354 if (ufo)
7355 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7356 " enabled\n", dev->name);
7ba013ac 7357 /* Initialize device name */
9dc737a7 7358 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7ba013ac 7359
b6e3f982 7360 /* Initialize bimodal Interrupts */
7361 sp->config.bimodal = bimodal;
7362 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7363 sp->config.bimodal = 0;
7364 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7365 dev->name);
7366 }
7367
20346722 7368 /*
7369 * Make Link state as off at this point, when the Link change
7370 * interrupt comes the state will be automatically changed to
1da177e4
LT
7371 * the right state.
7372 */
7373 netif_carrier_off(dev);
1da177e4
LT
7374
7375 return 0;
7376
7377 register_failed:
7378 set_swap_failed:
7379 iounmap(sp->bar1);
7380 bar1_remap_failed:
7381 iounmap(sp->bar0);
7382 bar0_remap_failed:
7383 mem_alloc_failed:
7384 free_shared_mem(sp);
7385 pci_disable_device(pdev);
cc6e7c44
RA
7386 if (dev_intr_type != MSI_X)
7387 pci_release_regions(pdev);
7388 else {
7389 release_mem_region(pci_resource_start(pdev, 0),
7390 pci_resource_len(pdev, 0));
7391 release_mem_region(pci_resource_start(pdev, 2),
7392 pci_resource_len(pdev, 2));
7393 }
1da177e4
LT
7394 pci_set_drvdata(pdev, NULL);
7395 free_netdev(dev);
7396
7397 return ret;
7398}
7399
7400/**
20346722 7401 * s2io_rem_nic - Free the PCI device
1da177e4 7402 * @pdev: structure containing the PCI related information of the device.
20346722 7403 * Description: This function is called by the Pci subsystem to release a
1da177e4 7404 * PCI device and free up all resource held up by the device. This could
20346722 7405 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
7406 * from memory.
7407 */
7408
7409static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7410{
7411 struct net_device *dev =
7412 (struct net_device *) pci_get_drvdata(pdev);
1ee6dd77 7413 struct s2io_nic *sp;
1da177e4
LT
7414
7415 if (dev == NULL) {
7416 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7417 return;
7418 }
7419
22747d6b
FR
7420 flush_scheduled_work();
7421
1da177e4
LT
7422 sp = dev->priv;
7423 unregister_netdev(dev);
7424
7425 free_shared_mem(sp);
7426 iounmap(sp->bar0);
7427 iounmap(sp->bar1);
cc6e7c44
RA
7428 if (sp->intr_type != MSI_X)
7429 pci_release_regions(pdev);
7430 else {
7431 release_mem_region(pci_resource_start(pdev, 0),
7432 pci_resource_len(pdev, 0));
7433 release_mem_region(pci_resource_start(pdev, 2),
7434 pci_resource_len(pdev, 2));
7435 }
1da177e4 7436 pci_set_drvdata(pdev, NULL);
1da177e4 7437 free_netdev(dev);
19a60522 7438 pci_disable_device(pdev);
1da177e4
LT
7439}
7440
7441/**
7442 * s2io_starter - Entry point for the driver
7443 * Description: This function is the entry point for the driver. It verifies
7444 * the module loadable parameters and initializes PCI configuration space.
7445 */
7446
7447int __init s2io_starter(void)
7448{
29917620 7449 return pci_register_driver(&s2io_driver);
1da177e4
LT
7450}
7451
7452/**
20346722 7453 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
 7454	 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
7455 */
7456
372cc597 7457static __exit void s2io_closer(void)
1da177e4
LT
7458{
7459 pci_unregister_driver(&s2io_driver);
7460 DBG_PRINT(INIT_DBG, "cleanup done\n");
7461}
7462
7463module_init(s2io_starter);
7464module_exit(s2io_closer);
7d3d0439 7465
6aa20a22 7466static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
1ee6dd77 7467 struct tcphdr **tcp, struct RxD_t *rxdp)
7d3d0439
RA
7468{
7469 int ip_off;
7470 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7471
7472 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7473 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7474 __FUNCTION__);
7475 return -1;
7476 }
7477
7478 /* TODO:
 7479	 * By default the VLAN field in the MAC is stripped by the card; if this
 7480	 * feature is turned off in the rx_pa_cfg register, then the ip_off field
 7481	 * has to be shifted by a further 2 bytes.
7482 */
7483 switch (l2_type) {
7484 case 0: /* DIX type */
7485 case 4: /* DIX type with VLAN */
7486 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7487 break;
7488 /* LLC, SNAP etc are considered non-mergeable */
7489 default:
7490 return -1;
7491 }
7492
7493 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7494 ip_len = (u8)((*ip)->ihl);
7495 ip_len <<= 2;
7496 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7497
7498 return 0;
7499}
7500
1ee6dd77 7501static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7502 struct tcphdr *tcp)
7503{
7504 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7505 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7506 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7507 return -1;
7508 return 0;
7509}
7510
7511static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7512{
7513 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7514}
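
/*
 * Worked example for the computation above: tot_len = 1500, ihl = 5
 * (20-byte IP header) and doff = 8 (32-byte TCP header carrying the
 * timestamp option) give 1500 - 20 - 32 = 1448 bytes of L4 payload.
 */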
7515
1ee6dd77 7516static void initiate_new_session(struct lro *lro, u8 *l2h,
7d3d0439
RA
7517 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7518{
7519 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7520 lro->l2h = l2h;
7521 lro->iph = ip;
7522 lro->tcph = tcp;
7523 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7524 lro->tcp_ack = ntohl(tcp->ack_seq);
7525 lro->sg_num = 1;
7526 lro->total_len = ntohs(ip->tot_len);
7527 lro->frags_len = 0;
6aa20a22 7528 /*
7d3d0439
RA
7529 * check if we saw TCP timestamp. Other consistency checks have
7530 * already been done.
7531 */
7532 if (tcp->doff == 8) {
7533 u32 *ptr;
7534 ptr = (u32 *)(tcp+1);
7535 lro->saw_ts = 1;
7536 lro->cur_tsval = *(ptr+1);
7537 lro->cur_tsecr = *(ptr+2);
7538 }
7539 lro->in_use = 1;
7540}
7541
1ee6dd77 7542static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
7543{
7544 struct iphdr *ip = lro->iph;
7545 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 7546 __sum16 nchk;
1ee6dd77 7547 struct stat_block *statinfo = sp->mac_control.stats_info;
7d3d0439
RA
7548 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7549
7550 /* Update L3 header */
7551 ip->tot_len = htons(lro->total_len);
7552 ip->check = 0;
7553 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7554 ip->check = nchk;
7555
7556 /* Update L4 header */
7557 tcp->ack_seq = lro->tcp_ack;
7558 tcp->window = lro->window;
7559
7560 /* Update tsecr field if this session has timestamps enabled */
7561 if (lro->saw_ts) {
7562 u32 *ptr = (u32 *)(tcp + 1);
7563 *(ptr+2) = lro->cur_tsecr;
7564 }
7565
7566 /* Update counters required for calculation of
7567 * average no. of packets aggregated.
7568 */
7569 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7570 statinfo->sw_stat.num_aggregations++;
7571}
7572
1ee6dd77 7573static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
7574 struct tcphdr *tcp, u32 l4_pyld)
7575{
7576 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7577 lro->total_len += l4_pyld;
7578 lro->frags_len += l4_pyld;
7579 lro->tcp_next_seq += l4_pyld;
7580 lro->sg_num++;
7581
 7582	 /* Update ack seq no. and window advertisement (from this pkt) in LRO object */
7583 lro->tcp_ack = tcp->ack_seq;
7584 lro->window = tcp->window;
6aa20a22 7585
7d3d0439
RA
7586 if (lro->saw_ts) {
7587 u32 *ptr;
7588 /* Update tsecr and tsval from this packet */
7589 ptr = (u32 *) (tcp + 1);
6aa20a22 7590 lro->cur_tsval = *(ptr + 1);
7d3d0439
RA
7591 lro->cur_tsecr = *(ptr + 2);
7592 }
7593}
7594
1ee6dd77 7595static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
7596 struct tcphdr *tcp, u32 tcp_pyld_len)
7597{
7d3d0439
RA
7598 u8 *ptr;
7599
79dc1901
AM
7600 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7601
7d3d0439
RA
7602 if (!tcp_pyld_len) {
7603 /* Runt frame or a pure ack */
7604 return -1;
7605 }
7606
7607 if (ip->ihl != 5) /* IP has options */
7608 return -1;
7609
75c30b13
AR
7610 /* If we see CE codepoint in IP header, packet is not mergeable */
7611 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7612 return -1;
7613
7614 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7d3d0439 7615 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
75c30b13 7616 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
7617 /*
7618 * Currently recognize only the ack control word and
7619 * any other control field being set would result in
7620 * flushing the LRO session
7621 */
7622 return -1;
7623 }
7624
6aa20a22 7625 /*
7d3d0439
RA
7626 * Allow only one TCP timestamp option. Don't aggregate if
7627 * any other options are detected.
7628 */
7629 if (tcp->doff != 5 && tcp->doff != 8)
7630 return -1;
7631
7632 if (tcp->doff == 8) {
6aa20a22 7633 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
7634 while (*ptr == TCPOPT_NOP)
7635 ptr++;
7636 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7637 return -1;
7638
7639 /* Ensure timestamp value increases monotonically */
7640 if (l_lro)
7641 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7642 return -1;
7643
7644 /* timestamp echo reply should be non-zero */
6aa20a22 7645 if (*((u32 *)(ptr+6)) == 0)
7d3d0439
RA
7646 return -1;
7647 }
7648
7649 return 0;
7650}
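
/*
 * Option layout assumed by the pointer arithmetic above when doff == 8,
 * i.e. a 32-byte TCP header whose 12 option bytes are exactly two NOPs
 * followed by the timestamp option:
 *
 *	byte 0:      TCPOPT_NOP
 *	byte 1:      TCPOPT_NOP
 *	byte 2:      TCPOPT_TIMESTAMP (kind 8)
 *	byte 3:      TCPOLEN_TIMESTAMP (length 10)
 *	bytes 4-7:   TSval
 *	bytes 8-11:  TSecr
 *
 * With ptr left on the kind byte after skipping NOPs, *(u32 *)(ptr + 2)
 * is TSval and *(u32 *)(ptr + 6) is TSecr. initiate_new_session() reads
 * the same two words as *(ptr + 1) and *(ptr + 2) of a u32 pointer based
 * at the start of the options.
 */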
7651
7652static int
1ee6dd77
RB
7653s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7654 struct RxD_t *rxdp, struct s2io_nic *sp)
7d3d0439
RA
7655{
7656 struct iphdr *ip;
7657 struct tcphdr *tcph;
7658 int ret = 0, i;
7659
7660 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7661 rxdp))) {
7662 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7663 ip->saddr, ip->daddr);
7664 } else {
7665 return ret;
7666 }
7667
7668 tcph = (struct tcphdr *)*tcp;
7669 *tcp_len = get_l4_pyld_length(ip, tcph);
7670 for (i=0; i<MAX_LRO_SESSIONS; i++) {
1ee6dd77 7671 struct lro *l_lro = &sp->lro0_n[i];
7d3d0439
RA
7672 if (l_lro->in_use) {
7673 if (check_for_socket_match(l_lro, ip, tcph))
7674 continue;
7675 /* Sock pair matched */
7676 *lro = l_lro;
7677
7678 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7679 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7680 "0x%x, actual 0x%x\n", __FUNCTION__,
7681 (*lro)->tcp_next_seq,
7682 ntohl(tcph->seq));
7683
7684 sp->mac_control.stats_info->
7685 sw_stat.outof_sequence_pkts++;
7686 ret = 2;
7687 break;
7688 }
7689
7690 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7691 ret = 1; /* Aggregate */
7692 else
7693 ret = 2; /* Flush both */
7694 break;
7695 }
7696 }
7697
7698 if (ret == 0) {
7699 /* Before searching for available LRO objects,
7700 * check if the pkt is L3/L4 aggregatable. If not
7701 * don't create new LRO session. Just send this
7702 * packet up.
7703 */
7704 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
7705 return 5;
7706 }
7707
7708 for (i=0; i<MAX_LRO_SESSIONS; i++) {
1ee6dd77 7709 struct lro *l_lro = &sp->lro0_n[i];
7d3d0439
RA
7710 if (!(l_lro->in_use)) {
7711 *lro = l_lro;
7712 ret = 3; /* Begin anew */
7713 break;
7714 }
7715 }
7716 }
7717
7718 if (ret == 0) { /* sessions exceeded */
7719 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
7720 __FUNCTION__);
7721 *lro = NULL;
7722 return ret;
7723 }
7724
7725 switch (ret) {
7726 case 3:
7727 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7728 break;
7729 case 2:
7730 update_L3L4_header(sp, *lro);
7731 break;
7732 case 1:
7733 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
7734 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7735 update_L3L4_header(sp, *lro);
7736 ret = 4; /* Flush the LRO */
7737 }
7738 break;
7739 default:
 7740	 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n",
7741 __FUNCTION__);
7742 break;
7743 }
7744
7745 return ret;
7746}
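
/*
 * Return codes produced by s2io_club_tcp_session() and consumed by the
 * switch in rx_osm_handler():
 *
 *	-1  non-TCP frame or not L2 aggregatable	send up as-is
 *	 0  all LRO sessions already in use		send up as-is
 *	 1  aggregated into an existing session
 *	 2  unmergeable or out-of-order			flush both
 *	 3  new session begun with this packet
 *	 4  aggregate hit lro_max_aggr_per_sess		flush the session
 *	 5  first packet not L3/L4 aggregatable		send up as-is
 */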
7747
1ee6dd77 7748static void clear_lro_session(struct lro *lro)
7d3d0439 7749{
1ee6dd77 7750 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
7751
7752 memset(lro, 0, lro_struct_size);
7753}
7754
7755static void queue_rx_frame(struct sk_buff *skb)
7756{
7757 struct net_device *dev = skb->dev;
7758
7759 skb->protocol = eth_type_trans(skb, dev);
db874e65
SS
7760 if (napi)
7761 netif_receive_skb(skb);
7762 else
7763 netif_rx(skb);
7d3d0439
RA
7764}
7765
1ee6dd77
RB
7766static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7767 struct sk_buff *skb,
7d3d0439
RA
7768 u32 tcp_len)
7769{
75c30b13 7770 struct sk_buff *first = lro->parent;
7d3d0439
RA
7771
7772 first->len += tcp_len;
7773 first->data_len = lro->frags_len;
7774 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
7775 if (skb_shinfo(first)->frag_list)
7776 lro->last_frag->next = skb;
7d3d0439
RA
7777 else
7778 skb_shinfo(first)->frag_list = skb;
372cc597 7779 first->truesize += skb->truesize;
75c30b13 7780 lro->last_frag = skb;
7d3d0439
RA
7781 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7782 return;
7783}