/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
                          struct neighbour *neigh, const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
        const struct adapter *adapter = tdev2adap(tdev);

        return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call back the client for each
 * activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_add_tail(&client->client_list, &client_list);

        if (client->add) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->add(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call back the client for
 * each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
        struct t3cdev *tdev;

        mutex_lock(&cxgb3_db_lock);
        list_del(&client->client_list);

        if (client->remove) {
                list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
                        if (offload_activated(tdev))
                                client->remove(tdev);
                }
        }
        mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->add)
                        client->add(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients
 * for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->remove)
                        client->remove(tdev);
        }
        mutex_unlock(&cxgb3_db_lock);
}
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
        struct cxgb3_client *client;

        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
                if (client->event_handler)
                        client->event_handler(tdev, event, port);
        }
        mutex_unlock(&cxgb3_db_lock);
}
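/*
 * Find the port net_device whose hardware address matches @mac, resolving
 * through to the VLAN device for @vlan or to the topmost bonding master
 * where applicable.  Returns NULL if no port matches.
 */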
static struct net_device *get_iff_from_mac(struct adapter *adapter,
                                           const unsigned char *mac,
                                           unsigned int vlan)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];

                if (ether_addr_equal(dev->dev_addr, mac)) {
                        rcu_read_lock();
                        if (vlan && vlan != VLAN_VID_MASK) {
                                dev = __vlan_find_dev_deep_rcu(dev,
                                        htons(ETH_P_8021Q), vlan);
                        } else if (netif_is_bond_slave(dev)) {
                                struct net_device *upper_dev;

                                while ((upper_dev =
                                        netdev_master_upper_dev_get_rcu(dev)))
                                        dev = upper_dev;
                        }
                        rcu_read_unlock();
                        return dev;
                }
        }
        return NULL;
}
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
                              void *data)
{
        int i;
        int ret = 0;
        unsigned int val = 0;
        struct ulp_iscsi_info *uiip = data;

        switch (req) {
        case ULP_ISCSI_GET_PARAMS:
                uiip->pdev = adapter->pdev;
                uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
                uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
                uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

                val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
                for (i = 0; i < 4; i++, val >>= 8)
                        uiip->pgsz_factor[i] = val & 0xFF;

                val = t3_read_reg(adapter, A_TP_PARA_REG7);
                uiip->max_txsz =
                uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0,
                                     (val >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1);
                /*
                 * On tx, the iscsi pdu has to be <= tx page size and has to
                 * fit into the Tx PM FIFO.
                 */
                val = min(adapter->params.tp.tx_pg_size,
                          t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
                uiip->max_txsz = min(val, uiip->max_txsz);

                /* set MaxRxData to 16224 */
                val = t3_read_reg(adapter, A_TP_PARA_REG2);
                if ((val >> S_MAXRXDATA) != 0x3f60) {
                        val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
                        val |= V_MAXRXDATA(0x3f60);
                        pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
                                adapter->name, val);
                        t3_write_reg(adapter, A_TP_PARA_REG2, val);
                }

                /*
                 * On rx, the iscsi pdu has to be < rx page size and the
                 * max rx data length programmed in TP.
                 */
                val = min(adapter->params.tp.rx_pg_size,
                          ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
                           S_MAXRXDATA) & M_MAXRXDATA);
                uiip->max_rxsz = min(val, uiip->max_rxsz);
                break;
        case ULP_ISCSI_SET_PARAMS:
                t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
                /* program the ddp page sizes */
                for (i = 0; i < 4; i++)
                        val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
                if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
                        pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
                                adapter->name, val, uiip->pgsz_factor[0],
                                uiip->pgsz_factor[1], uiip->pgsz_factor[2],
                                uiip->pgsz_factor[3]);
                        t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
                }
                break;
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}
/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
        int ret = 0;

        switch (req) {
        case RDMA_GET_PARAMS: {
                struct rdma_info *rdma = data;
                struct pci_dev *pdev = adapter->pdev;

                rdma->udbell_physbase = pci_resource_start(pdev, 2);
                rdma->udbell_len = pci_resource_len(pdev, 2);
                rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
                rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
                rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
                rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
                rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
                rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
                rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
                rdma->pdev = pdev;
                break;
        }
        case RDMA_CQ_OP: {
                unsigned long flags;
                struct rdma_cq_op *rdma = data;

                /* may be called in any context */
                spin_lock_irqsave(&adapter->sge.reg_lock, flags);
                ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
                                        rdma->credits);
                spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
                break;
        }
        case RDMA_GET_MEM: {
                struct ch_mem_range *t = data;
                struct mc7 *mem;

                if ((t->addr & 7) || (t->len & 7))
                        return -EINVAL;
                if (t->mem_id == MEM_CM)
                        mem = &adapter->cm;
                else if (t->mem_id == MEM_PMRX)
                        mem = &adapter->pmrx;
                else if (t->mem_id == MEM_PMTX)
                        mem = &adapter->pmtx;
                else
                        return -EINVAL;

                ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
                                     (u64 *) t->buf);
                if (ret)
                        return ret;
                break;
        }
        case RDMA_CQ_SETUP: {
                struct rdma_cq_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_cqcntxt(adapter, rdma->id,
                                          rdma->base_addr, rdma->size,
                                          ASYNC_NOTIF_RSPQ,
                                          rdma->ovfl_mode, rdma->credits,
                                          rdma->credit_thres);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_CQ_DISABLE:
                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        case RDMA_CTRL_QP_SETUP: {
                struct rdma_ctrlqp_setup *rdma = data;

                spin_lock_irq(&adapter->sge.reg_lock);
                ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
                                         SGE_CNTXT_RDMA,
                                         ASYNC_NOTIF_RSPQ,
                                         rdma->base_addr, rdma->size,
                                         FW_RI_TID_START, 1, 0);
                spin_unlock_irq(&adapter->sge.reg_lock);
                break;
        }
        case RDMA_GET_MIB: {
                spin_lock(&adapter->stats_lock);
                t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                ret = -EOPNOTSUPP;
        }
        return ret;
}
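/*
 * t3cdev control operation dispatcher: answers queries about TID ranges,
 * MTU tables, port devices and DDP parameters directly, and hands iSCSI
 * and RDMA requests on to the dedicated handlers above.
 */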
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct tid_range *tid;
        struct mtutab *mtup;
        struct iff_mac *iffmacp;
        struct ddp_params *ddpp;
        struct adap_ports *ports;
        struct ofld_page_info *rx_page_info;
        struct tp_params *tp = &adapter->params.tp;
        int i;

        switch (req) {
        case GET_MAX_OUTSTANDING_WR:
                *(unsigned int *)data = FW_WR_NUM;
                break;
        case GET_WR_LEN:
                *(unsigned int *)data = WR_FLITS;
                break;
        case GET_TX_MAX_CHUNK:
                *(unsigned int *)data = 1 << 20;        /* 1MB */
                break;
        case GET_TID_RANGE:
                tid = data;
                tid->num = t3_mc5_size(&adapter->mc5) -
                    adapter->params.mc5.nroutes -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
                tid->base = 0;
                break;
        case GET_STID_RANGE:
                tid = data;
                tid->num = adapter->params.mc5.nservers;
                tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
                    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
                break;
        case GET_L2T_CAPACITY:
                *(unsigned int *)data = 2048;
                break;
        case GET_MTUS:
                mtup = data;
                mtup->size = NMTUS;
                mtup->mtus = adapter->params.mtus;
                break;
        case GET_IFF_FROM_MAC:
                iffmacp = data;
                iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
                                                iffmacp->vlan_tag &
                                                VLAN_VID_MASK);
                break;
        case GET_DDP_PARAMS:
                ddpp = data;
                ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
                ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
                ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
                break;
        case GET_PORTS:
                ports = data;
                ports->nports = adapter->params.nports;
                for_each_port(adapter, i)
                        ports->lldevs[i] = adapter->port[i];
                break;
        case ULP_ISCSI_GET_PARAMS:
        case ULP_ISCSI_SET_PARAMS:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_ulp_iscsi_ctl(adapter, req, data);
        case RDMA_GET_PARAMS:
        case RDMA_CQ_OP:
        case RDMA_CQ_SETUP:
        case RDMA_CQ_DISABLE:
        case RDMA_CTRL_QP_SETUP:
        case RDMA_GET_MEM:
        case RDMA_GET_MIB:
                if (!offload_running(adapter))
                        return -EAGAIN;
                return cxgb_rdma_ctl(adapter, req, data);
        case GET_RX_PAGE_INFO:
                rx_page_info = data;
                rx_page_info->page_size = tp->rx_pg_size;
                rx_page_info->num = tp->rx_num_pgs;
                break;
        case GET_ISCSI_IPV4ADDR: {
                struct iscsi_ipv4addr *p = data;
                struct port_info *pi = netdev_priv(p->dev);

                p->ipv4addr = pi->iscsi_ipv4addr;
                break;
        }
        case GET_EMBEDDED_INFO: {
                struct ch_embedded_info *e = data;

                spin_lock(&adapter->stats_lock);
                t3_get_fw_version(adapter, &e->fw_vers);
                t3_get_tp_version(adapter, &e->tp_vers);
                spin_unlock(&adapter->stats_lock);
                break;
        }
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}
/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  It simply drops the packets, as it
 * isn't normal to receive offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
                                int n)
{
        while (n--)
                dev_kfree_skb_any(skbs[n]);
        return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
        dev->recv = rx_offload_blackhole;
        dev->neigh_update = dummy_neigh_update;
}
/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
        union active_open_entry *p = atid2entry(t, atid);
        void *ctx = p->t3c_tid.ctx;

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);

        return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
        union listen_entry *p = stid2entry(t, stid);

        spin_lock_bh(&t->stid_lock);
        p->next = t->sfree;
        t->sfree = p;
        t->stids_in_use--;
        spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);
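/*
 * Associate a client and its context with a hardware TID and account for
 * the TID as in use.
 */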
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
                      void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        t->tid_tab[tid].client = client;
        t->tid_tab[tid].ctx = ctx;
        atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must already be properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
        struct cpl_tid_release *req;

        skb->priority = CPL_PRIORITY_SETUP;
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
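/*
 * Work function that drains the deferred TID release list, sending a
 * CPL_TID_RELEASE message for each entry.  If even the pre-allocated
 * nofail_skb is unavailable, the remainder of the list is left for a
 * later run, flagged by release_list_incomplete.
 */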
static void t3_process_tid_release_list(struct work_struct *work)
{
        struct t3c_data *td = container_of(work, struct t3c_data,
                                           tid_release_task);
        struct sk_buff *skb;
        struct t3cdev *tdev = td->dev;

        spin_lock_bh(&td->tid_release_lock);
        while (td->tid_release_list) {
                struct t3c_tid_entry *p = td->tid_release_list;

                td->tid_release_list = p->ctx;
                spin_unlock_bh(&td->tid_release_lock);

                skb = alloc_skb(sizeof(struct cpl_tid_release),
                                GFP_KERNEL);
                if (!skb)
                        skb = td->nofail_skb;
                if (!skb) {
                        spin_lock_bh(&td->tid_release_lock);
                        p->ctx = (void *)td->tid_release_list;
                        td->tid_release_list = p;
                        break;
                }
                mk_tid_release(skb, p - td->tid_maps.tid_tab);
                cxgb3_ofld_send(tdev, skb);
                p->ctx = NULL;
                if (skb == td->nofail_skb)
                        td->nofail_skb =
                                alloc_skb(sizeof(struct cpl_tid_release),
                                          GFP_KERNEL);
                spin_lock_bh(&td->tid_release_lock);
        }
        td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
        spin_unlock_bh(&td->tid_release_lock);

        if (!td->nofail_skb)
                td->nofail_skb =
                        alloc_skb(sizeof(struct cpl_tid_release),
                                  GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
        struct t3c_data *td = T3C_DATA(tdev);
        struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

        spin_lock_bh(&td->tid_release_lock);
        p->ctx = (void *)td->tid_release_list;
        p->client = NULL;
        td->tid_release_list = p;
        if (!p->ctx || td->release_list_incomplete)
                schedule_work(&td->tid_release_task);
        spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        BUG_ON(tid >= t->ntids);
        if (tdev->type == T3A)
                (void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
        else {
                struct sk_buff *skb;

                skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
                if (likely(skb)) {
                        mk_tid_release(skb, tid);
                        cxgb3_ofld_send(tdev, skb);
                        t->tid_tab[tid].ctx = NULL;
                } else
                        cxgb3_queue_tid_release(tdev, tid);
        }
        atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);
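/*
 * Allocate an active-open TID from the free list, recording the client and
 * context in the entry.  Returns the atid, or -1 if none are available or
 * too few TIDs would remain for the MC5.
 */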
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
                     void *ctx)
{
        int atid = -1;
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        spin_lock_bh(&t->atid_lock);
        if (t->afree &&
            t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
            t->ntids) {
                union active_open_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->t3c_tid.ctx = ctx;
                p->t3c_tid.client = client;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);
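/*
 * Allocate a server TID from the free list, recording the client and
 * context in the entry.  Returns the stid, or -1 if the free list is empty.
 */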
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
                     void *ctx)
{
        int stid = -1;
        struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

        spin_lock_bh(&t->stid_lock);
        if (t->sfree) {
                union listen_entry *p = t->sfree;

                stid = (p - t->stid_tab) + t->stid_base;
                t->sfree = p->next;
                p->t3c_tid.ctx = ctx;
                p->t3c_tid.client = client;
                t->stids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
        const struct port_info *pi = netdev_priv(dev);

        return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_smt_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_rte_write_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
                       rpl->status, GET_TID(rpl));

        return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_open_rpl *rpl = cplhdr(skb);
        unsigned int atid = G_TID(ntohl(rpl->atid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
            t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
                return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_OPEN_RPL);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int stid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode] (dev, skb,
                                                             t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, p->opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}
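/*
 * Handle a CPL_PASS_ACCEPT_REQ message: validate the hardware TID and
 * dispatch the request to the client listening on the server TID.
 */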
static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                printk("%s: passive open TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_stid(t, stid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
                return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_PASS_ACCEPT_REQ);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to
 * reuse the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
                                               gfp_t gfp)
{
        if (likely(!skb_cloned(skb))) {
                BUG_ON(skb->len < len);
                __skb_trim(skb, len);
                skb_get(skb);
        } else {
                skb = alloc_skb(len, gfp);
                if (skb)
                        __skb_put(skb, len);
        }
        return skb;
}
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
        union opcode_tid *p = cplhdr(skb);
        unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[p->opcode]) {
                return t3c_tid->client->handlers[p->opcode]
                    (dev, skb, t3c_tid->ctx);
        } else {
                struct cpl_abort_req_rss *req = cplhdr(skb);
                struct cpl_abort_rpl *rpl;
                struct sk_buff *reply_skb;
                unsigned int tid = GET_TID(req);
                u8 cmd = req->status;

                if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
                    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                        goto out;

                reply_skb = cxgb3_get_cpl_reply_skb(skb,
                                                    sizeof(struct
                                                           cpl_abort_rpl),
                                                    GFP_ATOMIC);

                if (!reply_skb) {
                        printk("do_abort_req_rss: couldn't get skb!\n");
                        goto out;
                }
                reply_skb->priority = CPL_PRIORITY_DATA;
                __skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
                rpl = cplhdr(reply_skb);
                rpl->wr.wr_hi =
                    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
                rpl->wr.wr_lo = htonl(V_WR_TID(tid));
                OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
                rpl->cmd = cmd;
                cxgb3_ofld_send(dev, reply_skb);
out:
                return CPL_RET_BUF_DONE;
        }
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
        struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
        struct t3c_tid_entry *t3c_tid;
        unsigned int tid = GET_TID(req);

        if (unlikely(tid >= t->ntids)) {
                printk("%s: active establish TID %u too large\n",
                       dev->name, tid);
                t3_fatal_err(tdev2adap(dev));
                return CPL_RET_BUF_DONE;
        }

        t3c_tid = lookup_atid(t, atid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
                return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
                    (dev, skb, t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, CPL_ACT_ESTABLISH);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_trace_pkt *p = cplhdr(skb);

        skb->protocol = htons(0xffff);
        skb->dev = dev->lldev;
        skb_pull(skb, sizeof(*p));
        skb_reset_mac_header(skb);
        netif_receive_skb(skb);
        return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
        return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
        return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
        unsigned int hwtid = get_hwtid(skb);
        unsigned int opcode = get_opcode(skb);
        struct t3c_tid_entry *t3c_tid;

        t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
        if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
            t3c_tid->client->handlers[opcode]) {
                return t3c_tid->client->handlers[opcode] (dev, skb,
                                                          t3c_tid->ctx);
        } else {
                pr_err("%s: received clientless CPL command 0x%x\n",
                       dev->name, opcode);
                return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
        }
}
static int nb_callback(struct notifier_block *self, unsigned long event,
                       void *ctx)
{
        switch (event) {
        case (NETEVENT_NEIGH_UPDATE):{
                cxgb_neigh_update((struct neighbour *)ctx);
                break;
        }
        case (NETEVENT_REDIRECT):{
                struct netevent_redirect *nr = ctx;
                cxgb_redirect(nr->old, nr->new, nr->neigh,
                              nr->daddr);
                cxgb_neigh_update(nr->neigh);
                break;
        }
        default:
                break;
        }
        return 0;
}

static struct notifier_block nb = {
        .notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
        pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
        return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
        if (opcode < NUM_CPL_CMDS)
                cpl_handlers[opcode] = h ? h : do_bad_cpl;
        else
                pr_err("T3C: handler registration for opcode %x failed\n",
                       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
        while (n--) {
                struct sk_buff *skb = *skbs++;
                unsigned int opcode = get_opcode(skb);
                int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
                if (ret & CPL_RET_UNKNOWN_TID) {
                        union opcode_tid *p = cplhdr(skb);

                        pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
                               dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
                }
#endif
                if (ret & CPL_RET_BUF_DONE)
                        kfree_skb(skb);
        }
        return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
        int r;

        local_bh_disable();
        r = dev->send(dev, skb);
        local_bh_enable();
        return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);
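/*
 * Returns whether a net_device is one of the ports of a registered
 * offload-capable adapter.
 */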
static int is_offloading(struct net_device *dev)
{
        struct adapter *adapter;
        int i;

        read_lock_bh(&adapter_list_lock);
        list_for_each_entry(adapter, &adapter_list, adapter_list) {
                for_each_port(adapter, i) {
                        if (dev == adapter->port[i]) {
                                read_unlock_bh(&adapter_list_lock);
                                return 1;
                        }
                }
        }
        read_unlock_bh(&adapter_list_lock);
        return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
        struct net_device *dev;

        if (!neigh)
                return;
        dev = neigh->dev;
        if (dev && (is_offloading(dev))) {
                struct t3cdev *tdev = dev2t3cdev(dev);

                BUG_ON(!tdev);
                t3_l2t_update(tdev, neigh);
        }
}
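/*
 * Send a CPL_SET_TCB_FIELD work request that repoints connection @tid at
 * L2T entry @e.
 */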
static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;

        skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
        if (!skb) {
                pr_err("%s: cannot allocate skb!\n", __func__);
                return;
        }
        skb->priority = CPL_PRIORITY_CONTROL;
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = 0;
        req->cpu_idx = 0;
        req->word = htons(W_TCB_L2T_IX);
        req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
        req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
        tdev->send(tdev, skb);
}
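/*
 * Handle a route redirect event: allocate an L2T entry for the new
 * destination, then walk the TID table and let each client decide whether
 * its connection should be switched to the new entry.
 */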
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
                          struct neighbour *neigh,
                          const void *daddr)
{
        struct net_device *dev;
        struct tid_info *ti;
        struct t3cdev *tdev;
        u32 tid;
        int update_tcb;
        struct l2t_entry *e;
        struct t3c_tid_entry *te;

        dev = neigh->dev;

        if (!is_offloading(dev))
                return;
        tdev = dev2t3cdev(dev);
        BUG_ON(!tdev);

        /* Add new L2T entry */
        e = t3_l2t_get(tdev, new, dev, daddr);
        if (!e) {
                pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
                return;
        }

        /* Walk tid table and notify clients of dst change. */
        ti = &(T3C_DATA(tdev))->tid_maps;
        for (tid = 0; tid < ti->ntids; tid++) {
                te = lookup_tid(ti, tid);
                BUG_ON(!te);
                if (te && te->ctx && te->client && te->client->redirect) {
                        update_tcb = te->client->redirect(te->ctx, old, new, e);
                        if (update_tcb) {
                                rcu_read_lock();
                                l2t_hold(L2DATA(tdev), e);
                                rcu_read_unlock();
                                set_l2t_ix(tdev, tid, e);
                        }
                }
        }
        l2t_release(tdev, e);
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
                         unsigned int natids, unsigned int nstids,
                         unsigned int atid_base, unsigned int stid_base)
{
        unsigned long size = ntids * sizeof(*t->tid_tab) +
            natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

        t->tid_tab = cxgb_alloc_mem(size);
        if (!t->tid_tab)
                return -ENOMEM;

        t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
        t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
        t->ntids = ntids;
        t->nstids = nstids;
        t->stid_base = stid_base;
        t->sfree = NULL;
        t->natids = natids;
        t->atid_base = atid_base;
        t->afree = NULL;
        t->stids_in_use = t->atids_in_use = 0;
        atomic_set(&t->tids_in_use, 0);
        spin_lock_init(&t->stid_lock);
        spin_lock_init(&t->atid_lock);

        /*
         * Setup the free lists for stid_tab and atid_tab.
         */
        if (nstids) {
                while (--nstids)
                        t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
                t->sfree = t->stid_tab;
        }
        if (natids) {
                while (--natids)
                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
                t->afree = t->atid_tab;
        }
        return 0;
}

static void free_tid_maps(struct tid_info *t)
{
        cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_add_tail(&adap->adapter_list, &adapter_list);
        write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
        write_lock_bh(&adapter_list_lock);
        list_del(&adap->adapter_list);
        write_unlock_bh(&adapter_list_lock);
}
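/*
 * Allocate and initialize the per-adapter offload state: L2 table, TID
 * tables, the deferred TID release machinery, and the netevent notifier
 * (registered once, for the first adapter).
 */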
int cxgb3_offload_activate(struct adapter *adapter)
{
        struct t3cdev *dev = &adapter->tdev;
        int natids, err;
        struct t3c_data *t;
        struct tid_range stid_range, tid_range;
        struct mtutab mtutab;
        unsigned int l2t_capacity;
        struct l2t_data *l2td;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        err = -EOPNOTSUPP;
        if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
            dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
            dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
            dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
            dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
            dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
                goto out_free;

        err = -ENOMEM;
        l2td = t3_init_l2t(l2t_capacity);
        if (!l2td)
                goto out_free;

        natids = min(tid_range.num / 2, MAX_ATIDS);
        err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
                            stid_range.num, ATID_BASE, stid_range.base);
        if (err)
                goto out_free_l2t;

        t->mtus = mtutab.mtus;
        t->nmtus = mtutab.size;

        INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
        spin_lock_init(&t->tid_release_lock);
        INIT_LIST_HEAD(&t->list_node);
        t->dev = dev;

        RCU_INIT_POINTER(dev->l2opt, l2td);
        T3C_DATA(dev) = t;
        dev->recv = process_rx;
        dev->neigh_update = t3_l2t_update;

        /* Register netevent handler once */
        if (list_empty(&adapter_list))
                register_netevent_notifier(&nb);

        t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
        t->release_list_incomplete = 0;

        add_adapter(adapter);
        return 0;

out_free_l2t:
        t3_free_l2t(l2td);
out_free:
        kfree(t);
        return err;
}

static void clean_l2_data(struct rcu_head *head)
{
        struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
        t3_free_l2t(d);
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;
        struct t3c_data *t = T3C_DATA(tdev);
        struct l2t_data *d;

        remove_adapter(adapter);
        if (list_empty(&adapter_list))
                unregister_netevent_notifier(&nb);

        free_tid_maps(&t->tid_maps);
        T3C_DATA(tdev) = NULL;
        rcu_read_lock();
        d = L2DATA(tdev);
        rcu_read_unlock();
        RCU_INIT_POINTER(tdev->l2opt, NULL);
        call_rcu(&d->rcu_head, clean_l2_data);
        if (t->nofail_skb)
                kfree_skb(t->nofail_skb);
        kfree(t);
}
static inline void register_tdev(struct t3cdev *tdev)
{
        static int unit;

        mutex_lock(&cxgb3_db_lock);
        snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
        list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
        mutex_lock(&cxgb3_db_lock);
        list_del(&tdev->ofld_dev_list);
        mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
        int type = 0;

        switch (adapter->params.rev) {
        case T3_REV_A:
                type = T3A;
                break;
        case T3_REV_B:
        case T3_REV_B2:
                type = T3B;
                break;
        case T3_REV_C:
                type = T3C;
                break;
        }
        return type;
}

void cxgb3_adapter_ofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        INIT_LIST_HEAD(&tdev->ofld_dev_list);

        cxgb3_set_dummy_ops(tdev);
        tdev->send = t3_offload_tx;
        tdev->ctl = cxgb_offload_ctl;
        tdev->type = adap2type(adapter);

        register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
        struct t3cdev *tdev = &adapter->tdev;

        tdev->recv = NULL;
        tdev->neigh_update = NULL;

        unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
        int i;

        for (i = 0; i < NUM_CPL_CMDS; ++i)
                cpl_handlers[i] = do_bad_cpl;

        t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
        t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
        t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
        t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
        t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
        t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
        t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
        t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
        t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
        t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
        t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}