/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);

MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");

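/* Dump a memory region to the kernel log, 16 bytes per line */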
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

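/* Gather interface statistics from the hypervisor and per-queue counters */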
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	kfree(cb2);
out:
	return stats;
}

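/* Post up to nr_of_wqes receive WQEs on the low-latency RQ1 */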
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int i;

	if (!nr_of_wqes)
		return;

	for (i = 0; i < nr_of_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				ehea_error("%s: no mem for skb/%d wqes filled",
					   dev->name, i);
				break;
			}
		}
		index--;
		index &= max_index_mask;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	int ret = 0;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
out:
	return ret;
}

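/* Allocate skbs and post receive WQEs on RQ2 or RQ3 */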
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (!fill_wqes)
		return ret;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
		if (!skb) {
			ehea_error("%s: no mem for skb/%d wqes filled",
				   dev->name, i);
			q_skba->os_skbs = fill_wqes - i;
			ret = -ENOMEM;
			break;
		}
		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = (u64)skb->data;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
	}
	q_skba->index = index;

	/* Ring doorbell */
	iosync();
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, i);
	else
		ehea_update_rq3a(pr->qp, i);

	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

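/* Extract the RQ number from a CQE and check its error status */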
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

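/* Fetch an skb from the ring and prefetch the next one's cache lines */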
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);
	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (netif_msg_rx_err(pr->port)) {
		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(cqe, sizeof(*cqe), "CQE");
	}

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		ehea_error("Critical receive error. Resetting port.");
		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
		return 1;
	}

	return 0;
}

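/* NAPI poll function: process receive completions on RQ1-RQ3 within budget */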
static int ehea_poll(struct net_device *dev, int *budget)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr = &port->port_res[0];
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, intreq, my_quota, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;
	my_quota = min(*budget, dev->quota);
	my_quota = min(my_quota, EHEA_POLL_MAX_RWQE);

	/* rq0 is low latency RQ */
	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((my_quota > 0) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		my_quota--;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) { /* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");
					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				memcpy(skb->data, ((char *)cqe) + 64,
				       cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) { /* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else { /* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
				vlan_hwaccel_receive_skb(skb, port->vgrp,
							 cqe->vlan_tag);
			else
				netif_receive_skb(skb);

		} else { /* Error occurred */
			pr->p_state.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}

	dev->quota -= processed;
	*budget -= processed;

	pr->p_state.ehea_poll += 1;
	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	intreq = ((pr->p_state.ehea_poll & 0xF) == 0xF);

	if (!cqe || intreq) {
		netif_rx_complete(dev);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		cqe = hw_qeit_get_valid(&qp->hw_rqueue1);
		if (!cqe || intreq)
			return 0;
		if (!netif_rx_reschedule(dev, my_quota))
			return 0;
	}
	return 1;
}

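/* Free skbs whose transmission has completed, as reported by a send CQE */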
void free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
{
	struct sk_buff *skb;
	int index, max_index_mask, i;

	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	max_index_mask = pr->sq_skba.len - 1;
	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
		skb = pr->sq_skba.arr[index];
		if (likely(skb)) {
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		} else {
			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
				   cqe->wr_id, i, index);
		}
		index--;
		index &= max_index_mask;
	}
}

#define MAX_SENDCOMP_QUOTA 400
void ehea_send_irq_tasklet(unsigned long data)
{
	struct ehea_port_res *pr = (struct ehea_port_res *)data;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = MAX_SENDCOMP_QUOTA;
	int cqe_counter = 0;
	int swqe_av = 0;
	unsigned long flags;

	do {
		cqe = ehea_poll_cq(send_cq);
		if (!cqe) {
			ehea_reset_cq_ep(send_cq);
			ehea_reset_cq_n1(send_cq);
			cqe = ehea_poll_cq(send_cq);
			if (!cqe)
				break;
		}
		cqe_counter++;
		rmb();
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			queue_work(pr->port->adapter->ehea_wq,
				   &pr->port->reset_task);
			break;
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE))
			free_sent_skbs(cqe, pr);

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;
	} while (quota > 0);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);
	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	if (unlikely(cqe))
		tasklet_hi_schedule(&pr->send_comp_task);
}

static irqreturn_t ehea_send_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	tasklet_hi_schedule(&pr->send_comp_task);
	return IRQ_HANDLED;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;
	struct ehea_port *port = pr->port;
	netif_rx_schedule(port->netdev);
	return IRQ_HANDLED;
}

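/* Handle QP affiliated error events: log the failing QP and reset the port */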
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);
	}

	queue_work(port->adapter->ehea_wq, &port->reset_task);

	return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < adapter->num_ports; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

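/* Query port attributes (MAC, speed, duplex, number of QPs) from the hypervisor */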
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);	/* May be called via */
	if (!cb0) {				/* ehea_neq_tasklet() */
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;

	/* Number of default QPs */
	port->num_def_qps = cb0->num_default_qps;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	if (port->num_def_qps >= EHEA_NUM_TX_QP)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = EHEA_NUM_TX_QP - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	kfree(cb0);
out:
	return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	netif_carrier_on(port->netdev);
	kfree(cb4);
out:
	return ret;
}

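/* Decode an event queue entry from the NEQ and react to port state changes */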
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
		} else {
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
		break;
	}
}

static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}

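/* Register receive, send, and affiliated-event interrupts for all port resources */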
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_recv_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-recv%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->recv_eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_recv_name, pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_recv_int:"
				   "port_res_nr:%d, ist=%X", i,
				   pr->recv_eq->attr.ist1);
			goto out_free_seq;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for funct ehea_recv_int %d "
				  "registered", pr->recv_eq->attr.ist1, i);
	}

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-send%d", dev->name, i);
		ret = ibmebus_request_irq(NULL, pr->send_eq->attr.ist1,
					  ehea_send_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_send "
				   "port_res_nr:%d, ist=%X", i,
				   pr->send_eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_send_int "
				  "%d registered", pr->send_eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].send_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
out_free_qpeq:
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;
out_free_seq:
	while (--i >= 0) {
		u32 ist = port->port_res[i].recv_eq->attr.ist1;
		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
	}
	goto out;
}

static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->send_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->send_eq->attr.ist1);
	}

	/* receive */
	for (i = 0; i < port->num_def_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(NULL, pr->recv_eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free recv irq for res %d with handle 0x%X",
				  i, pr->recv_eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_def_qps; i++)
		cb0->default_qpn_arr[i] = port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	kfree(cb0);
out:
	return ret;
}

static int ehea_gen_smrs(struct ehea_port_res *pr)
{
	u64 hret;
	struct ehea_adapter *adapter = pr->port->adapter;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->send_mr);
	if (hret != H_SUCCESS)
		goto out;

	hret = ehea_h_register_smr(adapter->handle, adapter->mr.handle,
				   adapter->mr.vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, &pr->recv_mr);
	if (hret != H_SUCCESS)
		goto out_freeres;

	return 0;

out_freeres:
	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS)
		ehea_error("failed freeing SMR");
out:
	return -EIO;
}

static int ehea_rem_smrs(struct ehea_port_res *pr)
{
	struct ehea_adapter *adapter = pr->port->adapter;
	int ret = 0;
	u64 hret;

	hret = ehea_h_free_resource(adapter->handle, pr->send_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing send SMR for pr=%p", pr);
	}

	hret = ehea_h_free_resource(adapter->handle, pr->recv_mr.handle);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		ehea_error("failed freeing recv SMR for pr=%p", pr);
	}

	return ret;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}

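/* Allocate the EQs, CQs, QP and skb rings that make up one port resource */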
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->send_lock);
	spin_lock_init(&pr->recv_lock);
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->recv_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->recv_eq) {
		ehea_error("create_eq failed (recv_eq)");
		goto out_free;
	}

	pr->send_eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->send_eq) {
		ehea_error("create_eq failed (send_eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->recv_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->send_eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}
	tasklet_init(&pr->send_comp_task, ehea_send_irq_tasklet,
		     (unsigned long)pr);
	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);
	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->send_eq);
	ehea_destroy_eq(pr->recv_eq);
out:
	return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->send_eq);
		ehea_destroy_eq(pr->recv_eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}


static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;
	int headersize;
	u64 tmp_addr;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + (skb->h.th->doff * 4);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;

			tmp_addr = (u64)(skb->data + headersize);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u64 tmp_addr;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		memcpy(imm_data, skb->data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	} else {
		memcpy(imm_data, skb->data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}

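/* Fill the SWQE2 immediate data and scatter-gather descriptors for a packet */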
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;
	u64 tmp_addr;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sg1entry->vaddr = tmp_addr;
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;

			tmp_addr = (u64)(page_address(frag->page)
					 + frag->page_offset);
			sgentry->vaddr = tmp_addr;
			swqe->descriptors++;
		}
	}
}

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (tagged)");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (vlan)");
		ret = -EIO;
	}
out_herr:
	return ret;
}

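/* Change the port MAC address and re-register broadcast entries with pHYP */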
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	if (ret)
		goto out_free;

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret)
		goto out_free;

	ret = 0;
out_free:
	kfree(cb0);
out:
	return ret;
}

static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	kfree(cb7);
	return;
}

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

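/* Enable or disable reception of all multicast frames (IFF_ALLMULTI) */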
1584 | static void ehea_allmulti(struct net_device *dev, int enable) | |
1585 | { | |
1586 | struct ehea_port *port = netdev_priv(dev); | |
1587 | u64 hret; | |
1588 | ||
1589 | if (!port->allmulti) { | |
1590 | if (enable) { | |
1591 | /* Enable ALLMULTI */ | |
1592 | ehea_drop_multicast_list(dev); | |
1593 | hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); | |
1594 | if (!hret) | |
1595 | port->allmulti = 1; | |
1596 | else | |
1597 | ehea_error("failed enabling IFF_ALLMULTI"); | |
1598 | } | |
1599 | } else | |
1600 | if (!enable) { | |
1601 | /* Disable ALLMULTI */ | |
1602 | hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); | |
1603 | if (!hret) | |
1604 | port->allmulti = 0; | |
1605 | else | |
1606 | ehea_error("failed disabling IFF_ALLMULTI"); | |
1607 | } | |
1608 | } | |
1609 | ||
1610 | static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr) | |
1611 | { | |
1612 | struct ehea_mc_list *ehea_mcl_entry; | |
1613 | u64 hret; | |
1614 | ||
1e1675cc | 1615 | ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); |
7a291083 JBT |
1616 | if (!ehea_mcl_entry) { |
1617 | ehea_error("no mem for mcl_entry"); | |
1618 | return; | |
1619 | } | |
1620 | ||
1621 | INIT_LIST_HEAD(&ehea_mcl_entry->list); | |
1622 | ||
1623 | memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); | |
1624 | ||
1625 | hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, | |
1626 | H_REG_BCMC); | |
1627 | if (!hret) | |
1628 | list_add(&ehea_mcl_entry->list, &port->mc_list->list); | |
1629 | else { | |
1630 | ehea_error("failed registering mcast MAC"); | |
1631 | kfree(ehea_mcl_entry); | |
1632 | } | |
1633 | } | |
1634 | ||
1635 | static void ehea_set_multicast_list(struct net_device *dev) | |
1636 | { | |
1637 | struct ehea_port *port = netdev_priv(dev); | |
1638 | struct dev_mc_list *k_mcl_entry; | |
1639 | int ret, i; | |
1640 | ||
1641 | if (dev->flags & IFF_PROMISC) { | |
1642 | ehea_promiscuous(dev, 1); | |
1643 | return; | |
1644 | } | |
1645 | ehea_promiscuous(dev, 0); | |
1646 | ||
1647 | if (dev->flags & IFF_ALLMULTI) { | |
1648 | ehea_allmulti(dev, 1); | |
1649 | return; | |
1650 | } | |
1651 | ehea_allmulti(dev, 0); | |
1652 | ||
1653 | if (dev->mc_count) { | |
1654 | ret = ehea_drop_multicast_list(dev); | |
1655 | if (ret) { | |
1656 | /* Dropping the current multicast list failed. | |
1657 | * Enabling ALL_MULTI is the best we can do. | |
1658 | */ | |
1659 | ehea_allmulti(dev, 1); | |
1660 | } | |
1661 | ||
1662 | if (dev->mc_count > port->adapter->max_mc_mac) { | |
1663 | ehea_info("Mcast registration limit reached (0x%lx). " | |
1664 | "Use ALLMULTI!", | |
1665 | port->adapter->max_mc_mac); | |
1666 | goto out; | |
1667 | } | |
1668 | ||
1669 | for (i = 0, k_mcl_entry = dev->mc_list; | |
1670 | i < dev->mc_count; | |
1671 | i++, k_mcl_entry = k_mcl_entry->next) { | |
1672 | ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); | |
1673 | } | |
1674 | } | |
1675 | out: | |
1676 | return; | |
1677 | } | |
1678 | ||
1679 | static int ehea_change_mtu(struct net_device *dev, int new_mtu) | |
1680 | { | |
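/* 68 bytes is the minimum MTU an IPv4 host must support (RFC 791);
 * the upper bound is the adapter's maximum frame size. */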
1681 | if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) | |
1682 | return -EINVAL; | |
1683 | dev->mtu = new_mtu; | |
1684 | return 0; | |
1685 | } | |
1686 | ||
1687 | static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, | |
1688 | struct ehea_swqe *swqe, u32 lkey) | |
1689 | { | |
1690 | if (skb->protocol == htons(ETH_P_IP)) { | |
eddc9ec5 | 1691 | const struct iphdr *iph = ip_hdr(skb); |
7a291083 JBT |
1692 | /* IPv4 */ |
1693 | swqe->tx_control |= EHEA_SWQE_CRC | |
1694 | | EHEA_SWQE_IP_CHECKSUM | |
1695 | | EHEA_SWQE_TCP_CHECKSUM | |
1696 | | EHEA_SWQE_IMM_DATA_PRESENT | |
1697 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | |
1698 | ||
1699 | write_ip_start_end(swqe, skb); | |
1700 | ||
eddc9ec5 ACM |
1701 | if (iph->protocol == IPPROTO_UDP) { |
1702 | if ((iph->frag_off & IP_MF) || | |
1703 | (iph->frag_off & IP_OFFSET)) | |
7a291083 JBT |
1704 | /* IP fragment, so don't change cs */ |
1705 | swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; | |
1706 | else | |
1707 | write_udp_offset_end(swqe, skb); | |
1708 | ||
eddc9ec5 | 1709 | } else if (iph->protocol == IPPROTO_TCP) { |
7a291083 JBT |
1710 | write_tcp_offset_end(swqe, skb); |
1711 | } | |
1712 | ||
1713 | /* ICMP and all other IPv4 protocols need no special | |
1714 | checksum handling here */ | |
1715 | ||
1716 | } else { | |
1717 | /* Other Ethernet Protocol */ | |
1718 | swqe->tx_control |= EHEA_SWQE_CRC | |
1719 | | EHEA_SWQE_IMM_DATA_PRESENT | |
1720 | | EHEA_SWQE_DESCRIPTORS_PRESENT; | |
1721 | } | |
1722 | ||
1723 | write_swqe2_data(skb, dev, swqe, lkey); | |
1724 | } | |
1725 | ||
1726 | static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, | |
1727 | struct ehea_swqe *swqe) | |
1728 | { | |
1729 | int nfrags = skb_shinfo(skb)->nr_frags; | |
1730 | u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; | |
1731 | skb_frag_t *frag; | |
1732 | int i; | |
1733 | ||
1734 | if (skb->protocol == htons(ETH_P_IP)) { | |
eddc9ec5 | 1735 | const struct iphdr *iph = ip_hdr(skb); |
7a291083 JBT |
1736 | /* IPv4 */ |
1737 | write_ip_start_end(swqe, skb); | |
1738 | ||
eddc9ec5 | 1739 | if (iph->protocol == IPPROTO_TCP) { |
7a291083 JBT |
1740 | swqe->tx_control |= EHEA_SWQE_CRC |
1741 | | EHEA_SWQE_IP_CHECKSUM | |
1742 | | EHEA_SWQE_TCP_CHECKSUM | |
1743 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1744 | ||
1745 | write_tcp_offset_end(swqe, skb); | |
1746 | ||
eddc9ec5 ACM |
1747 | } else if (iph->protocol == IPPROTO_UDP) { |
1748 | if ((iph->frag_off & IP_MF) || | |
1749 | (iph->frag_off & IP_OFFSET)) | |
7a291083 JBT |
1750 | /* IP fragment, so don't change cs */ |
1751 | swqe->tx_control |= EHEA_SWQE_CRC | |
1752 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1753 | else { | |
1754 | swqe->tx_control |= EHEA_SWQE_CRC | |
1755 | | EHEA_SWQE_IP_CHECKSUM | |
1756 | | EHEA_SWQE_TCP_CHECKSUM | |
1757 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1758 | ||
1759 | write_udp_offset_end(swqe, skb); | |
1760 | } | |
1761 | } else { | |
1762 | /* ICMP and all other IPv4 protocols: | |
1763 | IP checksum only */ | |
1764 | swqe->tx_control |= EHEA_SWQE_CRC | |
1765 | | EHEA_SWQE_IP_CHECKSUM | |
1766 | | EHEA_SWQE_IMM_DATA_PRESENT; | |
1767 | } | |
1768 | } else { | |
1769 | /* Other Ethernet Protocol */ | |
1770 | swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; | |
1771 | } | |
1772 | /* copy (immediate) data */ | |
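/* The linear buffer holds skb->len - skb->data_len bytes; the rest
 * lives in page fragments. E.g. an skb with len = 200 and
 * data_len = 80 copies 120 bytes from skb->data, then 80 bytes
 * from the frags array. */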
1773 | if (nfrags == 0) { | |
1774 | /* data is in a single piece */ | |
1775 | memcpy(imm_data, skb->data, skb->len); | |
1776 | } else { | |
1777 | /* first copy data from the skb->data buffer ... */ | |
1778 | memcpy(imm_data, skb->data, skb->len - skb->data_len); | |
1779 | imm_data += skb->len - skb->data_len; | |
1780 | ||
1781 | /* ... then copy data from the fragments */ | |
1782 | for (i = 0; i < nfrags; i++) { | |
1783 | frag = &skb_shinfo(skb)->frags[i]; | |
1784 | memcpy(imm_data, | |
1785 | page_address(frag->page) + frag->page_offset, | |
1786 | frag->size); | |
1787 | imm_data += frag->size; | |
1788 | } | |
1789 | } | |
1790 | swqe->immediate_data_length = skb->len; | |
1791 | dev_kfree_skb(skb); | |
1792 | } | |
1793 | ||
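/*
 * Transmit entry point. Frames of up to SWQE3_MAX_IMM bytes are copied
 * into the work queue element as immediate data by ehea_xmit3() and the
 * skb is freed right away; larger frames take ehea_xmit2(), which posts
 * descriptors referencing the skb, so the skb is parked in sq_skba
 * until its send completion arrives.
 */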
1794 | static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1795 | { | |
1796 | struct ehea_port *port = netdev_priv(dev); | |
1797 | struct ehea_swqe *swqe; | |
1798 | unsigned long flags; | |
1799 | u32 lkey; | |
1800 | int swqe_index; | |
1801 | struct ehea_port_res *pr = &port->port_res[0]; | |
1802 | ||
1803 | spin_lock(&pr->xmit_lock); | |
1804 | ||
1805 | swqe = ehea_get_swqe(pr->qp, &swqe_index); | |
1806 | memset(swqe, 0, SWQE_HEADER_SIZE); | |
1807 | atomic_dec(&pr->swqe_avail); | |
1808 | ||
1809 | if (skb->len <= SWQE3_MAX_IMM) { | |
1810 | u32 sig_iv = port->sig_comp_iv; | |
1811 | u32 swqe_num = pr->swqe_id_counter; | |
1812 | ehea_xmit3(skb, dev, swqe); | |
1813 | swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) | |
1814 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); | |
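/* Request a completion only for every sig_iv-th WQE so TX
 * completion interrupts are batched; e.g. sq_entries = 1023
 * yields sig_comp_iv = 102 (set in ehea_setup_single_port),
 * i.e. roughly one signalled completion per 102 short frames. */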
1815 | if (pr->swqe_ll_count >= (sig_iv - 1)) { | |
1816 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | |
1817 | sig_iv); | |
1818 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | |
1819 | pr->swqe_ll_count = 0; | |
1820 | } else | |
1821 | pr->swqe_ll_count += 1; | |
1822 | } else { | |
1823 | swqe->wr_id = | |
1824 | EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) | |
1825 | | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) | |
1826 | | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); | |
1827 | pr->sq_skba.arr[pr->sq_skba.index] = skb; | |
1828 | ||
1829 | pr->sq_skba.index++; | |
1830 | pr->sq_skba.index &= (pr->sq_skba.len - 1); | |
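/* sq_skba.len is a power of two (as the mask implies), so the
 * AND wraps the ring index: with len = 512, 511 + 1 -> 0. */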
1831 | ||
1832 | lkey = pr->send_mr.lkey; | |
1833 | ehea_xmit2(skb, dev, swqe, lkey); | |
1834 | ||
1835 | if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) { | |
1836 | swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, | |
1837 | EHEA_SIG_IV_LONG); | |
1838 | swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; | |
1839 | pr->swqe_count = 0; | |
1840 | } else | |
1841 | pr->swqe_count += 1; | |
1842 | } | |
1843 | pr->swqe_id_counter += 1; | |
1844 | ||
1845 | if (port->vgrp && vlan_tx_tag_present(skb)) { | |
1846 | swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; | |
1847 | swqe->vlan_tag = vlan_tx_tag_get(skb); | |
1848 | } | |
1849 | ||
1850 | if (netif_msg_tx_queued(port)) { | |
1851 | ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); | |
bff0a55f | 1852 | ehea_dump(swqe, 512, "swqe"); |
7a291083 JBT |
1853 | } |
1854 | ||
1855 | ehea_post_swqe(pr->qp, swqe); | |
1856 | pr->tx_packets++; | |
1857 | ||
1858 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | |
1859 | spin_lock_irqsave(&pr->netif_queue, flags); | |
1860 | if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { | |
1861 | netif_stop_queue(dev); | |
1862 | pr->queue_stopped = 1; | |
1863 | } | |
1864 | spin_unlock_irqrestore(&pr->netif_queue, flags); | |
1865 | } | |
1866 | dev->trans_start = jiffies; | |
1867 | spin_unlock(&pr->xmit_lock); | |
1868 | ||
1869 | return NETDEV_TX_OK; | |
1870 | } | |
1871 | ||
1872 | static void ehea_vlan_rx_register(struct net_device *dev, | |
1873 | struct vlan_group *grp) | |
1874 | { | |
1875 | struct ehea_port *port = netdev_priv(dev); | |
1876 | struct ehea_adapter *adapter = port->adapter; | |
1877 | struct hcp_ehea_port_cb1 *cb1; | |
1878 | u64 hret; | |
1879 | ||
1880 | port->vgrp = grp; | |
1881 | ||
a1d261c5 | 1882 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1883 | if (!cb1) { |
1884 | ehea_error("no mem for cb1"); | |
1885 | goto out; | |
1886 | } | |
1887 | ||
1888 | if (grp) | |
1889 | memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); | |
1890 | else | |
1891 | memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter)); | |
1892 | ||
1893 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1894 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1895 | if (hret != H_SUCCESS) | |
1896 | ehea_error("modify_ehea_port failed"); | |
1897 | ||
1898 | kfree(cb1); | |
1899 | out: | |
1900 | return; | |
1901 | } | |
1902 | ||
1903 | static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |
1904 | { | |
1905 | struct ehea_port *port = netdev_priv(dev); | |
1906 | struct ehea_adapter *adapter = port->adapter; | |
1907 | struct hcp_ehea_port_cb1 *cb1; | |
1908 | int index; | |
1909 | u64 hret; | |
1910 | ||
a1d261c5 | 1911 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1912 | if (!cb1) { |
1913 | ehea_error("no mem for cb1"); | |
1914 | goto out; | |
1915 | } | |
1916 | ||
1917 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | |
1918 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1919 | if (hret != H_SUCCESS) { | |
1920 | ehea_error("query_ehea_port failed"); | |
1921 | goto out; | |
1922 | } | |
1923 | ||
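/* One bit per VLAN ID: word = vid / 64, bit = vid % 64, so e.g.
 * vid 100 lands in vlan_filter[1], bit 36. The array is presumably
 * sized to cover the full 4096-ID VLAN space. */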
1924 | index = (vid / 64); | |
1925 | cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F)); | |
1926 | ||
1927 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1928 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1929 | if (hret != H_SUCCESS) | |
1930 | ehea_error("modify_ehea_port failed"); | |
1931 | out: | |
1932 | kfree(cb1); | |
1933 | return; | |
1934 | } | |
1935 | ||
1936 | static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |
1937 | { | |
1938 | struct ehea_port *port = netdev_priv(dev); | |
1939 | struct ehea_adapter *adapter = port->adapter; | |
1940 | struct hcp_ehea_port_cb1 *cb1; | |
1941 | int index; | |
1942 | u64 hret; | |
1943 | ||
5c15bdec | 1944 | vlan_group_set_device(port->vgrp, vid, NULL); |
7a291083 | 1945 | |
a1d261c5 | 1946 | cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1947 | if (!cb1) { |
1948 | ehea_error("no mem for cb1"); | |
1949 | goto out; | |
1950 | } | |
1951 | ||
1952 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | |
1953 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1954 | if (hret != H_SUCCESS) { | |
1955 | ehea_error("query_ehea_port failed"); | |
1956 | goto out; | |
1957 | } | |
1958 | ||
1959 | index = (vid / 64); | |
1960 | cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F)); | |
1961 | ||
1962 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | |
1963 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | |
1964 | if (hret != H_SUCCESS) | |
1965 | ehea_error("modify_ehea_port failed"); | |
1966 | out: | |
1967 | kfree(cb1); | |
1968 | return; | |
1969 | } | |
1970 | ||
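/*
 * Walk the QP through its activation ladder using query/modify pairs:
 * INITIALIZED, then ENABLED | INITIALIZED, then ENABLED | RDY2SND.
 * Each query re-reads the control block, so every modify hcall changes
 * only qp_ctl_reg relative to the current firmware state.
 */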
1971 | int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |
1972 | { | |
1973 | int ret = -EIO; | |
1974 | u64 hret; | |
1975 | u16 dummy16 = 0; | |
1976 | u64 dummy64 = 0; | |
1977 | struct hcp_modify_qp_cb0 *cb0; | |
1978 | ||
a1d261c5 | 1979 | cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
1980 | if (!cb0) { |
1981 | ret = -ENOMEM; | |
1982 | goto out; | |
1983 | } | |
1984 | ||
1985 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
1986 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
1987 | if (hret != H_SUCCESS) { | |
1988 | ehea_error("query_ehea_qp failed (1)"); | |
1989 | goto out; | |
1990 | } | |
1991 | ||
1992 | cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; | |
1993 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
1994 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
1995 | &dummy64, &dummy64, &dummy16, &dummy16); | |
1996 | if (hret != H_SUCCESS) { | |
1997 | ehea_error("modify_ehea_qp failed (1)"); | |
1998 | goto out; | |
1999 | } | |
2000 | ||
2001 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2002 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
2003 | if (hret != H_SUCCESS) { | |
2004 | ehea_error("query_ehea_qp failed (2)"); | |
2005 | goto out; | |
2006 | } | |
2007 | ||
2008 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; | |
2009 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2010 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
2011 | &dummy64, &dummy64, &dummy16, &dummy16); | |
2012 | if (hret != H_SUCCESS) { | |
2013 | ehea_error("modify_ehea_qp failed (2)"); | |
2014 | goto out; | |
2015 | } | |
2016 | ||
2017 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2018 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
2019 | if (hret != H_SUCCESS) { | |
2020 | ehea_error("query_ehea_qp failed (3)"); | |
2021 | goto out; | |
2022 | } | |
2023 | ||
2024 | cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; | |
2025 | hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2026 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | |
2027 | &dummy64, &dummy64, &dummy16, &dummy16); | |
2028 | if (hret != H_SUCCESS) { | |
2029 | ehea_error("modify_ehea_qp failed (3)"); | |
2030 | goto out; | |
2031 | } | |
2032 | ||
2033 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | |
2034 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | |
2035 | if (hret != H_SUCCESS) { | |
2036 | ehea_error("query_ehea_qp failed (4)"); | |
2037 | goto out; | |
2038 | } | |
2039 | ||
2040 | ret = 0; | |
2041 | out: | |
2042 | kfree(cb0); | |
2043 | return ret; | |
2044 | } | |
2045 | ||
2046 | static int ehea_port_res_setup(struct ehea_port *port, int def_qps, | |
2047 | int add_tx_qps) | |
2048 | { | |
2049 | int ret, i; | |
2050 | struct port_res_cfg pr_cfg, pr_cfg_small_rx; | |
2051 | enum ehea_eq_type eq_type = EHEA_EQ; | |
2052 | ||
2053 | port->qp_eq = ehea_create_eq(port->adapter, eq_type, | |
2054 | EHEA_MAX_ENTRIES_EQ, 1); | |
2055 | if (!port->qp_eq) { | |
2056 | /* nothing to unwind yet: out_kill_eq would destroy a NULL EQ */ | |
2057 | ehea_error("ehea_create_eq failed (qp_eq)"); | |
2058 | return -EINVAL; | |
2059 | } | |
2060 | ||
2061 | pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; | |
2062 | pr_cfg.max_entries_scq = sq_entries; | |
2063 | pr_cfg.max_entries_sq = sq_entries; | |
2064 | pr_cfg.max_entries_rq1 = rq1_entries; | |
2065 | pr_cfg.max_entries_rq2 = rq2_entries; | |
2066 | pr_cfg.max_entries_rq3 = rq3_entries; | |
2067 | ||
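/* The additional TX-only QPs never receive: their receive queues
 * and receive CQ are sized to a single entry, while the send side
 * keeps the full sq_entries depth. */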
2068 | pr_cfg_small_rx.max_entries_rcq = 1; | |
2069 | pr_cfg_small_rx.max_entries_scq = sq_entries; | |
2070 | pr_cfg_small_rx.max_entries_sq = sq_entries; | |
2071 | pr_cfg_small_rx.max_entries_rq1 = 1; | |
2072 | pr_cfg_small_rx.max_entries_rq2 = 1; | |
2073 | pr_cfg_small_rx.max_entries_rq3 = 1; | |
2074 | ||
2075 | for (i = 0; i < def_qps; i++) { | |
2076 | ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); | |
2077 | if (ret) | |
2078 | goto out_clean_pr; | |
2079 | } | |
2080 | for (i = def_qps; i < def_qps + add_tx_qps; i++) { | |
2081 | ret = ehea_init_port_res(port, &port->port_res[i], | |
2082 | &pr_cfg_small_rx, i); | |
2083 | if (ret) | |
2084 | goto out_clean_pr; | |
2085 | } | |
2086 | ||
2087 | return 0; | |
2088 | ||
2089 | out_clean_pr: | |
2090 | while (--i >= 0) | |
2091 | ehea_clean_portres(port, &port->port_res[i]); | |
2092 | ||
2093 | out_kill_eq: | |
2094 | ehea_destroy_eq(port->qp_eq); | |
2095 | return ret; | |
2096 | } | |
2097 | ||
2098 | static int ehea_clean_all_portres(struct ehea_port *port) | |
2099 | { | |
2100 | int ret = 0; | |
2101 | int i; | |
2102 | ||
2103 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | |
2104 | ret |= ehea_clean_portres(port, &port->port_res[i]); | |
2105 | ||
2106 | ret |= ehea_destroy_eq(port->qp_eq); | |
2107 | ||
2108 | return ret; | |
2109 | } | |
2110 | ||
2111 | static int ehea_up(struct net_device *dev) | |
2112 | { | |
2113 | int ret, i; | |
2114 | struct ehea_port *port = netdev_priv(dev); | |
2115 | u64 mac_addr = 0; | |
2116 | ||
2117 | if (port->state == EHEA_PORT_UP) | |
2118 | return 0; | |
2119 | ||
2120 | ret = ehea_port_res_setup(port, port->num_def_qps, | |
2121 | port->num_add_tx_qps); | |
2122 | if (ret) { | |
2123 | ehea_error("port_res_failed"); | |
2124 | goto out; | |
2125 | } | |
2126 | ||
2127 | /* Set default QP for this port */ | |
2128 | ret = ehea_configure_port(port); | |
2129 | if (ret) { | |
2130 | ehea_error("ehea_configure_port failed. ret:%d", ret); | |
2131 | goto out_clean_pr; | |
2132 | } | |
2133 | ||
2134 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | |
2135 | if (ret) { | |
2136 | ret = -EIO; | |
2137 | ehea_error("out_clean_pr"); | |
2138 | goto out_clean_pr; | |
2139 | } | |
2140 | mac_addr = (*(u64*)dev->dev_addr) >> 16; | |
2141 | ||
2142 | ret = ehea_reg_interrupts(dev); | |
2143 | if (ret) { | |
2144 | ehea_error("out_dereg_bc"); | |
2145 | goto out_dereg_bc; | |
2146 | } | |
2147 | ||
2148 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | |
2149 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); | |
2150 | if (ret) { | |
2151 | ehea_error("activate_qp failed"); | |
2152 | goto out_free_irqs; | |
2153 | } | |
2154 | } | |
2155 | ||
2156 | for (i = 0; i < port->num_def_qps; i++) { | |
2157 | ret = ehea_fill_port_res(&port->port_res[i]); | |
2158 | if (ret) { | |
2159 | ehea_error("out_free_irqs"); | |
2160 | goto out_free_irqs; | |
2161 | } | |
2162 | } | |
2163 | ||
2164 | ret = 0; | |
2165 | port->state = EHEA_PORT_UP; | |
2166 | goto out; | |
2167 | ||
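/* Error unwind: release resources in reverse order of setup. */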
2168 | out_free_irqs: | |
2169 | ehea_free_interrupts(dev); | |
2170 | ||
2171 | out_dereg_bc: | |
2172 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | |
2173 | ||
2174 | out_clean_pr: | |
2175 | ehea_clean_all_portres(port); | |
2176 | out: | |
2177 | return ret; | |
2178 | } | |
2179 | ||
2180 | static int ehea_open(struct net_device *dev) | |
2181 | { | |
2182 | int ret; | |
2183 | struct ehea_port *port = netdev_priv(dev); | |
2184 | ||
2185 | down(&port->port_lock); | |
2186 | ||
2187 | if (netif_msg_ifup(port)) | |
2188 | ehea_info("enabling port %s", dev->name); | |
2189 | ||
2190 | ret = ehea_up(dev); | |
2191 | if (!ret) | |
2192 | netif_start_queue(dev); | |
2193 | ||
2194 | up(&port->port_lock); | |
2195 | ||
2196 | return ret; | |
2197 | } | |
2198 | ||
2199 | static int ehea_down(struct net_device *dev) | |
2200 | { | |
2201 | int ret, i; | |
2202 | struct ehea_port *port = netdev_priv(dev); | |
2203 | ||
2204 | if (port->state == EHEA_PORT_DOWN) | |
2205 | return 0; | |
2206 | ||
2207 | ehea_drop_multicast_list(dev); | |
2208 | ehea_free_interrupts(dev); | |
2209 | ||
2210 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) | |
2211 | tasklet_kill(&port->port_res[i].send_comp_task); | |
2212 | ||
2213 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | |
2214 | ret = ehea_clean_all_portres(port); | |
2215 | port->state = EHEA_PORT_DOWN; | |
2216 | return ret; | |
2217 | } | |
2218 | ||
2219 | static int ehea_stop(struct net_device *dev) | |
2220 | { | |
2221 | int ret; | |
2222 | struct ehea_port *port = netdev_priv(dev); | |
2223 | ||
2224 | if (netif_msg_ifdown(port)) | |
2225 | ehea_info("disabling port %s", dev->name); | |
2226 | ||
2227 | flush_workqueue(port->adapter->ehea_wq); | |
2228 | down(&port->port_lock); | |
2229 | netif_stop_queue(dev); | |
2230 | ret = ehea_down(dev); | |
2231 | up(&port->port_lock); | |
2232 | return ret; | |
2233 | } | |
2234 | ||
c4028958 | 2235 | static void ehea_reset_port(struct work_struct *work) |
7a291083 JBT |
2236 | { |
2237 | int ret; | |
c4028958 DH |
2238 | struct ehea_port *port = |
2239 | container_of(work, struct ehea_port, reset_task); | |
2240 | struct net_device *dev = port->netdev; | |
7a291083 JBT |
2241 | |
2242 | port->resets++; | |
2243 | down(&port->port_lock); | |
2244 | netif_stop_queue(dev); | |
2245 | netif_poll_disable(dev); | |
2246 | ||
2247 | ret = ehea_down(dev); | |
2248 | if (ret) | |
2249 | ehea_error("ehea_down failed. not all resources are freed"); | |
2250 | ||
2251 | ret = ehea_up(dev); | |
2252 | if (ret) { | |
2253 | ehea_error("Reset device %s failed: ret=%d", dev->name, ret); | |
2254 | goto out; | |
2255 | } | |
2256 | ||
2257 | if (netif_msg_timer(port)) | |
2258 | ehea_info("Device %s resetted successfully", dev->name); | |
2259 | ||
2260 | netif_poll_enable(dev); | |
2261 | netif_wake_queue(dev); | |
2262 | out: | |
2263 | up(&port->port_lock); | |
2264 | return; | |
2265 | } | |
2266 | ||
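/*
 * Invoked by the networking core when the TX queue has been stopped
 * longer than dev->watchdog_timeo; schedule a full port reset, but only
 * if the link is actually up.
 */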
2267 | static void ehea_tx_watchdog(struct net_device *dev) | |
2268 | { | |
2269 | struct ehea_port *port = netdev_priv(dev); | |
2270 | ||
2271 | if (netif_carrier_ok(dev)) | |
2272 | queue_work(port->adapter->ehea_wq, &port->reset_task); | |
2273 | } | |
2274 | ||
2275 | int ehea_sense_adapter_attr(struct ehea_adapter *adapter) | |
2276 | { | |
2277 | struct hcp_query_ehea *cb; | |
4e996b32 TK |
2278 | struct device_node *lhea_dn = NULL; |
2279 | struct device_node *eth_dn = NULL; | |
7a291083 JBT |
2280 | u64 hret; |
2281 | int ret; | |
2282 | ||
a1d261c5 | 2283 | cb = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
2284 | if (!cb) { |
2285 | ret = -ENOMEM; | |
2286 | goto out; | |
2287 | } | |
2288 | ||
2289 | hret = ehea_h_query_ehea(adapter->handle, cb); | |
2290 | ||
2291 | if (hret != H_SUCCESS) { | |
2292 | ret = -EIO; | |
2293 | goto out_herr; | |
2294 | } | |
2295 | ||
4e996b32 TK |
2296 | /* Determine the number of available logical ports |
2297 | * by counting the child nodes of the lhea OFDT entry | |
2298 | */ | |
2299 | adapter->num_ports = 0; | |
2300 | lhea_dn = of_find_node_by_name(lhea_dn, "lhea"); | |
2301 | do { | |
2302 | eth_dn = of_get_next_child(lhea_dn, eth_dn); | |
2303 | if (eth_dn) | |
2304 | adapter->num_ports++; | |
2305 | } while (eth_dn); | |
2306 | of_node_put(lhea_dn); | |
2307 | ||
7a291083 JBT |
2308 | adapter->max_mc_mac = cb->max_mc_mac - 1; |
2309 | ret = 0; | |
2310 | ||
2311 | out_herr: | |
2312 | kfree(cb); | |
2313 | out: | |
2314 | return ret; | |
2315 | } | |
2316 | ||
2317 | static int ehea_setup_single_port(struct ehea_port *port, | |
2318 | struct device_node *dn) | |
2319 | { | |
2320 | int ret; | |
2321 | u64 hret; | |
2322 | struct net_device *dev = port->netdev; | |
2323 | struct ehea_adapter *adapter = port->adapter; | |
2324 | struct hcp_ehea_port_cb4 *cb4; | |
2325 | u32 *dn_log_port_id; | |
9c750b7d | 2326 | int jumbo = 0; |
7a291083 JBT |
2327 | |
2328 | sema_init(&port->port_lock, 1); | |
2329 | port->state = EHEA_PORT_DOWN; | |
2330 | port->sig_comp_iv = sq_entries / 10; | |
2331 | ||
2332 | if (!dn) { | |
2333 | ehea_error("bad device node: dn=%p", dn); | |
2334 | ret = -EINVAL; | |
2335 | goto out; | |
2336 | } | |
2337 | ||
2338 | port->of_dev_node = dn; | |
2339 | ||
2340 | /* Determine logical port id */ | |
2341 | dn_log_port_id = (u32*)get_property(dn, "ibm,hea-port-no", NULL); | |
2342 | ||
2343 | if (!dn_log_port_id) { | |
2344 | ehea_error("bad device node: dn_log_port_id=%p", | |
2345 | dn_log_port_id); | |
2346 | ret = -EINVAL; | |
2347 | goto out; | |
2348 | } | |
2349 | port->logical_port_id = *dn_log_port_id; | |
2350 | ||
2351 | port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); | |
2352 | if (!port->mc_list) { | |
2353 | ret = -ENOMEM; | |
2354 | goto out; | |
2355 | } | |
2356 | ||
2357 | INIT_LIST_HEAD(&port->mc_list->list); | |
2358 | ||
7a291083 JBT |
2359 | ret = ehea_sense_port_attr(port); |
2360 | if (ret) | |
2361 | goto out; | |
2362 | ||
2363 | /* Enable Jumbo frames */ | |
a1d261c5 | 2364 | cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL); |
7a291083 JBT |
2365 | if (!cb4) { |
2366 | ehea_error("no mem for cb4"); | |
2367 | } else { | |
9c750b7d TK |
2368 | hret = ehea_h_query_ehea_port(adapter->handle, |
2369 | port->logical_port_id, | |
2370 | H_PORT_CB4, | |
2371 | H_PORT_CB4_JUMBO, cb4); | |
2372 | ||
2373 | if (hret == H_SUCCESS) { | |
2374 | if (cb4->jumbo_frame) | |
2375 | jumbo = 1; | |
2376 | else { | |
2377 | cb4->jumbo_frame = 1; | |
2378 | hret = ehea_h_modify_ehea_port(adapter->handle, | |
2379 | port->logical_port_id, | |
2380 | H_PORT_CB4, | |
2381 | H_PORT_CB4_JUMBO, cb4); | |
2384 | if (hret == H_SUCCESS) | |
2385 | jumbo = 1; | |
2386 | } | |
7a291083 JBT |
2387 | } |
2388 | kfree(cb4); | |
2389 | } | |
2390 | ||
2391 | /* initialize net_device structure */ | |
2392 | SET_MODULE_OWNER(dev); | |
2393 | ||
2394 | memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); | |
2395 | ||
2396 | dev->open = ehea_open; | |
2397 | dev->poll = ehea_poll; | |
2398 | dev->weight = 64; | |
2399 | dev->stop = ehea_stop; | |
2400 | dev->hard_start_xmit = ehea_start_xmit; | |
2401 | dev->get_stats = ehea_get_stats; | |
2402 | dev->set_multicast_list = ehea_set_multicast_list; | |
2403 | dev->set_mac_address = ehea_set_mac_addr; | |
2404 | dev->change_mtu = ehea_change_mtu; | |
2405 | dev->vlan_rx_register = ehea_vlan_rx_register; | |
2406 | dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid; | |
2407 | dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid; | |
2408 | dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO | |
2409 | | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | |
2410 | | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | |
2411 | | NETIF_F_LLTX; | |
2412 | dev->tx_timeout = &ehea_tx_watchdog; | |
2413 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | |
2414 | ||
c4028958 | 2415 | INIT_WORK(&port->reset_task, ehea_reset_port); |
7a291083 JBT |
2416 | |
2417 | ehea_set_ethtool_ops(dev); | |
2418 | ||
2419 | ret = register_netdev(dev); | |
2420 | if (ret) { | |
2421 | ehea_error("register_netdev failed. ret=%d", ret); | |
2422 | goto out_free; | |
2423 | } | |
2424 | ||
9c750b7d TK |
2425 | ehea_info("%s: Jumbo frames are %sabled", dev->name, |
2426 | jumbo == 1 ? "en" : "dis"); | |
2427 | ||
7a291083 JBT |
2428 | port->netdev = dev; |
2429 | ret = 0; | |
2430 | goto out; | |
2431 | ||
2432 | out_free: | |
2433 | kfree(port->mc_list); | |
2434 | out: | |
2435 | return ret; | |
2436 | } | |
2437 | ||
2438 | static int ehea_setup_ports(struct ehea_adapter *adapter) | |
2439 | { | |
2440 | int ret; | |
2441 | int port_setup_ok = 0; | |
2442 | struct ehea_port *port; | |
2443 | struct device_node *dn = NULL; | |
2444 | struct net_device *dev; | |
2445 | int i; | |
2446 | ||
2447 | /* get port properties for all ports */ | |
2448 | for (i = 0; i < adapter->num_ports; i++) { | |
2449 | ||
2450 | if (adapter->port[i]) | |
2451 | continue; /* port already up and running */ | |
2452 | ||
2453 | /* allocate memory for the port structures */ | |
2454 | dev = alloc_etherdev(sizeof(struct ehea_port)); | |
2455 | ||
2456 | if (!dev) { | |
2457 | ehea_error("no mem for net_device"); | |
2458 | break; | |
2459 | } | |
2460 | ||
2461 | port = netdev_priv(dev); | |
2462 | port->adapter = adapter; | |
2463 | port->netdev = dev; | |
2464 | adapter->port[i] = port; | |
2465 | port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); | |
2466 | ||
2467 | dn = of_find_node_by_name(dn, "ethernet"); | |
2468 | ret = ehea_setup_single_port(port, dn); | |
2469 | if (ret) { | |
2470 | /* Free mem for this port struct. The others will be | |
2471 | processed on rollback */ | |
2472 | free_netdev(dev); | |
2473 | adapter->port[i] = NULL; | |
2474 | ehea_error("eHEA port %d setup failed, ret=%d", i, ret); | |
2475 | } | |
2476 | } | |
2477 | ||
2478 | of_node_put(dn); | |
2479 | ||
2480 | /* Check for successfully set up ports */ | |
2481 | for (i = 0; i < adapter->num_ports; i++) | |
2482 | if (adapter->port[i]) | |
2483 | port_setup_ok++; | |
2484 | ||
2485 | if (port_setup_ok) | |
2486 | ret = 0; /* At least some ports are setup correctly */ | |
2487 | else | |
2488 | ret = -EINVAL; | |
2489 | ||
2490 | return ret; | |
2491 | } | |
2492 | ||
2493 | static int __devinit ehea_probe(struct ibmebus_dev *dev, | |
2494 | const struct of_device_id *id) | |
2495 | { | |
2496 | struct ehea_adapter *adapter; | |
2497 | u64 *adapter_handle; | |
2498 | int ret; | |
2499 | ||
2500 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | |
2501 | if (!adapter) { | |
2502 | ret = -ENOMEM; | |
2503 | dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n"); | |
2504 | goto out; | |
2505 | } | |
2506 | ||
2507 | adapter_handle = (u64*)get_property(dev->ofdev.node, "ibm,hea-handle", | |
2508 | NULL); | |
061bf3cd TK |
2509 | if (adapter_handle) |
2510 | adapter->handle = *adapter_handle; | |
2511 | ||
2512 | if (!adapter->handle) { | |
7a291083 JBT |
2513 | dev_err(&dev->ofdev.dev, "failed getting handle for adapter" |
2514 | " '%s'\n", dev->ofdev.node->full_name); | |
2515 | ret = -ENODEV; | |
2516 | goto out_free_ad; | |
2517 | } | |
2518 | ||
7a291083 JBT |
2519 | adapter->pd = EHEA_PD_ID; |
2520 | ||
2521 | dev->ofdev.dev.driver_data = adapter; | |
2522 | ||
2523 | ret = ehea_reg_mr_adapter(adapter); | |
2524 | if (ret) { | |
2525 | dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n"); | |
2526 | goto out_free_ad; | |
2527 | } | |
2528 | ||
2529 | /* initialize adapter and ports */ | |
2530 | /* get adapter properties */ | |
2531 | ret = ehea_sense_adapter_attr(adapter); | |
2532 | if (ret) { | |
2533 | dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); | |
2534 | goto out_free_res; | |
2535 | } | |
2536 | dev_info(&dev->ofdev.dev, "%d eHEA ports found\n", adapter->num_ports); | |
2537 | ||
2538 | adapter->neq = ehea_create_eq(adapter, | |
2539 | EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); | |
2540 | if (!adapter->neq) { | |
2541 | ret = -EIO; /* don't return stale 0 from a failed probe */ | |
2542 | dev_err(&dev->ofdev.dev, "NEQ creation failed"); | |
2543 | goto out_free_res; | |
2544 | } | |
2544 | ||
2545 | tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, | |
2546 | (unsigned long)adapter); | |
2547 | ||
2548 | ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1, | |
38515e90 | 2549 | ehea_interrupt_neq, IRQF_DISABLED, |
7a291083 JBT |
2550 | "ehea_neq", adapter); |
2551 | if (ret) { | |
2552 | dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed"); | |
2553 | goto out_kill_eq; | |
2554 | } | |
2555 | ||
2556 | adapter->ehea_wq = create_workqueue("ehea_wq"); | |
2557 | if (!adapter->ehea_wq) { | |
2558 | ret = -ENOMEM; /* was falling through with ret == 0 */ | |
2559 | goto out_free_irq; | |
2560 | } | |
2559 | ||
2560 | ret = ehea_setup_ports(adapter); | |
2561 | if (ret) { | |
2562 | dev_err(&dev->ofdev.dev, "setup_ports failed"); | |
2563 | goto out_kill_wq; | |
2564 | } | |
2565 | ||
2566 | ret = 0; | |
2567 | goto out; | |
2568 | ||
2569 | out_kill_wq: | |
2570 | destroy_workqueue(adapter->ehea_wq); | |
2571 | ||
2572 | out_free_irq: | |
2573 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | |
2574 | ||
2575 | out_kill_eq: | |
2576 | ehea_destroy_eq(adapter->neq); | |
2577 | ||
2578 | out_free_res: | |
2579 | ehea_h_free_resource(adapter->handle, adapter->mr.handle); | |
2580 | ||
2581 | out_free_ad: | |
2582 | kfree(adapter); | |
2583 | out: | |
2584 | return ret; | |
2585 | } | |
2586 | ||
2587 | static void ehea_shutdown_single_port(struct ehea_port *port) | |
2588 | { | |
2589 | unregister_netdev(port->netdev); | |
2590 | kfree(port->mc_list); | |
2591 | free_netdev(port->netdev); | |
2592 | } | |
2593 | ||
2594 | static int __devexit ehea_remove(struct ibmebus_dev *dev) | |
2595 | { | |
2596 | struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; | |
2597 | u64 hret; | |
2598 | int i; | |
2599 | ||
2600 | for (i = 0; i < adapter->num_ports; i++) | |
2601 | if (adapter->port[i]) { | |
2602 | ehea_shutdown_single_port(adapter->port[i]); | |
2603 | adapter->port[i] = NULL; | |
2604 | } | |
2605 | destroy_workqueue(adapter->ehea_wq); | |
2606 | ||
2607 | ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); | |
d4150a27 | 2608 | tasklet_kill(&adapter->neq_tasklet); |
7a291083 JBT |
2609 | |
2610 | ehea_destroy_eq(adapter->neq); | |
2611 | ||
2612 | hret = ehea_h_free_resource(adapter->handle, adapter->mr.handle); | |
2613 | if (hret) { | |
2614 | dev_err(&dev->ofdev.dev, "free_resource_mr failed"); | |
2615 | return -EIO; | |
2616 | } | |
2617 | kfree(adapter); | |
2618 | return 0; | |
2619 | } | |
2620 | ||
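/*
 * Sanity-check the module parameters. Only the min/max range is
 * enforced here; the parameter descriptions advertise values of the
 * form 2^x - 1 (e.g. 127, 1023, 16383), which the queue allocation
 * code presumably expects.
 */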
2621 | static int check_module_parm(void) | |
2622 | { | |
2623 | int ret = 0; | |
2624 | ||
2625 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || | |
2626 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { | |
2627 | ehea_info("Bad parameter: rq1_entries"); | |
2628 | ret = -EINVAL; | |
2629 | } | |
2630 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || | |
2631 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { | |
2632 | ehea_info("Bad parameter: rq2_entries"); | |
2633 | ret = -EINVAL; | |
2634 | } | |
2635 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || | |
2636 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { | |
2637 | ehea_info("Bad parameter: rq3_entries"); | |
2638 | ret = -EINVAL; | |
2639 | } | |
2640 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || | |
2641 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { | |
2642 | ehea_info("Bad parameter: sq_entries"); | |
2643 | ret = -EINVAL; | |
2644 | } | |
2645 | ||
2646 | return ret; | |
2647 | } | |
2648 | ||
2649 | static struct of_device_id ehea_device_table[] = { | |
2650 | { | |
2651 | .name = "lhea", | |
2652 | .compatible = "IBM,lhea", | |
2653 | }, | |
2654 | {}, | |
2655 | }; | |
2656 | ||
2657 | static struct ibmebus_driver ehea_driver = { | |
2658 | .name = "ehea", | |
2659 | .id_table = ehea_device_table, | |
2660 | .probe = ehea_probe, | |
2661 | .remove = ehea_remove, | |
2662 | }; | |
2663 | ||
2664 | int __init ehea_module_init(void) | |
2665 | { | |
2666 | int ret; | |
2667 | ||
2668 | printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", | |
2669 | DRV_VERSION); | |
2670 | ||
2671 | ret = check_module_parm(); | |
2672 | if (ret) | |
2673 | goto out; | |
2674 | ret = ibmebus_register_driver(&ehea_driver); | |
2675 | if (ret) | |
2676 | ehea_error("failed registering eHEA device driver on ebus"); | |
2677 | ||
2678 | out: | |
2679 | return ret; | |
2680 | } | |
2681 | ||
2682 | static void __exit ehea_module_exit(void) | |
2683 | { | |
2684 | ibmebus_unregister_driver(&ehea_driver); | |
2685 | } | |
2686 | ||
2687 | module_init(ehea_module_init); | |
2688 | module_exit(ehea_module_exit); |