/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2011-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove   = remove;
	o->optimize = optimize;
	o->execute  = exec;
	o->get      = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
			 "length of %d\n", exe_len);
}

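/*
 * Illustrative sketch (not part of the upstream file): how an owner object
 * wires up its execution queue. The callbacks named below are the real
 * MAC-rule ones registered later in this file; 'mac_obj' and 'qable_obj'
 * are assumed to exist.
 *
 *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_remove_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 */
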
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element (optimize it away) */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
					 "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create the next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned it means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
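
/*
 * Illustrative sketch (not part of the upstream file): the expected
 * add/step lifecycle for a queued command. Allocation of 'elem' and
 * filling of its cmd_data are assumed to have happened already.
 *
 *	rc = bnx2x_exe_queue_add(bp, &obj->exe_queue, elem, false);
 *	if (rc)
 *		return rc;	// elem was already freed on failure
 *
 *	// Send up to exe_chunk_len commands' worth of rules to the FW
 *	rc = bnx2x_exe_queue_step(bp, &obj->exe_queue, &ramrod_flags);
 *	// rc < 0: error; rc == 0: done; rc > 0: completion still pending
 */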

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
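
/*
 * Illustrative note (not part of the upstream file): a VLAN-MAC pair
 * consumes one credit from each pool, and both helpers above roll back on
 * partial failure so the pools stay balanced:
 *
 *	if (!bnx2x_get_credit_vlan_mac(o))
 *		return -EINVAL;		// neither pool was charged
 *	...
 *	bnx2x_put_credit_vlan_mac(o);	// returns both credits
 */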

static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
				int n, u8 *buf)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	u8 *next = buf;
	int counter = 0;

	/* traverse list */
	list_for_each_entry(pos, &o->head, link) {
		if (counter < n) {
			/* place leading zeroes in buffer */
			memset(next, 0, MAC_LEADING_ZERO_CNT);

			/* place mac after leading zeroes*/
			memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
			       ETH_ALEN);

			/* calculate address of next element and
			 * advance counter
			 */
			counter++;
			next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));

			DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
			   counter, next, pos->u.mac.mac);
		}
	}
	return counter * ETH_ALEN;
}

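/*
 * Illustrative sketch (not part of the upstream file): the buffer layout
 * produced above. With ETH_ALEN == 6 and u32 alignment, each entry
 * occupies ALIGN(6, 4) == 8 bytes: two zero bytes followed by the MAC.
 *
 *	buf[0..7]  = 00 00 m0 m1 m2 m3 m4 m5	// first MAC
 *	buf[8..15] = 00 00 m0 m1 m2 m3 m4 m5	// second MAC, etc.
 *
 * Note the return value counts only the MAC bytes (counter * ETH_ALEN),
 * not the padded size.
 */
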
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}


/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}

static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

/* LLH CAM line allocations */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 bool add, unsigned char *dev_addr, int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}

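/*
 * Illustrative sketch (not part of the upstream file): how a MAC lands in
 * the two write-back words above. For dev_addr = 00:10:18:aa:bb:cc:
 *
 *	wb_data[0] = 0x18aabbcc;	// bytes 2..5
 *	wb_data[1] = 0x00000010;	// bytes 0..1
 */
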
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}

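/*
 * Illustrative sketch (not part of the upstream file): the echo field
 * packs the software CID together with the pending-filter type, so the
 * completion handler can recover both from the cookie the FW echoes back:
 *
 *	u32 cid  = hdr->echo & BNX2X_SWCID_MASK;
 *	int type = hdr->echo >> BNX2X_SWCID_SHIFT;
 */
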
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When PF configuration of multiple unicast ETH MACs in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
	   add ? "add" : "delete", mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
	   add ? "setting" : "clearing",
	   mac, raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
	   vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}

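/*
 * Illustrative sketch (not part of the upstream file): walking the whole
 * registry with the cookie-based iterator above. 'p' is assumed to be a
 * prepared bnx2x_vlan_mac_ramrod_params with RAMROD_COMP_WAIT set.
 *
 *	struct bnx2x_vlan_mac_registry_elem *ppos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, p, &ppos);
 *		if (rc < 0)
 *			return rc;
 *	} while (ppos);	// NULL cookie means the last element was handled
 */
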
/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
				 "current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit, unless we were asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
				 "current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool, unless we were asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit on the destination, unless we were asked not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
				 union bnx2x_qable_obj *qo,
				 struct bnx2x_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		     &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return 0;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
	case BNX2X_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case BNX2X_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return -EINVAL;
	}

	if (rc != true)
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, the next bulk of pending
 *			commands is scheduled
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}

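/*
 * Illustrative note (not part of the upstream file): the optimizer
 * cancels complementary commands that are still queued. For example, if a
 * DEL for MAC X is pending and an ADD for the same X arrives before the
 * chunk is executed, the pending DEL is removed, its credit is
 * rebalanced, and neither command is sent to the FW:
 *
 *	queue: [DEL X]  + incoming ADD X  ->  queue: []  (rc = 1)
 */
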
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object the element belongs to
 * @elem:	execution queue element describing the command
 * @restore:	true if this is a restore flow
 * @re:		output parameter for the prepared registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:			bnx2x_qable_obj
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:		device handle
 * @p:		command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
				 "clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}

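/*
 * Illustrative sketch (not part of the upstream file): a typical
 * synchronous ADD through the interface above. 'mac_obj' is assumed to be
 * an initialized bnx2x_vlan_mac_obj and 'mac' a 6-byte address.
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// 0 on full completion
 */
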
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object
 * @vlan_mac_flags:	flags the elements to be deleted must match
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(bp, exeq->owner, exeq_pos);
			if (rc) {
				BNX2X_ERR("Failed to remove command\n");
				spin_unlock_bh(&exeq->lock);
				return rc;
			}
			list_del(&exeq_pos->link);
		}
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}

1880static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1881 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1882 unsigned long *pstate, bnx2x_obj_type type)
1883{
1884 raw->func_id = func_id;
1885 raw->cid = cid;
1886 raw->cl_id = cl_id;
1887 raw->rdata = rdata;
1888 raw->rdata_mapping = rdata_mapping;
1889 raw->state = state;
1890 raw->pstate = pstate;
1891 raw->obj_type = type;
1892 raw->check_pending = bnx2x_raw_check_pending;
1893 raw->clear_pending = bnx2x_raw_clear_pending;
1894 raw->set_pending = bnx2x_raw_set_pending;
1895 raw->wait_comp = bnx2x_raw_wait;
1896}
1897
1898static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1899 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1900 int state, unsigned long *pstate, bnx2x_obj_type type,
1901 struct bnx2x_credit_pool_obj *macs_pool,
1902 struct bnx2x_credit_pool_obj *vlans_pool)
1903{
1904 INIT_LIST_HEAD(&o->head);
1905
1906 o->macs_pool = macs_pool;
1907 o->vlans_pool = vlans_pool;
1908
1909 o->delete_all = bnx2x_vlan_mac_del_all;
1910 o->restore = bnx2x_vlan_mac_restore;
1911 o->complete = bnx2x_complete_vlan_mac;
1912 o->wait = bnx2x_wait_vlan_mac;
1913
1914 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1915 state, pstate, type);
1916}
1917
1918
1919void bnx2x_init_mac_obj(struct bnx2x *bp,
1920 struct bnx2x_vlan_mac_obj *mac_obj,
1921 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1922 dma_addr_t rdata_mapping, int state,
1923 unsigned long *pstate, bnx2x_obj_type type,
1924 struct bnx2x_credit_pool_obj *macs_pool)
1925{
1926 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1927
1928 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1929 rdata_mapping, state, pstate, type,
1930 macs_pool, NULL);
1931
1932 /* CAM credit pool handling */
1933 mac_obj->get_credit = bnx2x_get_credit_mac;
1934 mac_obj->put_credit = bnx2x_put_credit_mac;
1935 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1936 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1937
1938 if (CHIP_IS_E1x(bp)) {
1939 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1940 mac_obj->check_del = bnx2x_check_mac_del;
1941 mac_obj->check_add = bnx2x_check_mac_add;
1942 mac_obj->check_move = bnx2x_check_move_always_err;
1943 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1944
1945 /* Exe Queue */
1946 bnx2x_exe_queue_init(bp,
1947 &mac_obj->exe_queue, 1, qable_obj,
1948 bnx2x_validate_vlan_mac,
 1949				     bnx2x_remove_vlan_mac,
1950 bnx2x_optimize_vlan_mac,
1951 bnx2x_execute_vlan_mac,
1952 bnx2x_exeq_get_mac);
1953 } else {
1954 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1955 mac_obj->check_del = bnx2x_check_mac_del;
1956 mac_obj->check_add = bnx2x_check_mac_add;
1957 mac_obj->check_move = bnx2x_check_move;
1958 mac_obj->ramrod_cmd =
1959 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
 1960		mac_obj->get_n_elements = bnx2x_get_n_elements;
1961
1962 /* Exe Queue */
1963 bnx2x_exe_queue_init(bp,
1964 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1965 qable_obj, bnx2x_validate_vlan_mac,
 1966				     bnx2x_remove_vlan_mac,
1967 bnx2x_optimize_vlan_mac,
1968 bnx2x_execute_vlan_mac,
1969 bnx2x_exeq_get_mac);
1970 }
1971}
1972
1973void bnx2x_init_vlan_obj(struct bnx2x *bp,
1974 struct bnx2x_vlan_mac_obj *vlan_obj,
1975 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1976 dma_addr_t rdata_mapping, int state,
1977 unsigned long *pstate, bnx2x_obj_type type,
1978 struct bnx2x_credit_pool_obj *vlans_pool)
1979{
1980 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1981
1982 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1983 rdata_mapping, state, pstate, type, NULL,
1984 vlans_pool);
1985
1986 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1987 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1988 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1989 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1990
1991 if (CHIP_IS_E1x(bp)) {
 1992		BNX2X_ERR("Do not support chips other than E2 and newer\n");
1993 BUG();
1994 } else {
1995 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1996 vlan_obj->check_del = bnx2x_check_vlan_del;
1997 vlan_obj->check_add = bnx2x_check_vlan_add;
1998 vlan_obj->check_move = bnx2x_check_move;
1999 vlan_obj->ramrod_cmd =
2000 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2001
2002 /* Exe Queue */
2003 bnx2x_exe_queue_init(bp,
2004 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2005 qable_obj, bnx2x_validate_vlan_mac,
 2006				     bnx2x_remove_vlan_mac,
2007 bnx2x_optimize_vlan_mac,
2008 bnx2x_execute_vlan_mac,
2009 bnx2x_exeq_get_vlan);
2010 }
2011}
2012
2013void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
2014 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
2015 u8 cl_id, u32 cid, u8 func_id, void *rdata,
2016 dma_addr_t rdata_mapping, int state,
2017 unsigned long *pstate, bnx2x_obj_type type,
2018 struct bnx2x_credit_pool_obj *macs_pool,
2019 struct bnx2x_credit_pool_obj *vlans_pool)
2020{
2021 union bnx2x_qable_obj *qable_obj =
2022 (union bnx2x_qable_obj *)vlan_mac_obj;
2023
2024 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2025 rdata_mapping, state, pstate, type,
2026 macs_pool, vlans_pool);
2027
2028 /* CAM pool handling */
2029 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
2030 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
 2031	/*
 2032	 * CAM offset is relevant only for the 57710 and 57711 chips, which have
 2033	 * a single CAM for both MACs and VLAN-MAC pairs. So the offset
 2034	 * will be taken from the MACs' pool object only.
 2035	 */
2036 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
2037 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
2038
2039 if (CHIP_IS_E1(bp)) {
 2040		BNX2X_ERR("Do not support chips other than E2\n");
2041 BUG();
2042 } else if (CHIP_IS_E1H(bp)) {
2043 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
2044 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2045 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2046 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
2047 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2048
2049 /* Exe Queue */
2050 bnx2x_exe_queue_init(bp,
2051 &vlan_mac_obj->exe_queue, 1, qable_obj,
2052 bnx2x_validate_vlan_mac,
 2053				     bnx2x_remove_vlan_mac,
2054 bnx2x_optimize_vlan_mac,
2055 bnx2x_execute_vlan_mac,
2056 bnx2x_exeq_get_vlan_mac);
2057 } else {
2058 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
2059 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
2060 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
2061 vlan_mac_obj->check_move = bnx2x_check_move;
2062 vlan_mac_obj->ramrod_cmd =
2063 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2064
2065 /* Exe Queue */
2066 bnx2x_exe_queue_init(bp,
2067 &vlan_mac_obj->exe_queue,
2068 CLASSIFY_RULES_COUNT,
2069 qable_obj, bnx2x_validate_vlan_mac,
 2070				     bnx2x_remove_vlan_mac,
2071 bnx2x_optimize_vlan_mac,
2072 bnx2x_execute_vlan_mac,
2073 bnx2x_exeq_get_vlan_mac);
2074 }
2075
2076}
2077
2078/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2079static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2080 struct tstorm_eth_mac_filter_config *mac_filters,
2081 u16 pf_id)
2082{
2083 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2084
2085 u32 addr = BAR_TSTRORM_INTMEM +
2086 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2087
2088 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2089}
2090
2091static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2092 struct bnx2x_rx_mode_ramrod_params *p)
2093{
2094 /* update the bp MAC filter structure */
2095 u32 mask = (1 << p->cl_id);
2096
2097 struct tstorm_eth_mac_filter_config *mac_filters =
2098 (struct tstorm_eth_mac_filter_config *)p->rdata;
2099
 2100	/* initial setting is drop-all */
2101 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2102 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2103 u8 unmatched_unicast = 0;
2104
 2105	/* On E1x we only take the Rx accept flags into account since Tx
 2106	 * switching isn't enabled. */
2107 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2108 /* accept matched ucast */
2109 drop_all_ucast = 0;
2110
2111 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2112 /* accept matched mcast */
2113 drop_all_mcast = 0;
2114
2115 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
 2116		/* accept all ucast */
2117 drop_all_ucast = 0;
2118 accp_all_ucast = 1;
2119 }
2120 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2121 /* accept all mcast */
2122 drop_all_mcast = 0;
2123 accp_all_mcast = 1;
2124 }
2125 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2126 /* accept (all) bcast */
2127 accp_all_bcast = 1;
2128 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2129 /* accept unmatched unicasts */
2130 unmatched_unicast = 1;
2131
2132 mac_filters->ucast_drop_all = drop_all_ucast ?
2133 mac_filters->ucast_drop_all | mask :
2134 mac_filters->ucast_drop_all & ~mask;
2135
2136 mac_filters->mcast_drop_all = drop_all_mcast ?
2137 mac_filters->mcast_drop_all | mask :
2138 mac_filters->mcast_drop_all & ~mask;
2139
2140 mac_filters->ucast_accept_all = accp_all_ucast ?
2141 mac_filters->ucast_accept_all | mask :
2142 mac_filters->ucast_accept_all & ~mask;
2143
2144 mac_filters->mcast_accept_all = accp_all_mcast ?
2145 mac_filters->mcast_accept_all | mask :
2146 mac_filters->mcast_accept_all & ~mask;
2147
2148 mac_filters->bcast_accept_all = accp_all_bcast ?
2149 mac_filters->bcast_accept_all | mask :
2150 mac_filters->bcast_accept_all & ~mask;
2151
2152 mac_filters->unmatched_unicast = unmatched_unicast ?
2153 mac_filters->unmatched_unicast | mask :
2154 mac_filters->unmatched_unicast & ~mask;
2155
2156 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2157 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2158 mac_filters->ucast_drop_all,
2159 mac_filters->mcast_drop_all,
2160 mac_filters->ucast_accept_all,
2161 mac_filters->mcast_accept_all,
2162 mac_filters->bcast_accept_all);
2163
 2164	/* write the MAC filter structure */
2165 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2166
2167 /* The operation is completed */
2168 clear_bit(p->state, p->pstate);
2169 smp_mb__after_clear_bit();
2170
2171 return 0;
2172}
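/* Illustrative note (not from the original source): the per-client mask
 * logic above works on one bit per client. For a hypothetical cl_id of 3,
 * mask == (1 << 3) == 0x08, so e.g. with drop_all_ucast == 0 the update
 *
 *	mac_filters->ucast_drop_all &= ~0x08;
 *
 * clears only client 3's drop-all bit and leaves the filter state of all
 * other clients untouched.
 */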
2173
2174/* Setup ramrod data */
2175static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2176 struct eth_classify_header *hdr,
2177 u8 rule_cnt)
2178{
2179 hdr->echo = cid;
2180 hdr->rule_cnt = rule_cnt;
2181}
2182
2183static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2184 unsigned long accept_flags,
2185 struct eth_filter_rules_cmd *cmd,
2186 bool clear_accept_all)
2187{
2188 u16 state;
2189
2190 /* start with 'drop-all' */
2191 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2192 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2193
2194 if (accept_flags) {
2195 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2196 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2197
2198 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2199 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2200
2201 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2202 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2203 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2204 }
2205
2206 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2207 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2208 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2209 }
2210 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2211 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2212
2213 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2214 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2215 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2216 }
2217 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2218 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2219 }
2220
2221 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2222 if (clear_accept_all) {
2223 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2224 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2225 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2226 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2227 }
2228
2229 cmd->state = cpu_to_le16(state);
2230
2231}
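/* Illustrative sketch of the flag translation above (hypothetical input):
 * starting from UCAST_DROP_ALL | MCAST_DROP_ALL, a request with
 * BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_BROADCAST set would clear
 * UCAST_DROP_ALL and add BCAST_ACCEPT_ALL, i.e.
 *
 *	state = ETH_FILTER_RULES_CMD_MCAST_DROP_ALL |
 *		ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
 *
 * before being converted to little-endian via cpu_to_le16().
 */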
2232
2233static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2234 struct bnx2x_rx_mode_ramrod_params *p)
2235{
2236 struct eth_filter_rules_ramrod_data *data = p->rdata;
2237 int rc;
2238 u8 rule_idx = 0;
2239
2240 /* Reset the ramrod data buffer */
2241 memset(data, 0, sizeof(*data));
2242
2243 /* Setup ramrod data */
2244
2245 /* Tx (internal switching) */
2246 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2247 data->rules[rule_idx].client_id = p->cl_id;
2248 data->rules[rule_idx].func_id = p->func_id;
2249
2250 data->rules[rule_idx].cmd_general_data =
2251 ETH_FILTER_RULES_CMD_TX_CMD;
2252
2253 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2254 &(data->rules[rule_idx++]), false);
2255 }
2256
2257 /* Rx */
2258 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2259 data->rules[rule_idx].client_id = p->cl_id;
2260 data->rules[rule_idx].func_id = p->func_id;
2261
2262 data->rules[rule_idx].cmd_general_data =
2263 ETH_FILTER_RULES_CMD_RX_CMD;
2264
2265 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2266 &(data->rules[rule_idx++]), false);
2267 }
2268
2269
 2270	/*
 2271	 * If FCoE Queue configuration has been requested, configure the Rx and
 2272	 * internal switching modes for this queue in separate rules.
 2273	 *
 2274	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
 2275	 * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
 2276	 */
2277 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2278 /* Tx (internal switching) */
2279 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2280 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2281 data->rules[rule_idx].func_id = p->func_id;
2282
2283 data->rules[rule_idx].cmd_general_data =
2284 ETH_FILTER_RULES_CMD_TX_CMD;
2285
2286 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2287 &(data->rules[rule_idx++]),
2288 true);
2289 }
2290
2291 /* Rx */
2292 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2293 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2294 data->rules[rule_idx].func_id = p->func_id;
2295
2296 data->rules[rule_idx].cmd_general_data =
2297 ETH_FILTER_RULES_CMD_RX_CMD;
2298
2299 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2300 &(data->rules[rule_idx++]),
2301 true);
2302 }
2303 }
2304
2305 /*
2306 * Set the ramrod header (most importantly - number of rules to
2307 * configure).
2308 */
2309 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2310
2311 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2312 "tx_accept_flags 0x%lx\n",
2313 data->header.rule_cnt, p->rx_accept_flags,
2314 p->tx_accept_flags);
2315
 2316	/*
 2317	 * No need for an explicit memory barrier here: we only need to
 2318	 * order the write to the SPQ element against the update of the
 2319	 * SPQ producer, which involves a memory read, and a full
 2320	 * memory barrier will be put there anyway
 2321	 * (inside bnx2x_sp_post()).
 2322	 */
2323
2324 /* Send a ramrod */
2325 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2326 U64_HI(p->rdata_mapping),
2327 U64_LO(p->rdata_mapping),
2328 ETH_CONNECTION_TYPE);
2329 if (rc)
2330 return rc;
2331
2332 /* Ramrod completion is pending */
2333 return 1;
2334}
2335
2336static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2337 struct bnx2x_rx_mode_ramrod_params *p)
2338{
2339 return bnx2x_state_wait(bp, p->state, p->pstate);
2340}
2341
2342static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2343 struct bnx2x_rx_mode_ramrod_params *p)
2344{
2345 /* Do nothing */
2346 return 0;
2347}
2348
2349int bnx2x_config_rx_mode(struct bnx2x *bp,
2350 struct bnx2x_rx_mode_ramrod_params *p)
2351{
2352 int rc;
2353
2354 /* Configure the new classification in the chip */
2355 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2356 if (rc < 0)
2357 return rc;
2358
 2359	/* Wait for a ramrod completion if it was requested */
2360 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2361 rc = p->rx_mode_obj->wait_comp(bp, p);
2362 if (rc)
2363 return rc;
2364 }
2365
2366 return rc;
2367}
2368
2369void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2370 struct bnx2x_rx_mode_obj *o)
2371{
2372 if (CHIP_IS_E1x(bp)) {
2373 o->wait_comp = bnx2x_empty_rx_mode_wait;
2374 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2375 } else {
2376 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2377 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2378 }
2379}
2380
2381/********************* Multicast verbs: SET, CLEAR ****************************/
2382static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2383{
2384 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2385}
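/* Illustrative note: the bin index is simply the top byte of the 32-bit
 * little-endian CRC32C of the 6-byte MAC, so every multicast MAC maps to
 * one of 256 bins. Two different MACs whose CRCs share the top byte land
 * in the same bin - this is what makes the match "approximate".
 */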
2386
2387struct bnx2x_mcast_mac_elem {
2388 struct list_head link;
2389 u8 mac[ETH_ALEN];
2390 u8 pad[2]; /* For a natural alignment of the following buffer */
2391};
2392
2393struct bnx2x_pending_mcast_cmd {
2394 struct list_head link;
2395 int type; /* BNX2X_MCAST_CMD_X */
2396 union {
2397 struct list_head macs_head;
2398 u32 macs_num; /* Needed for DEL command */
 2399		int next_bin; /* Needed for RESTORE flow with approx match */
2400 } data;
2401
 2402	bool done; /* set to true when the command has been handled;
 2403		    * practically used in 57712 handling only, where one pending
 2404		    * command may be handled in a few operations. Since for
 2405		    * other chips every operation handling is completed in a
 2406		    * single ramrod, there is no need to utilize this field.
 2407		    */
2408};
2409
2410static int bnx2x_mcast_wait(struct bnx2x *bp,
2411 struct bnx2x_mcast_obj *o)
2412{
2413 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2414 o->raw.wait_comp(bp, &o->raw))
2415 return -EBUSY;
2416
2417 return 0;
2418}
2419
2420static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2421 struct bnx2x_mcast_obj *o,
2422 struct bnx2x_mcast_ramrod_params *p,
2423 int cmd)
2424{
2425 int total_sz;
2426 struct bnx2x_pending_mcast_cmd *new_cmd;
2427 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2428 struct bnx2x_mcast_list_elem *pos;
2429 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2430 p->mcast_list_len : 0);
2431
 2432	/* If the command is empty ("handle pending commands only"), return */
2433 if (!p->mcast_list_len)
2434 return 0;
2435
2436 total_sz = sizeof(*new_cmd) +
2437 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2438
2439 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2440 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2441
2442 if (!new_cmd)
2443 return -ENOMEM;
2444
2445 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2446 "macs_list_len=%d\n", cmd, macs_list_len);
2447
2448 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2449
2450 new_cmd->type = cmd;
2451 new_cmd->done = false;
2452
2453 switch (cmd) {
2454 case BNX2X_MCAST_CMD_ADD:
2455 cur_mac = (struct bnx2x_mcast_mac_elem *)
2456 ((u8 *)new_cmd + sizeof(*new_cmd));
2457
 2458		/* Push the MACs of the current command into the pending command's
 2459		 * MACs list: FIFO
 2460		 */
2461 list_for_each_entry(pos, &p->mcast_list, link) {
2462 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2463 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2464 cur_mac++;
2465 }
2466
2467 break;
2468
2469 case BNX2X_MCAST_CMD_DEL:
2470 new_cmd->data.macs_num = p->mcast_list_len;
2471 break;
2472
2473 case BNX2X_MCAST_CMD_RESTORE:
2474 new_cmd->data.next_bin = 0;
2475 break;
2476
2477 default:
2478 BNX2X_ERR("Unknown command: %d\n", cmd);
2479 return -EINVAL;
2480 }
2481
2482 /* Push the new pending command to the tail of the pending list: FIFO */
2483 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2484
2485 o->set_sched(o);
2486
2487 return 1;
2488}
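/* Illustrative note on the allocation above: the command header and its MAC
 * array are carved out of a single kzalloc'ed buffer. For a hypothetical ADD
 * of three MACs:
 *
 *	total_sz = sizeof(*new_cmd) + 3 * sizeof(struct bnx2x_mcast_mac_elem);
 *
 * cur_mac then points just past the header, and the loop links the three
 * trailing elements into new_cmd->data.macs_head.
 */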
2489
2490/**
2491 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2492 *
 2493 * @o: multicast object
2494 * @last: index to start looking from (including)
2495 *
2496 * returns the next found (set) bin or a negative value if none is found.
2497 */
2498static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2499{
2500 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2501
2502 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2503 if (o->registry.aprox_match.vec[i])
2504 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2505 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2506 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2507 vec, cur_bit)) {
2508 return cur_bit;
2509 }
2510 }
2511 inner_start = 0;
2512 }
2513
2514 /* None found */
2515 return -1;
2516}
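/* Illustrative walk-through (hypothetical registry contents): with
 * vec[0] == 0 and vec[1] == 0x09 (bits 0 and 3 set), a call with last == 0
 * skips the empty first u64 and returns 64 (bit 0 of vec[1]); calling
 * again with last == 65 returns 67 (bit 3 of vec[1]).
 */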
2517
2518/**
2519 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2520 *
 2521 * @o: multicast object
2522 *
2523 * returns the index of the found bin or -1 if none is found
2524 */
2525static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2526{
2527 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2528
2529 if (cur_bit >= 0)
2530 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2531
2532 return cur_bit;
2533}
2534
2535static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2536{
2537 struct bnx2x_raw_obj *raw = &o->raw;
2538 u8 rx_tx_flag = 0;
2539
2540 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2541 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2542 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2543
2544 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2545 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2546 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2547
2548 return rx_tx_flag;
2549}
2550
2551static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2552 struct bnx2x_mcast_obj *o, int idx,
2553 union bnx2x_mcast_config_data *cfg_data,
2554 int cmd)
2555{
2556 struct bnx2x_raw_obj *r = &o->raw;
2557 struct eth_multicast_rules_ramrod_data *data =
2558 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2559 u8 func_id = r->func_id;
2560 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2561 int bin;
2562
2563 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2564 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2565
2566 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2567
2568 /* Get a bin and update a bins' vector */
2569 switch (cmd) {
2570 case BNX2X_MCAST_CMD_ADD:
2571 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2572 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2573 break;
2574
2575 case BNX2X_MCAST_CMD_DEL:
2576 /* If there were no more bins to clear
2577 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2578 * clear any (0xff) bin.
2579 * See bnx2x_mcast_validate_e2() for explanation when it may
2580 * happen.
2581 */
2582 bin = bnx2x_mcast_clear_first_bin(o);
2583 break;
2584
2585 case BNX2X_MCAST_CMD_RESTORE:
2586 bin = cfg_data->bin;
2587 break;
2588
2589 default:
2590 BNX2X_ERR("Unknown command: %d\n", cmd);
2591 return;
2592 }
2593
2594 DP(BNX2X_MSG_SP, "%s bin %d\n",
2595 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2596 "Setting" : "Clearing"), bin);
2597
2598 data->rules[idx].bin_id = (u8)bin;
2599 data->rules[idx].func_id = func_id;
2600 data->rules[idx].engine_id = o->engine_id;
2601}
2602
2603/**
2604 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2605 *
2606 * @bp: device handle
 2607 * @o: multicast object
2608 * @start_bin: index in the registry to start from (including)
2609 * @rdata_idx: index in the ramrod data to start from
2610 *
2611 * returns last handled bin index or -1 if all bins have been handled
2612 */
2613static inline int bnx2x_mcast_handle_restore_cmd_e2(
2614 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2615 int *rdata_idx)
2616{
2617 int cur_bin, cnt = *rdata_idx;
2618 union bnx2x_mcast_config_data cfg_data = {0};
2619
2620 /* go through the registry and configure the bins from it */
2621 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2622 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2623
2624 cfg_data.bin = (u8)cur_bin;
2625 o->set_one_rule(bp, o, cnt, &cfg_data,
2626 BNX2X_MCAST_CMD_RESTORE);
2627
2628 cnt++;
2629
2630 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2631
2632 /* Break if we reached the maximum number
2633 * of rules.
2634 */
2635 if (cnt >= o->max_cmd_len)
2636 break;
2637 }
2638
2639 *rdata_idx = cnt;
2640
2641 return cur_bin;
2642}
2643
2644static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2645 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2646 int *line_idx)
2647{
2648 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2649 int cnt = *line_idx;
2650 union bnx2x_mcast_config_data cfg_data = {0};
2651
2652 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2653 link) {
2654
2655 cfg_data.mac = &pmac_pos->mac[0];
2656 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2657
2658 cnt++;
2659
 2660		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
 2661		   pmac_pos->mac);
2662
2663 list_del(&pmac_pos->link);
2664
2665 /* Break if we reached the maximum number
2666 * of rules.
2667 */
2668 if (cnt >= o->max_cmd_len)
2669 break;
2670 }
2671
2672 *line_idx = cnt;
2673
2674 /* if no more MACs to configure - we are done */
2675 if (list_empty(&cmd_pos->data.macs_head))
2676 cmd_pos->done = true;
2677}
2678
2679static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2680 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2681 int *line_idx)
2682{
2683 int cnt = *line_idx;
2684
2685 while (cmd_pos->data.macs_num) {
2686 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2687
2688 cnt++;
2689
2690 cmd_pos->data.macs_num--;
2691
 2692		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2693 cmd_pos->data.macs_num, cnt);
2694
2695 /* Break if we reached the maximum
2696 * number of rules.
2697 */
2698 if (cnt >= o->max_cmd_len)
2699 break;
2700 }
2701
2702 *line_idx = cnt;
2703
2704 /* If we cleared all bins - we are done */
2705 if (!cmd_pos->data.macs_num)
2706 cmd_pos->done = true;
2707}
2708
2709static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2710 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2711 int *line_idx)
2712{
2713 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2714 line_idx);
2715
2716 if (cmd_pos->data.next_bin < 0)
2717 /* If o->set_restore returned -1 we are done */
2718 cmd_pos->done = true;
2719 else
2720 /* Start from the next bin next time */
2721 cmd_pos->data.next_bin++;
2722}
2723
2724static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2725 struct bnx2x_mcast_ramrod_params *p)
2726{
2727 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2728 int cnt = 0;
2729 struct bnx2x_mcast_obj *o = p->mcast_obj;
2730
2731 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2732 link) {
2733 switch (cmd_pos->type) {
2734 case BNX2X_MCAST_CMD_ADD:
2735 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2736 break;
2737
2738 case BNX2X_MCAST_CMD_DEL:
2739 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2740 break;
2741
2742 case BNX2X_MCAST_CMD_RESTORE:
2743 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2744 &cnt);
2745 break;
2746
2747 default:
2748 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2749 return -EINVAL;
2750 }
2751
2752 /* If the command has been completed - remove it from the list
2753 * and free the memory
2754 */
2755 if (cmd_pos->done) {
2756 list_del(&cmd_pos->link);
2757 kfree(cmd_pos);
2758 }
2759
2760 /* Break if we reached the maximum number of rules */
2761 if (cnt >= o->max_cmd_len)
2762 break;
2763 }
2764
2765 return cnt;
2766}
2767
2768static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2769 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2770 int *line_idx)
2771{
2772 struct bnx2x_mcast_list_elem *mlist_pos;
2773 union bnx2x_mcast_config_data cfg_data = {0};
2774 int cnt = *line_idx;
2775
2776 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2777 cfg_data.mac = mlist_pos->mac;
2778 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2779
2780 cnt++;
2781
 2782		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
 2783		   mlist_pos->mac);
2784 }
2785
2786 *line_idx = cnt;
2787}
2788
2789static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2790 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2791 int *line_idx)
2792{
2793 int cnt = *line_idx, i;
2794
2795 for (i = 0; i < p->mcast_list_len; i++) {
2796 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2797
2798 cnt++;
2799
2800 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2801 p->mcast_list_len - i - 1);
2802 }
2803
2804 *line_idx = cnt;
2805}
2806
 2807/**
 2808 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
 2809 *
 2810 * @bp: device handle
 2811 * @p: mcast ramrod parameters
 2812 * @cmd: command to handle (BNX2X_MCAST_CMD_X)
 2813 * @start_cnt: first line in the ramrod data that may be used
 2814 *
 2815 * This function is called iff there is enough place for the current command in
 2816 * the ramrod data.
 2817 * Returns the number of lines filled in the ramrod data in total.
 2818 */
2819static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2820 struct bnx2x_mcast_ramrod_params *p, int cmd,
2821 int start_cnt)
2822{
2823 struct bnx2x_mcast_obj *o = p->mcast_obj;
2824 int cnt = start_cnt;
2825
2826 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2827
2828 switch (cmd) {
2829 case BNX2X_MCAST_CMD_ADD:
2830 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2831 break;
2832
2833 case BNX2X_MCAST_CMD_DEL:
2834 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2835 break;
2836
2837 case BNX2X_MCAST_CMD_RESTORE:
2838 o->hdl_restore(bp, o, 0, &cnt);
2839 break;
2840
2841 default:
2842 BNX2X_ERR("Unknown command: %d\n", cmd);
2843 return -EINVAL;
2844 }
2845
2846 /* The current command has been handled */
2847 p->mcast_list_len = 0;
2848
2849 return cnt;
2850}
2851
2852static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2853 struct bnx2x_mcast_ramrod_params *p,
2854 int cmd)
2855{
2856 struct bnx2x_mcast_obj *o = p->mcast_obj;
2857 int reg_sz = o->get_registry_size(o);
2858
2859 switch (cmd) {
2860 /* DEL command deletes all currently configured MACs */
2861 case BNX2X_MCAST_CMD_DEL:
2862 o->set_registry_size(o, 0);
2863 /* Don't break */
2864
2865 /* RESTORE command will restore the entire multicast configuration */
2866 case BNX2X_MCAST_CMD_RESTORE:
2867 /* Here we set the approximate amount of work to do, which in
2868 * fact may be only less as some MACs in postponed ADD
2869 * command(s) scheduled before this command may fall into
2870 * the same bin and the actual number of bins set in the
2871 * registry would be less than we estimated here. See
2872 * bnx2x_mcast_set_one_rule_e2() for further details.
2873 */
2874 p->mcast_list_len = reg_sz;
2875 break;
2876
2877 case BNX2X_MCAST_CMD_ADD:
2878 case BNX2X_MCAST_CMD_CONT:
2879 /* Here we assume that all new MACs will fall into new bins.
2880 * However we will correct the real registry size after we
2881 * handle all pending commands.
2882 */
2883 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2884 break;
2885
2886 default:
2887 BNX2X_ERR("Unknown command: %d\n", cmd);
2888 return -EINVAL;
2889
2890 }
2891
2892 /* Increase the total number of MACs pending to be configured */
2893 o->total_pending_num += p->mcast_list_len;
2894
2895 return 0;
2896}
2897
2898static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2899 struct bnx2x_mcast_ramrod_params *p,
2900 int old_num_bins)
2901{
2902 struct bnx2x_mcast_obj *o = p->mcast_obj;
2903
2904 o->set_registry_size(o, old_num_bins);
2905 o->total_pending_num -= p->mcast_list_len;
2906}
2907
2908/**
2909 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2910 *
2911 * @bp: device handle
2912 * @p:
2913 * @len: number of rules to handle
2914 */
2915static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2916 struct bnx2x_mcast_ramrod_params *p,
2917 u8 len)
2918{
2919 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2920 struct eth_multicast_rules_ramrod_data *data =
2921 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2922
2923 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2924 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2925 data->header.rule_cnt = len;
2926}
2927
 2928/**
 2929 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 2930 *
 2931 * @bp: device handle
 2932 * @o: multicast object
 2933 *
 2934 * Recalculate the actual number of set bins in the registry using Brian
 2935 * Kernighan's algorithm: its complexity scales with the number of set bins.
 2936 *
 2937 * returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
 2938 */
2939static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2940 struct bnx2x_mcast_obj *o)
2941{
2942 int i, cnt = 0;
2943 u64 elem;
2944
2945 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2946 elem = o->registry.aprox_match.vec[i];
2947 for (; elem; cnt++)
2948 elem &= elem - 1;
2949 }
2950
2951 o->set_registry_size(o, cnt);
2952
2953 return 0;
2954}
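/* Illustrative note: the inner loop above is Kernighan's popcount - each
 * "elem &= elem - 1" clears the lowest set bit. For a hypothetical
 * elem == 0xb (binary 1011) the loop runs exactly three times:
 * 0xb -> 0xa -> 0x8 -> 0, counting the three set bins.
 */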
2955
2956static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2957 struct bnx2x_mcast_ramrod_params *p,
2958 int cmd)
2959{
2960 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2961 struct bnx2x_mcast_obj *o = p->mcast_obj;
2962 struct eth_multicast_rules_ramrod_data *data =
2963 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2964 int cnt = 0, rc;
2965
2966 /* Reset the ramrod data buffer */
2967 memset(data, 0, sizeof(*data));
2968
2969 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2970
2971 /* If there are no more pending commands - clear SCHEDULED state */
2972 if (list_empty(&o->pending_cmds_head))
2973 o->clear_sched(o);
2974
2975 /* The below may be true iff there was enough room in ramrod
2976 * data for all pending commands and for the current
2977 * command. Otherwise the current command would have been added
2978 * to the pending commands and p->mcast_list_len would have been
2979 * zeroed.
2980 */
2981 if (p->mcast_list_len > 0)
2982 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2983
2984 /* We've pulled out some MACs - update the total number of
2985 * outstanding.
2986 */
2987 o->total_pending_num -= cnt;
2988
2989 /* send a ramrod */
2990 WARN_ON(o->total_pending_num < 0);
2991 WARN_ON(cnt > o->max_cmd_len);
2992
2993 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2994
2995 /* Update a registry size if there are no more pending operations.
2996 *
2997 * We don't want to change the value of the registry size if there are
2998 * pending operations because we want it to always be equal to the
2999 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
3000 * set bins after the last requested operation in order to properly
3001 * evaluate the size of the next DEL/RESTORE operation.
3002 *
3003 * Note that we update the registry itself during command(s) handling
3004 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
3005 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3006 * with a limited amount of update commands (per MAC/bin) and we don't
3007 * know in this scope what the actual state of bins configuration is
3008 * going to be after this ramrod.
3009 */
3010 if (!o->total_pending_num)
3011 bnx2x_mcast_refresh_registry_e2(bp, o);
3012
 3013	/*
 3014	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
 3015	 * RAMROD_PENDING status immediately.
 3016	 */
3017 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3018 raw->clear_pending(raw);
3019 return 0;
3020 } else {
 3021		/*
 3022		 * No need for an explicit memory barrier here: we only need to
 3023		 * order the write to the SPQ element against the update of the
 3024		 * SPQ producer, which involves a memory read, and a full
 3025		 * memory barrier will be put there anyway
 3026		 * (inside bnx2x_sp_post()).
 3027		 */
 3028
3029 /* Send a ramrod */
3030 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3031 raw->cid, U64_HI(raw->rdata_mapping),
3032 U64_LO(raw->rdata_mapping),
3033 ETH_CONNECTION_TYPE);
3034 if (rc)
3035 return rc;
3036
3037 /* Ramrod completion is pending */
3038 return 1;
3039 }
3040}
3041
3042static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
3043 struct bnx2x_mcast_ramrod_params *p,
3044 int cmd)
3045{
3046 /* Mark, that there is a work to do */
3047 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
3048 p->mcast_list_len = 1;
3049
3050 return 0;
3051}
3052
3053static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
3054 struct bnx2x_mcast_ramrod_params *p,
3055 int old_num_bins)
3056{
3057 /* Do nothing */
3058}
3059
3060#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
3061do { \
3062 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3063} while (0)
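/* Illustrative note: the macro spreads a 0..255 bin index across the
 * MC_HASH_SIZE array of u32 filter words. For a hypothetical bit == 37:
 * 37 >> 5 == 1 and 37 & 0x1f == 5, so the macro expands to
 *
 *	filter[1] |= (1 << 5);
 */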
3064
3065static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
3066 struct bnx2x_mcast_obj *o,
3067 struct bnx2x_mcast_ramrod_params *p,
3068 u32 *mc_filter)
3069{
3070 struct bnx2x_mcast_list_elem *mlist_pos;
3071 int bit;
3072
3073 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
3074 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
3075 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3076
 3077		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
 3078		   mlist_pos->mac, bit);
3079
3080 /* bookkeeping... */
3081 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3082 bit);
3083 }
3084}
3085
3086static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3087 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3088 u32 *mc_filter)
3089{
3090 int bit;
3091
3092 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3093 bit >= 0;
3094 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3095 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3096 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3097 }
3098}
3099
 3100/* On 57711 we write the multicast MACs' approximate match
 3101 * table directly into the TSTORM's internal RAM, so we don't
 3102 * really need any tricks to make it work.
 3103 */
3104static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3105 struct bnx2x_mcast_ramrod_params *p,
3106 int cmd)
3107{
3108 int i;
3109 struct bnx2x_mcast_obj *o = p->mcast_obj;
3110 struct bnx2x_raw_obj *r = &o->raw;
3111
3112 /* If CLEAR_ONLY has been requested - clear the registry
3113 * and clear a pending bit.
3114 */
3115 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3116 u32 mc_filter[MC_HASH_SIZE] = {0};
3117
3118 /* Set the multicast filter bits before writing it into
3119 * the internal memory.
3120 */
3121 switch (cmd) {
3122 case BNX2X_MCAST_CMD_ADD:
3123 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3124 break;
3125
3126 case BNX2X_MCAST_CMD_DEL:
 3127			DP(BNX2X_MSG_SP,
 3128			   "Invalidating multicast MACs configuration\n");
3129
3130 /* clear the registry */
3131 memset(o->registry.aprox_match.vec, 0,
3132 sizeof(o->registry.aprox_match.vec));
3133 break;
3134
3135 case BNX2X_MCAST_CMD_RESTORE:
3136 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3137 break;
3138
3139 default:
3140 BNX2X_ERR("Unknown command: %d\n", cmd);
3141 return -EINVAL;
3142 }
3143
3144 /* Set the mcast filter in the internal memory */
3145 for (i = 0; i < MC_HASH_SIZE; i++)
3146 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3147 } else
3148 /* clear the registry */
3149 memset(o->registry.aprox_match.vec, 0,
3150 sizeof(o->registry.aprox_match.vec));
3151
3152 /* We are done */
3153 r->clear_pending(r);
3154
3155 return 0;
3156}
3157
3158static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3159 struct bnx2x_mcast_ramrod_params *p,
3160 int cmd)
3161{
3162 struct bnx2x_mcast_obj *o = p->mcast_obj;
3163 int reg_sz = o->get_registry_size(o);
3164
3165 switch (cmd) {
3166 /* DEL command deletes all currently configured MACs */
3167 case BNX2X_MCAST_CMD_DEL:
3168 o->set_registry_size(o, 0);
3169 /* Don't break */
3170
3171 /* RESTORE command will restore the entire multicast configuration */
3172 case BNX2X_MCAST_CMD_RESTORE:
3173 p->mcast_list_len = reg_sz;
3174 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3175 cmd, p->mcast_list_len);
3176 break;
3177
3178 case BNX2X_MCAST_CMD_ADD:
3179 case BNX2X_MCAST_CMD_CONT:
3180 /* Multicast MACs on 57710 are configured as unicast MACs and
3181 * there is only a limited number of CAM entries for that
3182 * matter.
3183 */
3184 if (p->mcast_list_len > o->max_cmd_len) {
 3185			BNX2X_ERR("Can't configure more than %d multicast MACs "
 3186				  "on 57710\n", o->max_cmd_len);
3187 return -EINVAL;
3188 }
 3189		/* Every configured MAC should be cleared if DEL command is
 3190		 * called. Only the last ADD command is relevant since
 3191		 * every ADD command overrides the previous configuration.
 3192		 */
3193 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3194 if (p->mcast_list_len > 0)
3195 o->set_registry_size(o, p->mcast_list_len);
3196
3197 break;
3198
3199 default:
3200 BNX2X_ERR("Unknown command: %d\n", cmd);
3201 return -EINVAL;
3202
3203 }
3204
 3205	/* We want to ensure that commands are executed one by one for 57710.
 3206	 * Therefore each non-empty command will consume o->max_cmd_len.
 3207	 */
3208 if (p->mcast_list_len)
3209 o->total_pending_num += o->max_cmd_len;
3210
3211 return 0;
3212}
3213
3214static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3215 struct bnx2x_mcast_ramrod_params *p,
3216 int old_num_macs)
3217{
3218 struct bnx2x_mcast_obj *o = p->mcast_obj;
3219
3220 o->set_registry_size(o, old_num_macs);
3221
 3222	/* If the current command hasn't been handled yet, being here
 3223	 * means that it's meant to be dropped and we have to
 3224	 * update the number of outstanding MACs accordingly.
 3225	 */
3226 if (p->mcast_list_len)
3227 o->total_pending_num -= o->max_cmd_len;
3228}
3229
3230static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3231 struct bnx2x_mcast_obj *o, int idx,
3232 union bnx2x_mcast_config_data *cfg_data,
3233 int cmd)
3234{
3235 struct bnx2x_raw_obj *r = &o->raw;
3236 struct mac_configuration_cmd *data =
3237 (struct mac_configuration_cmd *)(r->rdata);
3238
3239 /* copy mac */
3240 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3241 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3242 &data->config_table[idx].middle_mac_addr,
3243 &data->config_table[idx].lsb_mac_addr,
3244 cfg_data->mac);
3245
3246 data->config_table[idx].vlan_id = 0;
3247 data->config_table[idx].pf_id = r->func_id;
3248 data->config_table[idx].clients_bit_vector =
3249 cpu_to_le32(1 << r->cl_id);
3250
3251 SET_FLAG(data->config_table[idx].flags,
3252 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3253 T_ETH_MAC_COMMAND_SET);
3254 }
3255}
3256
3257/**
3258 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3259 *
3260 * @bp: device handle
3261 * @p:
3262 * @len: number of rules to handle
3263 */
3264static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3265 struct bnx2x_mcast_ramrod_params *p,
3266 u8 len)
3267{
3268 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3269 struct mac_configuration_cmd *data =
3270 (struct mac_configuration_cmd *)(r->rdata);
3271
3272 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3273 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3274 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3275
3276 data->hdr.offset = offset;
3277 data->hdr.client_id = 0xff;
3278 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3279 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3280 data->hdr.length = len;
3281}
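/* Illustrative note (assuming BNX2X_MAX_MULTICAST is 64, as defined elsewhere
 * in the driver): on a non-emulation chip with func_id == 1 the offset above
 * becomes 64 * (1 + 1) == 128, i.e. each function's multicast entries start
 * at their own fixed slice of the configuration table.
 */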
3282
 3283/**
 3284 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 3285 *
 3286 * @bp: device handle
 3287 * @o: multicast object
 3288 * @start_idx: index in the registry to start from
 3289 * @rdata_idx: index in the ramrod data to start from
 3290 *
 3291 * restore command for 57710 is like all other commands - always a standalone
 3292 * command - start_idx and rdata_idx will always be 0. This function will
 3293 * always succeed.
 3294 * returns -1 to comply with the 57712 variant.
 3295 */
3296static inline int bnx2x_mcast_handle_restore_cmd_e1(
3297 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3298 int *rdata_idx)
3299{
3300 struct bnx2x_mcast_mac_elem *elem;
3301 int i = 0;
3302 union bnx2x_mcast_config_data cfg_data = {0};
3303
3304 /* go through the registry and configure the MACs from it. */
3305 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3306 cfg_data.mac = &elem->mac[0];
3307 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3308
3309 i++;
3310
 3311		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
 3312		   cfg_data.mac);
3313 }
3314
3315 *rdata_idx = i;
3316
3317 return -1;
3318}
3319
3320
3321static inline int bnx2x_mcast_handle_pending_cmds_e1(
3322 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3323{
3324 struct bnx2x_pending_mcast_cmd *cmd_pos;
3325 struct bnx2x_mcast_mac_elem *pmac_pos;
3326 struct bnx2x_mcast_obj *o = p->mcast_obj;
3327 union bnx2x_mcast_config_data cfg_data = {0};
3328 int cnt = 0;
3329
3330
3331 /* If nothing to be done - return */
3332 if (list_empty(&o->pending_cmds_head))
3333 return 0;
3334
3335 /* Handle the first command */
3336 cmd_pos = list_first_entry(&o->pending_cmds_head,
3337 struct bnx2x_pending_mcast_cmd, link);
3338
3339 switch (cmd_pos->type) {
3340 case BNX2X_MCAST_CMD_ADD:
3341 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3342 cfg_data.mac = &pmac_pos->mac[0];
3343 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3344
3345 cnt++;
3346
 3347			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
 3348			   pmac_pos->mac);
3349 }
3350 break;
3351
3352 case BNX2X_MCAST_CMD_DEL:
3353 cnt = cmd_pos->data.macs_num;
3354 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3355 break;
3356
3357 case BNX2X_MCAST_CMD_RESTORE:
3358 o->hdl_restore(bp, o, 0, &cnt);
3359 break;
3360
3361 default:
3362 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3363 return -EINVAL;
3364 }
3365
3366 list_del(&cmd_pos->link);
3367 kfree(cmd_pos);
3368
3369 return cnt;
3370}
3371
 3372/**
 3373 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
 3374 *
 3375 * @fw_hi: upper two bytes of the MAC in FW format
 3376 * @fw_mid: middle two bytes of the MAC in FW format
 3377 * @fw_lo: lower two bytes of the MAC in FW format
 3378 * @mac: buffer the 6-byte MAC address is written to
 3379 */
3380static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3381 __le16 *fw_lo, u8 *mac)
3382{
3383 mac[1] = ((u8 *)fw_hi)[0];
3384 mac[0] = ((u8 *)fw_hi)[1];
3385 mac[3] = ((u8 *)fw_mid)[0];
3386 mac[2] = ((u8 *)fw_mid)[1];
3387 mac[5] = ((u8 *)fw_lo)[0];
3388 mac[4] = ((u8 *)fw_lo)[1];
3389}
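/* Illustrative note: each __le16 holds two MAC bytes swapped, so the helper
 * above just undoes bnx2x_set_fw_mac_addr(). For the hypothetical MAC
 * 00:11:22:33:44:55 the FW words' raw bytes are {0x11,0x00}, {0x33,0x22},
 * {0x55,0x44}, and reading them back as shown reconstructs mac[0..5] in order.
 */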
3390
 3391/**
 3392 * bnx2x_mcast_refresh_registry_e1 - update the exact-match mcast registry
 3393 *
 3394 * @bp: device handle
 3395 * @o: multicast object
 3396 *
 3397 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 3398 * and update the registry correspondingly: if ADD - allocate memory and add
 3399 * the entries to the registry (list), if DELETE - clear the registry and free
 3400 * the memory.
 3401 */
3402static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3403 struct bnx2x_mcast_obj *o)
3404{
3405 struct bnx2x_raw_obj *raw = &o->raw;
3406 struct bnx2x_mcast_mac_elem *elem;
3407 struct mac_configuration_cmd *data =
3408 (struct mac_configuration_cmd *)(raw->rdata);
3409
3410 /* If first entry contains a SET bit - the command was ADD,
3411 * otherwise - DEL_ALL
3412 */
3413 if (GET_FLAG(data->config_table[0].flags,
3414 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3415 int i, len = data->hdr.length;
3416
3417 /* Break if it was a RESTORE command */
3418 if (!list_empty(&o->registry.exact_match.macs))
3419 return 0;
3420
 3421		elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
3422 if (!elem) {
3423 BNX2X_ERR("Failed to allocate registry memory\n");
3424 return -ENOMEM;
3425 }
3426
3427 for (i = 0; i < len; i++, elem++) {
3428 bnx2x_get_fw_mac_addr(
3429 &data->config_table[i].msb_mac_addr,
3430 &data->config_table[i].middle_mac_addr,
3431 &data->config_table[i].lsb_mac_addr,
3432 elem->mac);
 3433			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
 3434			   elem->mac);
3435 list_add_tail(&elem->link,
3436 &o->registry.exact_match.macs);
3437 }
3438 } else {
3439 elem = list_first_entry(&o->registry.exact_match.macs,
3440 struct bnx2x_mcast_mac_elem, link);
3441 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3442 kfree(elem);
3443 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3444 }
3445
3446 return 0;
3447}
3448
3449static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3450 struct bnx2x_mcast_ramrod_params *p,
3451 int cmd)
3452{
3453 struct bnx2x_mcast_obj *o = p->mcast_obj;
3454 struct bnx2x_raw_obj *raw = &o->raw;
3455 struct mac_configuration_cmd *data =
3456 (struct mac_configuration_cmd *)(raw->rdata);
3457 int cnt = 0, i, rc;
3458
3459 /* Reset the ramrod data buffer */
3460 memset(data, 0, sizeof(*data));
3461
3462 /* First set all entries as invalid */
3463 for (i = 0; i < o->max_cmd_len ; i++)
3464 SET_FLAG(data->config_table[i].flags,
3465 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3466 T_ETH_MAC_COMMAND_INVALIDATE);
3467
3468 /* Handle pending commands first */
3469 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3470
3471 /* If there are no more pending commands - clear SCHEDULED state */
3472 if (list_empty(&o->pending_cmds_head))
3473 o->clear_sched(o);
3474
3475 /* The below may be true iff there were no pending commands */
3476 if (!cnt)
3477 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3478
3479 /* For 57710 every command has o->max_cmd_len length to ensure that
3480 * commands are done one at a time.
3481 */
3482 o->total_pending_num -= o->max_cmd_len;
3483
3484 /* send a ramrod */
3485
3486 WARN_ON(cnt > o->max_cmd_len);
3487
3488 /* Set ramrod header (in particular, a number of entries to update) */
3489 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3490
 3491	/* update the registry: we need the registry contents to always be
 3492	 * up to date in order to be able to execute a RESTORE opcode. Here
 3493	 * we use the fact that for 57710 we send one command at a time,
 3494	 * hence we may take the registry update out of the command handling
 3495	 * and do it in a simpler way here.
 3496	 */
3497 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3498 if (rc)
3499 return rc;
3500
 3501	/*
 3502	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
 3503	 * RAMROD_PENDING status immediately.
 3504	 */
3505 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3506 raw->clear_pending(raw);
3507 return 0;
3508 } else {
 3509		/*
 3510		 * No need for an explicit memory barrier here: we only need to
 3511		 * order the write to the SPQ element against the update of the
 3512		 * SPQ producer, which involves a memory read, and a full
 3513		 * memory barrier will be put there anyway
 3514		 * (inside bnx2x_sp_post()).
 3515		 */
 3516
3517 /* Send a ramrod */
3518 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3519 U64_HI(raw->rdata_mapping),
3520 U64_LO(raw->rdata_mapping),
3521 ETH_CONNECTION_TYPE);
3522 if (rc)
3523 return rc;
3524
3525 /* Ramrod completion is pending */
3526 return 1;
3527 }
3528
3529}
3530
3531static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3532{
3533 return o->registry.exact_match.num_macs_set;
3534}
3535
3536static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3537{
3538 return o->registry.aprox_match.num_bins_set;
3539}
3540
3541static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3542 int n)
3543{
3544 o->registry.exact_match.num_macs_set = n;
3545}
3546
3547static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3548 int n)
3549{
3550 o->registry.aprox_match.num_bins_set = n;
3551}
3552
3553int bnx2x_config_mcast(struct bnx2x *bp,
3554 struct bnx2x_mcast_ramrod_params *p,
3555 int cmd)
3556{
3557 struct bnx2x_mcast_obj *o = p->mcast_obj;
3558 struct bnx2x_raw_obj *r = &o->raw;
3559 int rc = 0, old_reg_size;
3560
3561 /* This is needed to recover number of currently configured mcast macs
3562 * in case of failure.
3563 */
3564 old_reg_size = o->get_registry_size(o);
3565
3566 /* Do some calculations and checks */
3567 rc = o->validate(bp, p, cmd);
3568 if (rc)
3569 return rc;
3570
3571 /* Return if there is no work to do */
3572 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3573 return 0;
3574
3575 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3576 "o->max_cmd_len=%d\n", o->total_pending_num,
3577 p->mcast_list_len, o->max_cmd_len);
3578
3579 /* Enqueue the current command to the pending list if we can't complete
3580 * it in the current iteration
3581 */
3582 if (r->check_pending(r) ||
3583 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3584 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3585 if (rc < 0)
3586 goto error_exit1;
3587
3588 /* As long as the current command is in a command list we
3589 * don't need to handle it separately.
3590 */
3591 p->mcast_list_len = 0;
3592 }
3593
3594 if (!r->check_pending(r)) {
3595
3596 /* Set 'pending' state */
3597 r->set_pending(r);
3598
3599 /* Configure the new classification in the chip */
3600 rc = o->config_mcast(bp, p, cmd);
3601 if (rc < 0)
3602 goto error_exit2;
3603
 3604		/* Wait for a ramrod completion if it was requested */
3605 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3606 rc = o->wait_comp(bp, o);
3607 }
3608
3609 return rc;
3610
3611error_exit2:
3612 r->clear_pending(r);
3613
3614error_exit1:
3615 o->revert(bp, p, old_reg_size);
3616
3617 return rc;
3618}
3619
3620static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3621{
3622 smp_mb__before_clear_bit();
3623 clear_bit(o->sched_state, o->raw.pstate);
3624 smp_mb__after_clear_bit();
3625}
3626
3627static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3628{
3629 smp_mb__before_clear_bit();
3630 set_bit(o->sched_state, o->raw.pstate);
3631 smp_mb__after_clear_bit();
3632}
3633
3634static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3635{
3636 return !!test_bit(o->sched_state, o->raw.pstate);
3637}
3638
3639static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3640{
3641 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3642}
3643
3644void bnx2x_init_mcast_obj(struct bnx2x *bp,
3645 struct bnx2x_mcast_obj *mcast_obj,
3646 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3647 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3648 int state, unsigned long *pstate, bnx2x_obj_type type)
3649{
3650 memset(mcast_obj, 0, sizeof(*mcast_obj));
3651
3652 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3653 rdata, rdata_mapping, state, pstate, type);
3654
3655 mcast_obj->engine_id = engine_id;
3656
3657 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3658
3659 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3660 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3661 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3662 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3663
3664 if (CHIP_IS_E1(bp)) {
3665 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3666 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3667 mcast_obj->hdl_restore =
3668 bnx2x_mcast_handle_restore_cmd_e1;
3669 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3670
3671 if (CHIP_REV_IS_SLOW(bp))
3672 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3673 else
3674 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3675
3676 mcast_obj->wait_comp = bnx2x_mcast_wait;
3677 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3678 mcast_obj->validate = bnx2x_mcast_validate_e1;
3679 mcast_obj->revert = bnx2x_mcast_revert_e1;
3680 mcast_obj->get_registry_size =
3681 bnx2x_mcast_get_registry_size_exact;
3682 mcast_obj->set_registry_size =
3683 bnx2x_mcast_set_registry_size_exact;
3684
3685 /* 57710 is the only chip that uses the exact match for mcast
3686 * at the moment.
3687 */
3688 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3689
3690 } else if (CHIP_IS_E1H(bp)) {
3691 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3692 mcast_obj->enqueue_cmd = NULL;
3693 mcast_obj->hdl_restore = NULL;
3694 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3695
3696 /* 57711 doesn't send a ramrod, so it has unlimited credit
3697 * for one command.
3698 */
3699 mcast_obj->max_cmd_len = -1;
3700 mcast_obj->wait_comp = bnx2x_mcast_wait;
3701 mcast_obj->set_one_rule = NULL;
3702 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3703 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3704 mcast_obj->get_registry_size =
3705 bnx2x_mcast_get_registry_size_aprox;
3706 mcast_obj->set_registry_size =
3707 bnx2x_mcast_set_registry_size_aprox;
3708 } else {
3709 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3710 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3711 mcast_obj->hdl_restore =
3712 bnx2x_mcast_handle_restore_cmd_e2;
3713 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3714 /* TODO: There should be a proper HSI define for this number!!!
3715 */
3716 mcast_obj->max_cmd_len = 16;
3717 mcast_obj->wait_comp = bnx2x_mcast_wait;
3718 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3719 mcast_obj->validate = bnx2x_mcast_validate_e2;
3720 mcast_obj->revert = bnx2x_mcast_revert_e2;
3721 mcast_obj->get_registry_size =
3722 bnx2x_mcast_get_registry_size_aprox;
3723 mcast_obj->set_registry_size =
3724 bnx2x_mcast_set_registry_size_aprox;
3725 }
3726}
3727
3728/*************************** Credit handling **********************************/
3729
3730/**
3731 * __atomic_add_ifless - add if the result is less than a given value.
3732 *
3733 * @v: pointer of type atomic_t
3734 * @a: the amount to add to v...
3735 * @u: ...if (v + a) is less than u.
3736 *
3737 * returns true if (v + a) was less than u, and false otherwise.
3738 *
3739 */
3740static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3741{
3742 int c, old;
3743
3744 c = atomic_read(v);
3745 for (;;) {
3746 if (unlikely(c + a >= u))
3747 return false;
3748
3749 old = atomic_cmpxchg((v), c, c + a);
3750 if (likely(old == c))
3751 break;
3752 c = old;
3753 }
3754
3755 return true;
3756}
3757
3758/**
3759 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3760 *
3761 * @v: pointer of type atomic_t
3762 * @a: the amount to dec from v...
3763 * @u: ...if (v - a) is greater than or equal to u.
3764 *
3765 * returns true if (v - a) was greater than or equal to u, and false
3766 * otherwise.
3767 */
3768static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3769{
3770 int c, old;
3771
3772 c = atomic_read(v);
3773 for (;;) {
3774 if (unlikely(c - a < u))
3775 return false;
3776
3777 old = atomic_cmpxchg((v), c, c - a);
3778 if (likely(old == c))
3779 break;
3780 c = old;
3781 }
3782
3783 return true;
3784}
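/* Illustrative sketch (not part of the driver): how the two bounded-update
 * helpers above behave. The BNX2X_SP_EXAMPLES guard and the
 * bnx2x_example_*() names are hypothetical, added only for illustration
 * and compiled out by default.
 */
#ifdef BNX2X_SP_EXAMPLES
static void bnx2x_example_bounded_counter(void)
{
	atomic_t cnt;

	atomic_set(&cnt, 5);

	/* Succeeds: 5 + 3 = 8 is less than the upper bound 10 */
	WARN_ON(!__atomic_add_ifless(&cnt, 3, 10));

	/* Fails and leaves cnt at 8: 8 + 2 = 10 is not less than 10 */
	WARN_ON(__atomic_add_ifless(&cnt, 2, 10));

	/* Succeeds: 8 - 8 = 0 is still greater than or equal to 0 */
	WARN_ON(!__atomic_dec_ifmoe(&cnt, 8, 0));

	/* Fails: 0 - 1 would drop below the lower bound 0 */
	WARN_ON(__atomic_dec_ifmoe(&cnt, 1, 0));
}
#endif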
3785
3786static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3787{
3788 bool rc;
3789
3790 smp_mb();
3791 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3792 smp_mb();
3793
3794 return rc;
3795}
3796
3797static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3798{
3799 bool rc;
3800
3801 smp_mb();
3802
3803 /* Don't allow a refill if credit + cnt > pool_sz */
3804 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3805
3806 smp_mb();
3807
3808 return rc;
3809}
3810
3811static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3812{
3813 int cur_credit;
3814
3815 smp_mb();
3816 cur_credit = atomic_read(&o->credit);
3817
3818 return cur_credit;
3819}
3820
3821static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3822 int cnt)
3823{
3824 return true;
3825}
3826
3827
3828static bool bnx2x_credit_pool_get_entry(
3829 struct bnx2x_credit_pool_obj *o,
3830 int *offset)
3831{
3832 int idx, vec, i;
3833
3834 *offset = -1;
3835
3836 /* Find "internal cam-offset" then add to base for this object... */
3837 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3838
3839 /* Skip the current vector if there are no free entries in it */
3840 if (!o->pool_mirror[vec])
3841 continue;
3842
3843 /* If we've got here we are going to find a free entry */
3844 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3845 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3846
3847 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3848 /* Got one!! */
3849 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3850 *offset = o->base_pool_offset + idx;
3851 return true;
3852 }
3853 }
3854
3855 return false;
3856}
3857
3858static bool bnx2x_credit_pool_put_entry(
3859 struct bnx2x_credit_pool_obj *o,
3860 int offset)
3861{
3862 if (offset < o->base_pool_offset)
3863 return false;
3864
3865 offset -= o->base_pool_offset;
3866
3867 if (offset >= o->pool_sz)
3868 return false;
3869
3870 /* Return the entry to the pool */
3871 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3872
3873 return true;
3874}
3875
3876static bool bnx2x_credit_pool_put_entry_always_true(
3877 struct bnx2x_credit_pool_obj *o,
3878 int offset)
3879{
3880 return true;
3881}
3882
3883static bool bnx2x_credit_pool_get_entry_always_true(
3884 struct bnx2x_credit_pool_obj *o,
3885 int *offset)
3886{
3887 *offset = -1;
3888 return true;
3889}
3890/**
3891 * bnx2x_init_credit_pool - initialize credit pool internals.
3892 *
3893 * @p:
3894 * @base: Base entry in the CAM to use.
3895 * @credit: pool size.
3896 *
3897 * If base is negative no CAM entries handling will be performed.
3898 * If credit is negative pool operations will always succeed (unlimited pool).
3899 *
3900 */
3901static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3902 int base, int credit)
3903{
3904 /* Zero the object first */
3905 memset(p, 0, sizeof(*p));
3906
3907 /* Set the table to all 1s */
3908 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3909
3910 /* Init a pool as full */
3911 atomic_set(&p->credit, credit);
3912
3913 /* The total pool size */
3914 p->pool_sz = credit;
3915
3916 p->base_pool_offset = base;
3917
3918 /* Commit the change */
3919 smp_mb();
3920
3921 p->check = bnx2x_credit_pool_check;
3922
3923 /* if pool credit is negative - disable the checks */
3924 if (credit >= 0) {
3925 p->put = bnx2x_credit_pool_put;
3926 p->get = bnx2x_credit_pool_get;
3927 p->put_entry = bnx2x_credit_pool_put_entry;
3928 p->get_entry = bnx2x_credit_pool_get_entry;
3929 } else {
3930 p->put = bnx2x_credit_pool_always_true;
3931 p->get = bnx2x_credit_pool_always_true;
3932 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3933 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3934 }
3935
3936 /* If base is negative - disable entries handling */
3937 if (base < 0) {
3938 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3939 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3940 }
3941}
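/* Illustrative sketch (not part of the driver): the three pool flavors
 * bnx2x_init_credit_pool() produces, selected by the signs of 'base' and
 * 'credit'. The BNX2X_SP_EXAMPLES guard and the example function name are
 * hypothetical.
 */
#ifdef BNX2X_SP_EXAMPLES
static void bnx2x_example_pool_flavors(struct bnx2x_credit_pool_obj *p)
{
	int offset;

	/* Bounded pool with CAM entry bookkeeping: 8 entries based at 32,
	 * so get_entry() hands out offsets in [32, 39].
	 */
	bnx2x_init_credit_pool(p, 32, 8);
	if (p->get(p, 1) && p->get_entry(p, &offset)) {
		p->put_entry(p, offset);
		p->put(p, 1);
	}

	/* Bounded credit, no CAM bookkeeping: get_entry()/put_entry()
	 * always succeed and report offset -1.
	 */
	bnx2x_init_credit_pool(p, -1, 8);

	/* Unlimited pool: get()/put() always succeed */
	bnx2x_init_credit_pool(p, 0, -1);
}
#endif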
3942
3943void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3944 struct bnx2x_credit_pool_obj *p, u8 func_id,
3945 u8 func_num)
3946{
3947/* TODO: this will be defined in consts as well... */
3948#define BNX2X_CAM_SIZE_EMUL 5
3949
3950 int cam_sz;
3951
3952 if (CHIP_IS_E1(bp)) {
3953 /* In E1, Multicast is saved in cam... */
3954 if (!CHIP_REV_IS_SLOW(bp))
3955 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3956 else
3957 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3958
3959 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3960
3961 } else if (CHIP_IS_E1H(bp)) {
3962 /* CAM credit is equally divided between all active functions
3963 * on the PORT.
3964 */
3965 if ((func_num > 0)) {
3966 if (!CHIP_REV_IS_SLOW(bp))
3967 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3968 else
3969 cam_sz = BNX2X_CAM_SIZE_EMUL;
3970 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3971 } else {
3972 /* this should never happen! Block MAC operations. */
3973 bnx2x_init_credit_pool(p, 0, 0);
3974 }
3975
3976 } else {
3977
3978 /*
3979 * CAM credit is equally divided between all active functions
3980 * on the PATH.
3981 */
3982 if ((func_num > 0)) {
3983 if (!CHIP_REV_IS_SLOW(bp))
3984 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3985 else
3986 cam_sz = BNX2X_CAM_SIZE_EMUL;
3987
3988 /*
3989 * No need for CAM entries handling for 57712 and
3990 * newer.
3991 */
3992 bnx2x_init_credit_pool(p, -1, cam_sz);
3993 } else {
3994 /* this should never happen! Block MAC operations. */
3995 bnx2x_init_credit_pool(p, 0, 0);
3996 }
3997
3998 }
3999}
4000
4001void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
4002 struct bnx2x_credit_pool_obj *p,
4003 u8 func_id,
4004 u8 func_num)
4005{
4006 if (CHIP_IS_E1x(bp)) {
4007 /*
4008 * There is no VLAN credit in HW on 57710 and 57711; only
4009 * MAC / MAC-VLAN pairs can be set.
4010 */
4011 bnx2x_init_credit_pool(p, 0, -1);
4012 } else {
4013 /*
4014 * CAM credit is equally divided between all active functions
4015 * on the PATH.
4016 */
4017 if (func_num > 0) {
4018 int credit = MAX_VLAN_CREDIT_E2 / func_num;
4019 bnx2x_init_credit_pool(p, func_id * credit, credit);
4020 } else
4021 /* this should never happen! Block VLAN operations. */
4022 bnx2x_init_credit_pool(p, 0, 0);
4023 }
4024}
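/* Illustrative arithmetic (hypothetical example function and values): how
 * the two public pool initializers above split the CAM. For func_id 2 out
 * of 4 active functions on an E1H port, the MAC pool gets
 * MAX_MAC_CREDIT_E1H / (2 * 4) entries based at func_id * that size; on
 * E1x the VLAN pool is unlimited.
 */
#ifdef BNX2X_SP_EXAMPLES
static void bnx2x_example_credit_split(struct bnx2x *bp,
				       struct bnx2x_credit_pool_obj *macs,
				       struct bnx2x_credit_pool_obj *vlans)
{
	bnx2x_init_mac_credit_pool(bp, macs, 2, 4);
	bnx2x_init_vlan_credit_pool(bp, vlans, 2, 4);
}
#endif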
4025
4026/****************** RSS Configuration ******************/
4027/**
4028 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
4029 *
4030 * @bp: driver handle
4031 * @p: pointer to rss configuration
4032 *
4033 * Prints it when NETIF_MSG_IFUP debug level is configured.
4034 */
4035static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
4036 struct bnx2x_config_rss_params *p)
4037{
4038 int i;
4039
4040 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
4041 DP(BNX2X_MSG_SP, "0x0000: ");
4042 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
4043 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
4044
4045 /* Print 4 bytes in a line */
4046 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
4047 (((i + 1) & 0x3) == 0)) {
4048 DP_CONT(BNX2X_MSG_SP, "\n");
4049 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
4050 }
4051 }
4052
4053 DP_CONT(BNX2X_MSG_SP, "\n");
4054}
4055
4056/**
4057 * bnx2x_setup_rss - configure RSS
4058 *
4059 * @bp: device handle
4060 * @p: rss configuration
4061 *
4062 * Sends an RSS_UPDATE ramrod.
4063 */
4064static int bnx2x_setup_rss(struct bnx2x *bp,
4065 struct bnx2x_config_rss_params *p)
4066{
4067 struct bnx2x_rss_config_obj *o = p->rss_obj;
4068 struct bnx2x_raw_obj *r = &o->raw;
4069 struct eth_rss_update_ramrod_data *data =
4070 (struct eth_rss_update_ramrod_data *)(r->rdata);
4071 u8 rss_mode = 0;
4072 int rc;
4073
4074 memset(data, 0, sizeof(*data));
4075
4076 DP(BNX2X_MSG_SP, "Configuring RSS\n");
4077
4078 /* Set an echo field */
4079 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4080 (r->state << BNX2X_SWCID_SHIFT);
4081
4082 /* RSS mode */
4083 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4084 rss_mode = ETH_RSS_MODE_DISABLED;
4085 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4086 rss_mode = ETH_RSS_MODE_REGULAR;
4087 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4088 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4089 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4090 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4091 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4092 rss_mode = ETH_RSS_MODE_IP_DSCP;
4093
4094 data->rss_mode = rss_mode;
4095
4096 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4097
4098 /* RSS capabilities */
4099 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4100 data->capabilities |=
4101 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4102
4103 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4104 data->capabilities |=
4105 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4106
4107 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4108 data->capabilities |=
4109 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4110
4111 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4112 data->capabilities |=
4113 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4114
4115 /* Hashing mask */
4116 data->rss_result_mask = p->rss_result_mask;
4117
4118 /* RSS engine ID */
4119 data->rss_engine_id = o->engine_id;
4120
4121 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4122
4123 /* Indirection table */
4124 memcpy(data->indirection_table, p->ind_table,
4125 T_ETH_INDIRECTION_TABLE_SIZE);
4126
4127 /* Remember the last configuration */
4128 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4129
4130 /* Print the indirection table */
4131 if (netif_msg_ifup(bp))
4132 bnx2x_debug_print_ind_table(bp, p);
4133
4134 /* RSS keys */
4135 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4136 memcpy(&data->rss_key[0], &p->rss_key[0],
4137 sizeof(data->rss_key));
4138 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4139 }
4140
4141 /*
4142 * No need for an explicit memory barrier here: the write to
4143 * the SPQ element must anyway be ordered against the update
4144 * of the SPQ producer, which involves a memory read, and the
4145 * required full memory barrier is already placed there
4146 * (inside bnx2x_sp_post()).
4147 */
4148
4149 /* Send a ramrod */
4150 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4151 U64_HI(r->rdata_mapping),
4152 U64_LO(r->rdata_mapping),
4153 ETH_CONNECTION_TYPE);
4154
4155 if (rc < 0)
4156 return rc;
4157
4158 return 1;
4159}
4160
4161void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4162 u8 *ind_table)
4163{
4164 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4165}
4166
4167int bnx2x_config_rss(struct bnx2x *bp,
4168 struct bnx2x_config_rss_params *p)
4169{
4170 int rc;
4171 struct bnx2x_rss_config_obj *o = p->rss_obj;
4172 struct bnx2x_raw_obj *r = &o->raw;
4173
4174 /* Do nothing if only driver cleanup was requested */
4175 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4176 return 0;
4177
4178 r->set_pending(r);
4179
4180 rc = o->config_rss(bp, p);
4181 if (rc < 0) {
4182 r->clear_pending(r);
4183 return rc;
4184 }
4185
4186 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4187 rc = r->wait_comp(bp, r);
4188
4189 return rc;
4190}
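/* Illustrative sketch (hypothetical caller, not part of the driver): a
 * minimal synchronous invocation of bnx2x_config_rss(). The rss_obj is
 * assumed to have been prepared with bnx2x_init_rss_config_obj(); the
 * 8-queue round-robin spread and the cl_id-based table entries are
 * example choices, not mandated by the API.
 */
#ifdef BNX2X_SP_EXAMPLES
static int bnx2x_example_config_rss(struct bnx2x *bp,
				    struct bnx2x_rss_config_obj *rss_obj)
{
	struct bnx2x_config_rss_params params = {NULL};
	int i;

	params.rss_obj = rss_obj;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);

	/* Hash into 8 buckets, spread round-robin over 8 client ids */
	params.rss_result_mask = 0x7;
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
		params.ind_table[i] = rss_obj->raw.cl_id + (i % 8);

	return bnx2x_config_rss(bp, &params);
}
#endif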
4191
4192
4193void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4194 struct bnx2x_rss_config_obj *rss_obj,
4195 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4196 void *rdata, dma_addr_t rdata_mapping,
4197 int state, unsigned long *pstate,
4198 bnx2x_obj_type type)
4199{
4200 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4201 rdata_mapping, state, pstate, type);
4202
4203 rss_obj->engine_id = engine_id;
4204 rss_obj->config_rss = bnx2x_setup_rss;
4205}
4206
4207/********************** Queue state object ***********************************/
4208
4209/**
4210 * bnx2x_queue_state_change - perform Queue state change transition
4211 *
4212 * @bp: device handle
4213 * @params: parameters to perform the transition
4214 *
4215 * returns 0 in case of successfully completed transition, negative error
4216 * code in case of failure, positive (EBUSY) value if there is a
4217 * completion that is still pending (possible only if RAMROD_COMP_WAIT is
4218 * not set in params->ramrod_flags for asynchronous commands).
4219 *
4220 */
4221int bnx2x_queue_state_change(struct bnx2x *bp,
4222 struct bnx2x_queue_state_params *params)
4223{
4224 struct bnx2x_queue_sp_obj *o = params->q_obj;
4225 int rc, pending_bit;
4226 unsigned long *pending = &o->pending;
4227
4228 /* Check that the requested transition is legal */
4229 if (o->check_transition(bp, o, params))
4230 return -EINVAL;
4231
4232 /* Set "pending" bit */
4233 pending_bit = o->set_pending(o, params);
4234
4235 /* Don't send a command if only driver cleanup was requested */
4236 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4237 o->complete_cmd(bp, o, pending_bit);
4238 else {
4239 /* Send a ramrod */
4240 rc = o->send_cmd(bp, params);
4241 if (rc) {
4242 o->next_state = BNX2X_Q_STATE_MAX;
4243 clear_bit(pending_bit, pending);
4244 smp_mb__after_clear_bit();
4245 return rc;
4246 }
4247
4248 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4249 rc = o->wait_comp(bp, o, pending_bit);
4250 if (rc)
4251 return rc;
4252
4253 return 0;
4254 }
4255 }
4256
4257 return !!test_bit(pending_bit, pending);
4258}
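/* Illustrative sketch (hypothetical caller, not part of the driver):
 * requesting a HALT transition and waiting synchronously for the ramrod
 * completion through the entry point above.
 */
#ifdef BNX2X_SP_EXAMPLES
static int bnx2x_example_halt_queue(struct bnx2x *bp,
				    struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params = {NULL};

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_HALT;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* 0: transition completed; negative: illegal transition or error */
	return bnx2x_queue_state_change(bp, &params);
}
#endif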
4259
4260
4261static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4262 struct bnx2x_queue_state_params *params)
4263{
4264 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4265
4266 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4267 * UPDATE command.
4268 */
4269 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4270 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4271 bit = BNX2X_Q_CMD_UPDATE;
4272 else
4273 bit = cmd;
4274
4275 set_bit(bit, &obj->pending);
4276 return bit;
4277}
4278
4279static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4280 struct bnx2x_queue_sp_obj *o,
4281 enum bnx2x_queue_cmd cmd)
4282{
4283 return bnx2x_state_wait(bp, cmd, &o->pending);
4284}
4285
4286/**
4287 * bnx2x_queue_comp_cmd - complete the state change command.
4288 *
4289 * @bp: device handle
4290 * @o:
4291 * @cmd:
4292 *
4293 * Checks that the arrived completion is expected.
4294 */
4295static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4296 struct bnx2x_queue_sp_obj *o,
4297 enum bnx2x_queue_cmd cmd)
4298{
4299 unsigned long cur_pending = o->pending;
4300
4301 if (!test_and_clear_bit(cmd, &cur_pending)) {
4302 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4303 "pending 0x%lx, next_state %d\n", cmd,
4304 o->cids[BNX2X_PRIMARY_CID_INDEX],
4305 o->state, cur_pending, o->next_state);
4306 return -EINVAL;
4307 }
4308
4309 if (o->next_tx_only >= o->max_cos)
4310 /* >= because tx only must always be smaller than cos since the
4311 * primary connection supports COS 0
4312 */
4313 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4314 o->next_tx_only, o->max_cos);
4315
4316 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4317 "setting state to %d\n", cmd,
4318 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4319
4320 if (o->next_tx_only) /* print num tx-only if any exist */
4321 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4322 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4323
4324 o->state = o->next_state;
4325 o->num_tx_only = o->next_tx_only;
4326 o->next_state = BNX2X_Q_STATE_MAX;
4327
4328 /* It's important that o->state and o->next_state are
4329 * updated before o->pending.
4330 */
4331 wmb();
4332
4333 clear_bit(cmd, &o->pending);
4334 smp_mb__after_clear_bit();
4335
4336 return 0;
4337}
4338
4339static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4340 struct bnx2x_queue_state_params *cmd_params,
4341 struct client_init_ramrod_data *data)
4342{
4343 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4344
4345 /* Rx data */
4346
4347 /* IPv6 TPA supported for E2 and above only */
4348 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4349 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4350}
4351
4352static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4353 struct bnx2x_queue_sp_obj *o,
4354 struct bnx2x_general_setup_params *params,
4355 struct client_init_general_data *gen_data,
4356 unsigned long *flags)
4357{
4358 gen_data->client_id = o->cl_id;
4359
4360 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4361 gen_data->statistics_counter_id =
4362 params->stat_id;
4363 gen_data->statistics_en_flg = 1;
4364 gen_data->statistics_zero_flg =
4365 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4366 } else
4367 gen_data->statistics_counter_id =
4368 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4369
4370 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4371 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4372 gen_data->sp_client_id = params->spcl_id;
4373 gen_data->mtu = cpu_to_le16(params->mtu);
4374 gen_data->func_id = o->func_id;
4375
4376
4377 gen_data->cos = params->cos;
4378
4379 gen_data->traffic_type =
4380 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4381 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4382
4383 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4384 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4385}
4386
4387static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4388 struct bnx2x_txq_setup_params *params,
4389 struct client_init_tx_data *tx_data,
4390 unsigned long *flags)
4391{
4392 tx_data->enforce_security_flg =
4393 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4394 tx_data->default_vlan =
4395 cpu_to_le16(params->default_vlan);
4396 tx_data->default_vlan_flg =
4397 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4398 tx_data->tx_switching_flg =
4399 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4400 tx_data->anti_spoofing_flg =
4401 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4402 tx_data->tx_status_block_id = params->fw_sb_id;
4403 tx_data->tx_sb_index_number = params->sb_cq_index;
4404 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4405
4406 tx_data->tx_bd_page_base.lo =
4407 cpu_to_le32(U64_LO(params->dscr_map));
4408 tx_data->tx_bd_page_base.hi =
4409 cpu_to_le32(U64_HI(params->dscr_map));
4410
4411 /* Don't configure any Tx switching mode during queue SETUP */
4412 tx_data->state = 0;
4413}
4414
4415static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4416 struct rxq_pause_params *params,
4417 struct client_init_rx_data *rx_data)
4418{
4419 /* flow control data */
4420 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4421 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4422 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4423 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4424 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4425 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4426 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4427}
4428
4429static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4430 struct bnx2x_rxq_setup_params *params,
4431 struct client_init_rx_data *rx_data,
4432 unsigned long *flags)
4433{
4434 /* Rx data */
4435 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4436 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4437 rx_data->vmqueue_mode_en_flg = 0;
4438
4439 rx_data->cache_line_alignment_log_size =
4440 params->cache_line_log;
4441 rx_data->enable_dynamic_hc =
4442 test_bit(BNX2X_Q_FLG_DHC, flags);
4443 rx_data->max_sges_for_packet = params->max_sges_pkt;
4444 rx_data->client_qzone_id = params->cl_qzone_id;
4445 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4446
4447 /* Always start in DROP_ALL mode */
4448 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4449 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4450
4451 /* We don't set drop flags */
4452 rx_data->drop_ip_cs_err_flg = 0;
4453 rx_data->drop_tcp_cs_err_flg = 0;
4454 rx_data->drop_ttl0_flg = 0;
4455 rx_data->drop_udp_cs_err_flg = 0;
4456 rx_data->inner_vlan_removal_enable_flg =
4457 test_bit(BNX2X_Q_FLG_VLAN, flags);
4458 rx_data->outer_vlan_removal_enable_flg =
4459 test_bit(BNX2X_Q_FLG_OV, flags);
4460 rx_data->status_block_id = params->fw_sb_id;
4461 rx_data->rx_sb_index_number = params->sb_cq_index;
4462 rx_data->max_tpa_queues = params->max_tpa_queues;
4463 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4464 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4465 rx_data->bd_page_base.lo =
4466 cpu_to_le32(U64_LO(params->dscr_map));
4467 rx_data->bd_page_base.hi =
4468 cpu_to_le32(U64_HI(params->dscr_map));
4469 rx_data->sge_page_base.lo =
4470 cpu_to_le32(U64_LO(params->sge_map));
4471 rx_data->sge_page_base.hi =
4472 cpu_to_le32(U64_HI(params->sge_map));
4473 rx_data->cqe_page_base.lo =
4474 cpu_to_le32(U64_LO(params->rcq_map));
4475 rx_data->cqe_page_base.hi =
4476 cpu_to_le32(U64_HI(params->rcq_map));
4477 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4478
4479 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4480 rx_data->approx_mcast_engine_id = o->func_id;
4481 rx_data->is_approx_mcast = 1;
4482 }
4483
4484 rx_data->rss_engine_id = params->rss_engine_id;
4485
4486 /* silent vlan removal */
4487 rx_data->silent_vlan_removal_flg =
4488 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4489 rx_data->silent_vlan_value =
4490 cpu_to_le16(params->silent_removal_value);
4491 rx_data->silent_vlan_mask =
4492 cpu_to_le16(params->silent_removal_mask);
4493
4494}
4495
4496/* initialize the general, tx and rx parts of a queue object */
4497static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4498 struct bnx2x_queue_state_params *cmd_params,
4499 struct client_init_ramrod_data *data)
4500{
4501 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4502 &cmd_params->params.setup.gen_params,
4503 &data->general,
4504 &cmd_params->params.setup.flags);
4505
4506 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4507 &cmd_params->params.setup.txq_params,
4508 &data->tx,
4509 &cmd_params->params.setup.flags);
4510
4511 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4512 &cmd_params->params.setup.rxq_params,
4513 &data->rx,
4514 &cmd_params->params.setup.flags);
4515
4516 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4517 &cmd_params->params.setup.pause_params,
4518 &data->rx);
4519}
4520
4521/* initialize the general and tx parts of a tx-only queue object */
4522static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4523 struct bnx2x_queue_state_params *cmd_params,
4524 struct tx_queue_init_ramrod_data *data)
4525{
4526 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4527 &cmd_params->params.tx_only.gen_params,
4528 &data->general,
4529 &cmd_params->params.tx_only.flags);
4530
4531 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4532 &cmd_params->params.tx_only.txq_params,
4533 &data->tx,
4534 &cmd_params->params.tx_only.flags);
4535
4536 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
4537 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4538}
4539
4540/**
4541 * bnx2x_q_init - init HW/FW queue
4542 *
4543 * @bp: device handle
4544 * @params:
4545 *
4546 * HW/FW initial Queue configuration:
4547 * - HC: Rx and Tx
4548 * - CDU context validation
4549 *
4550 */
4551static inline int bnx2x_q_init(struct bnx2x *bp,
4552 struct bnx2x_queue_state_params *params)
4553{
4554 struct bnx2x_queue_sp_obj *o = params->q_obj;
4555 struct bnx2x_queue_init_params *init = &params->params.init;
4556 u16 hc_usec;
4557 u8 cos;
4558
4559 /* Tx HC configuration */
4560 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4561 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4562 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4563
4564 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4565 init->tx.sb_cq_index,
4566 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4567 hc_usec);
4568 }
4569
4570 /* Rx HC configuration */
4571 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4572 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4573 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4574
4575 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4576 init->rx.sb_cq_index,
4577 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4578 hc_usec);
4579 }
4580
4581 /* Set CDU context validation values */
4582 for (cos = 0; cos < o->max_cos; cos++) {
4583 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4584 o->cids[cos], cos);
4585 DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4586 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4587 }
4588
4589 /* As no ramrod is sent, complete the command immediately */
4590 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4591
4592 mmiowb();
4593 smp_mb();
4594
4595 return 0;
4596}
4597
4598static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4599 struct bnx2x_queue_state_params *params)
4600{
4601 struct bnx2x_queue_sp_obj *o = params->q_obj;
4602 struct client_init_ramrod_data *rdata =
4603 (struct client_init_ramrod_data *)o->rdata;
4604 dma_addr_t data_mapping = o->rdata_mapping;
4605 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4606
4607 /* Clear the ramrod data */
4608 memset(rdata, 0, sizeof(*rdata));
4609
4610 /* Fill the ramrod data */
4611 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4612
4613 /*
4614 * No need for an explicit memory barrier here: the write to
4615 * the SPQ element must anyway be ordered against the update
4616 * of the SPQ producer, which involves a memory read, and the
4617 * required full memory barrier is already placed there
4618 * (inside bnx2x_sp_post()).
4619 */
4620
4621 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4622 U64_HI(data_mapping),
4623 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4624}
4625
4626static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4627 struct bnx2x_queue_state_params *params)
4628{
4629 struct bnx2x_queue_sp_obj *o = params->q_obj;
4630 struct client_init_ramrod_data *rdata =
4631 (struct client_init_ramrod_data *)o->rdata;
4632 dma_addr_t data_mapping = o->rdata_mapping;
4633 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4634
4635 /* Clear the ramrod data */
4636 memset(rdata, 0, sizeof(*rdata));
4637
4638 /* Fill the ramrod data */
4639 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4640 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4641
4642 /*
4643 * No need for an explicit memory barrier here: the write to
4644 * the SPQ element must anyway be ordered against the update
4645 * of the SPQ producer, which involves a memory read, and the
4646 * required full memory barrier is already placed there
4647 * (inside bnx2x_sp_post()).
4648 */
4649
4650 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4651 U64_HI(data_mapping),
4652 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4653}
4654
4655static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4656 struct bnx2x_queue_state_params *params)
4657{
4658 struct bnx2x_queue_sp_obj *o = params->q_obj;
4659 struct tx_queue_init_ramrod_data *rdata =
4660 (struct tx_queue_init_ramrod_data *)o->rdata;
4661 dma_addr_t data_mapping = o->rdata_mapping;
4662 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4663 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4664 &params->params.tx_only;
4665 u8 cid_index = tx_only_params->cid_index;
4666
4667
4668 if (cid_index >= o->max_cos) {
4669 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4670 o->cl_id, cid_index);
4671 return -EINVAL;
4672 }
4673
4674 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4675 tx_only_params->gen_params.cos,
4676 tx_only_params->gen_params.spcl_id);
4677
4678 /* Clear the ramrod data */
4679 memset(rdata, 0, sizeof(*rdata));
4680
4681 /* Fill the ramrod data */
4682 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4683
4684 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
4685 "sp-client id %d, cos %d\n",
4686 o->cids[cid_index],
4687 rdata->general.client_id,
4688 rdata->general.sp_client_id, rdata->general.cos);
4689
4690 /*
4691 * No need for an explicit memory barrier here: the write to
4692 * the SPQ element must anyway be ordered against the update
4693 * of the SPQ producer, which involves a memory read, and the
4694 * required full memory barrier is already placed there
4695 * (inside bnx2x_sp_post()).
4696 */
4697
4698 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4699 U64_HI(data_mapping),
4700 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4701}
4702
4703static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4704 struct bnx2x_queue_sp_obj *obj,
4705 struct bnx2x_queue_update_params *params,
4706 struct client_update_ramrod_data *data)
4707{
4708 /* Client ID of the client to update */
4709 data->client_id = obj->cl_id;
4710
4711 /* Function ID of the client to update */
4712 data->func_id = obj->func_id;
4713
4714 /* Default VLAN value */
4715 data->default_vlan = cpu_to_le16(params->def_vlan);
4716
4717 /* Inner VLAN stripping */
4718 data->inner_vlan_removal_enable_flg =
4719 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4720 data->inner_vlan_removal_change_flg =
4721 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4722 &params->update_flags);
4723
4724 /* Outer VLAN stripping */
4725 data->outer_vlan_removal_enable_flg =
4726 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4727 data->outer_vlan_removal_change_flg =
4728 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4729 &params->update_flags);
4730
4731 /* Drop packets that have source MAC that doesn't belong to this
4732 * Queue.
4733 */
4734 data->anti_spoofing_enable_flg =
4735 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4736 data->anti_spoofing_change_flg =
4737 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4738
4739 /* Activate/Deactivate */
4740 data->activate_flg =
4741 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4742 data->activate_change_flg =
4743 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4744
4745 /* Enable default VLAN */
4746 data->default_vlan_enable_flg =
4747 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4748 data->default_vlan_change_flg =
4749 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4750 &params->update_flags);
4751
4752 /* silent vlan removal */
4753 data->silent_vlan_change_flg =
4754 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4755 &params->update_flags);
4756 data->silent_vlan_removal_flg =
4757 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4758 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4759 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4760}
4761
4762static inline int bnx2x_q_send_update(struct bnx2x *bp,
4763 struct bnx2x_queue_state_params *params)
4764{
4765 struct bnx2x_queue_sp_obj *o = params->q_obj;
4766 struct client_update_ramrod_data *rdata =
4767 (struct client_update_ramrod_data *)o->rdata;
4768 dma_addr_t data_mapping = o->rdata_mapping;
4769 struct bnx2x_queue_update_params *update_params =
4770 &params->params.update;
4771 u8 cid_index = update_params->cid_index;
4772
4773 if (cid_index >= o->max_cos) {
4774 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4775 o->cl_id, cid_index);
4776 return -EINVAL;
4777 }
4778
4779
4780 /* Clear the ramrod data */
4781 memset(rdata, 0, sizeof(*rdata));
4782
4783 /* Fill the ramrod data */
4784 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4785
4786 /*
4787 * No need for an explicit memory barrier here: the write to
4788 * the SPQ element must anyway be ordered against the update
4789 * of the SPQ producer, which involves a memory read, and the
4790 * required full memory barrier is already placed there
4791 * (inside bnx2x_sp_post()).
4792 */
4793
4794 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4795 o->cids[cid_index], U64_HI(data_mapping),
4796 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4797}
4798
4799/**
4800 * bnx2x_q_send_deactivate - send DEACTIVATE command
4801 *
4802 * @bp: device handle
4803 * @params:
4804 *
4805 * implemented using the UPDATE command.
4806 */
4807static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4808 struct bnx2x_queue_state_params *params)
4809{
4810 struct bnx2x_queue_update_params *update = &params->params.update;
4811
4812 memset(update, 0, sizeof(*update));
4813
4814 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4815
4816 return bnx2x_q_send_update(bp, params);
4817}
4818
4819/**
4820 * bnx2x_q_send_activate - send ACTIVATE command
4821 *
4822 * @bp: device handle
4823 * @params:
4824 *
4825 * implemented using the UPDATE command.
4826 */
4827static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4828 struct bnx2x_queue_state_params *params)
4829{
4830 struct bnx2x_queue_update_params *update = &params->params.update;
4831
4832 memset(update, 0, sizeof(*update));
4833
4834 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4835 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4836
4837 return bnx2x_q_send_update(bp, params);
4838}
4839
4840static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4841 struct bnx2x_queue_state_params *params)
4842{
4843 /* TODO: Not implemented yet. */
4844 return -1;
4845}
4846
4847static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4848 struct bnx2x_queue_state_params *params)
4849{
4850 struct bnx2x_queue_sp_obj *o = params->q_obj;
4851
4852 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4853 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4854 ETH_CONNECTION_TYPE);
4855}
4856
4857static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4858 struct bnx2x_queue_state_params *params)
4859{
4860 struct bnx2x_queue_sp_obj *o = params->q_obj;
4861 u8 cid_idx = params->params.cfc_del.cid_index;
4862
4863 if (cid_idx >= o->max_cos) {
4864 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4865 o->cl_id, cid_idx);
4866 return -EINVAL;
4867 }
4868
4869 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4870 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4871}
4872
4873static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4874 struct bnx2x_queue_state_params *params)
4875{
4876 struct bnx2x_queue_sp_obj *o = params->q_obj;
4877 u8 cid_index = params->params.terminate.cid_index;
4878
4879 if (cid_index >= o->max_cos) {
4880 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4881 o->cl_id, cid_index);
4882 return -EINVAL;
4883 }
4884
4885 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4886 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4887}
4888
4889static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4890 struct bnx2x_queue_state_params *params)
4891{
4892 struct bnx2x_queue_sp_obj *o = params->q_obj;
4893
4894 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4895 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4896 ETH_CONNECTION_TYPE);
4897}
4898
4899static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4900 struct bnx2x_queue_state_params *params)
4901{
4902 switch (params->cmd) {
4903 case BNX2X_Q_CMD_INIT:
4904 return bnx2x_q_init(bp, params);
4905 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4906 return bnx2x_q_send_setup_tx_only(bp, params);
4907 case BNX2X_Q_CMD_DEACTIVATE:
4908 return bnx2x_q_send_deactivate(bp, params);
4909 case BNX2X_Q_CMD_ACTIVATE:
4910 return bnx2x_q_send_activate(bp, params);
4911 case BNX2X_Q_CMD_UPDATE:
4912 return bnx2x_q_send_update(bp, params);
4913 case BNX2X_Q_CMD_UPDATE_TPA:
4914 return bnx2x_q_send_update_tpa(bp, params);
4915 case BNX2X_Q_CMD_HALT:
4916 return bnx2x_q_send_halt(bp, params);
4917 case BNX2X_Q_CMD_CFC_DEL:
4918 return bnx2x_q_send_cfc_del(bp, params);
4919 case BNX2X_Q_CMD_TERMINATE:
4920 return bnx2x_q_send_terminate(bp, params);
4921 case BNX2X_Q_CMD_EMPTY:
4922 return bnx2x_q_send_empty(bp, params);
4923 default:
4924 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4925 return -EINVAL;
4926 }
4927}
4928
4929static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4930 struct bnx2x_queue_state_params *params)
4931{
4932 switch (params->cmd) {
4933 case BNX2X_Q_CMD_SETUP:
4934 return bnx2x_q_send_setup_e1x(bp, params);
4935 case BNX2X_Q_CMD_INIT:
4936 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4937 case BNX2X_Q_CMD_DEACTIVATE:
4938 case BNX2X_Q_CMD_ACTIVATE:
4939 case BNX2X_Q_CMD_UPDATE:
4940 case BNX2X_Q_CMD_UPDATE_TPA:
4941 case BNX2X_Q_CMD_HALT:
4942 case BNX2X_Q_CMD_CFC_DEL:
4943 case BNX2X_Q_CMD_TERMINATE:
4944 case BNX2X_Q_CMD_EMPTY:
4945 return bnx2x_queue_send_cmd_cmn(bp, params);
4946 default:
4947 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4948 return -EINVAL;
4949 }
4950}
4951
4952static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4953 struct bnx2x_queue_state_params *params)
4954{
4955 switch (params->cmd) {
4956 case BNX2X_Q_CMD_SETUP:
4957 return bnx2x_q_send_setup_e2(bp, params);
4958 case BNX2X_Q_CMD_INIT:
4959 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4960 case BNX2X_Q_CMD_DEACTIVATE:
4961 case BNX2X_Q_CMD_ACTIVATE:
4962 case BNX2X_Q_CMD_UPDATE:
4963 case BNX2X_Q_CMD_UPDATE_TPA:
4964 case BNX2X_Q_CMD_HALT:
4965 case BNX2X_Q_CMD_CFC_DEL:
4966 case BNX2X_Q_CMD_TERMINATE:
4967 case BNX2X_Q_CMD_EMPTY:
4968 return bnx2x_queue_send_cmd_cmn(bp, params);
4969 default:
4970 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4971 return -EINVAL;
4972 }
4973}
4974
4975/**
4976 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4977 *
4978 * @bp: device handle
4979 * @o:
4980 * @params:
4981 *
4982 * (not Forwarding)
4983 * It both checks if the requested command is legal in a current
4984 * state and, if it's legal, sets a `next_state' in the object
4985 * that will be used in the completion flow to set the `state'
4986 * of the object.
4987 *
4988 * returns 0 if a requested command is a legal transition,
4989 * -EINVAL otherwise.
4990 */
4991static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4992 struct bnx2x_queue_sp_obj *o,
4993 struct bnx2x_queue_state_params *params)
4994{
4995 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4996 enum bnx2x_queue_cmd cmd = params->cmd;
4997 struct bnx2x_queue_update_params *update_params =
4998 &params->params.update;
4999 u8 next_tx_only = o->num_tx_only;
5000
5001 /*
5002 * Forget all pending for completion commands if a driver only state
5003 * transition has been requested.
5004 */
5005 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5006 o->pending = 0;
5007 o->next_state = BNX2X_Q_STATE_MAX;
5008 }
5009
5010 /*
5011 * Don't allow a next state transition if we are in the middle of
5012 * the previous one.
5013 */
5014 if (o->pending)
5015 return -EBUSY;
5016
5017 switch (state) {
5018 case BNX2X_Q_STATE_RESET:
5019 if (cmd == BNX2X_Q_CMD_INIT)
5020 next_state = BNX2X_Q_STATE_INITIALIZED;
5021
5022 break;
5023 case BNX2X_Q_STATE_INITIALIZED:
5024 if (cmd == BNX2X_Q_CMD_SETUP) {
5025 if (test_bit(BNX2X_Q_FLG_ACTIVE,
5026 &params->params.setup.flags))
5027 next_state = BNX2X_Q_STATE_ACTIVE;
5028 else
5029 next_state = BNX2X_Q_STATE_INACTIVE;
5030 }
5031
5032 break;
5033 case BNX2X_Q_STATE_ACTIVE:
5034 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
5035 next_state = BNX2X_Q_STATE_INACTIVE;
5036
5037 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5038 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5039 next_state = BNX2X_Q_STATE_ACTIVE;
5040
5041 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5042 next_state = BNX2X_Q_STATE_MULTI_COS;
5043 next_tx_only = 1;
5044 }
5045
5046 else if (cmd == BNX2X_Q_CMD_HALT)
5047 next_state = BNX2X_Q_STATE_STOPPED;
5048
5049 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5050 /* If "active" state change is requested, update the
5051 * state accordingly.
5052 */
5053 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5054 &update_params->update_flags) &&
5055 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5056 &update_params->update_flags))
5057 next_state = BNX2X_Q_STATE_INACTIVE;
5058 else
5059 next_state = BNX2X_Q_STATE_ACTIVE;
5060 }
5061
5062 break;
5063 case BNX2X_Q_STATE_MULTI_COS:
5064 if (cmd == BNX2X_Q_CMD_TERMINATE)
5065 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
5066
5067 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
5068 next_state = BNX2X_Q_STATE_MULTI_COS;
5069 next_tx_only = o->num_tx_only + 1;
5070 }
5071
5072 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5073 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5074 next_state = BNX2X_Q_STATE_MULTI_COS;
5075
5076 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5077 /* If "active" state change is requested, update the
5078 * state accordingly.
5079 */
5080 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5081 &update_params->update_flags) &&
5082 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5083 &update_params->update_flags))
5084 next_state = BNX2X_Q_STATE_INACTIVE;
5085 else
5086 next_state = BNX2X_Q_STATE_MULTI_COS;
5087 }
5088
5089 break;
5090 case BNX2X_Q_STATE_MCOS_TERMINATED:
5091 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5092 next_tx_only = o->num_tx_only - 1;
5093 if (next_tx_only == 0)
5094 next_state = BNX2X_Q_STATE_ACTIVE;
5095 else
5096 next_state = BNX2X_Q_STATE_MULTI_COS;
5097 }
5098
5099 break;
5100 case BNX2X_Q_STATE_INACTIVE:
5101 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5102 next_state = BNX2X_Q_STATE_ACTIVE;
5103
5104 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5105 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5106 next_state = BNX2X_Q_STATE_INACTIVE;
5107
5108 else if (cmd == BNX2X_Q_CMD_HALT)
5109 next_state = BNX2X_Q_STATE_STOPPED;
5110
5111 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5112 /* If "active" state change is requested, update the
5113 * state accordingly.
5114 */
5115 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5116 &update_params->update_flags) &&
5117 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5118 &update_params->update_flags)) {
5119 if (o->num_tx_only == 0)
5120 next_state = BNX2X_Q_STATE_ACTIVE;
5121 else /* tx only queues exist for this queue */
5122 next_state = BNX2X_Q_STATE_MULTI_COS;
5123 } else
5124 next_state = BNX2X_Q_STATE_INACTIVE;
5125 }
5126
5127 break;
5128 case BNX2X_Q_STATE_STOPPED:
5129 if (cmd == BNX2X_Q_CMD_TERMINATE)
5130 next_state = BNX2X_Q_STATE_TERMINATED;
5131
5132 break;
5133 case BNX2X_Q_STATE_TERMINATED:
5134 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5135 next_state = BNX2X_Q_STATE_RESET;
5136
5137 break;
5138 default:
5139 BNX2X_ERR("Illegal state: %d\n", state);
5140 }
5141
5142 /* Transition is assured */
5143 if (next_state != BNX2X_Q_STATE_MAX) {
5144 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5145 state, cmd, next_state);
5146 o->next_state = next_state;
5147 o->next_tx_only = next_tx_only;
5148 return 0;
5149 }
5150
5151 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5152
5153 return -EINVAL;
5154}
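/* Illustrative summary (not part of the driver): the legal single-CoS
 * queue lifecycle accepted by the checker above. The array name and the
 * BNX2X_SP_EXAMPLES guard are hypothetical, for illustration only.
 */
#ifdef BNX2X_SP_EXAMPLES
static const enum bnx2x_queue_cmd bnx2x_example_q_lifecycle[] = {
	BNX2X_Q_CMD_INIT,	/* RESET       -> INITIALIZED */
	BNX2X_Q_CMD_SETUP,	/* INITIALIZED -> ACTIVE (or INACTIVE) */
	BNX2X_Q_CMD_HALT,	/* ACTIVE      -> STOPPED */
	BNX2X_Q_CMD_TERMINATE,	/* STOPPED     -> TERMINATED */
	BNX2X_Q_CMD_CFC_DEL,	/* TERMINATED  -> RESET */
};
#endif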
5155
5156void bnx2x_init_queue_obj(struct bnx2x *bp,
5157 struct bnx2x_queue_sp_obj *obj,
5158 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5159 void *rdata,
5160 dma_addr_t rdata_mapping, unsigned long type)
5161{
5162 memset(obj, 0, sizeof(*obj));
5163
5164 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5165 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5166
5167 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5168 obj->max_cos = cid_cnt;
5169 obj->cl_id = cl_id;
5170 obj->func_id = func_id;
5171 obj->rdata = rdata;
5172 obj->rdata_mapping = rdata_mapping;
5173 obj->type = type;
5174 obj->next_state = BNX2X_Q_STATE_MAX;
5175
5176 if (CHIP_IS_E1x(bp))
5177 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5178 else
5179 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5180
5181 obj->check_transition = bnx2x_queue_chk_transition;
5182
5183 obj->complete_cmd = bnx2x_queue_comp_cmd;
5184 obj->wait_comp = bnx2x_queue_wait_comp;
5185 obj->set_pending = bnx2x_queue_set_pending;
5186}
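/* Illustrative sketch (hypothetical caller, not part of the driver):
 * initializing a queue object that owns a primary connection plus one
 * tx-only connection. The cid values, client id and rdata buffer are
 * placeholders; cid_cnt must not exceed BNX2X_MULTI_TX_COS.
 */
#ifdef BNX2X_SP_EXAMPLES
static void bnx2x_example_init_queue_obj(struct bnx2x *bp,
					 struct bnx2x_queue_sp_obj *q_obj,
					 void *rdata,
					 dma_addr_t rdata_mapping)
{
	u32 cids[2] = {17, 49};		/* primary cid + one tx-only cid */
	unsigned long q_type = 0;

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, 5 /* cl_id */, cids, 2, BP_FUNC(bp),
			     rdata, rdata_mapping, q_type);
}
#endif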
5187
5188void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5189 struct bnx2x_queue_sp_obj *obj,
5190 u32 cid, u8 index)
5191{
5192 obj->cids[index] = cid;
5193}
5194
5195/********************** Function state object *********************************/
5196enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5197 struct bnx2x_func_sp_obj *o)
5198{
5199 /* in the middle of transaction - return INVALID state */
5200 if (o->pending)
5201 return BNX2X_F_STATE_MAX;
5202
5203 /*
5204 * ensure the ordering of reading o->pending and o->state:
5205 * o->pending should be read first
5206 */
5207 rmb();
5208
5209 return o->state;
5210}
5211
5212static int bnx2x_func_wait_comp(struct bnx2x *bp,
5213 struct bnx2x_func_sp_obj *o,
5214 enum bnx2x_func_cmd cmd)
5215{
5216 return bnx2x_state_wait(bp, cmd, &o->pending);
5217}
5218
5219/**
5220 * bnx2x_func_state_change_comp - complete the state machine transition
5221 *
5222 * @bp: device handle
5223 * @o:
5224 * @cmd:
5225 *
5226 * Called on state change transition. Completes the state
5227 * machine transition only - no HW interaction.
5228 */
5229static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5230 struct bnx2x_func_sp_obj *o,
5231 enum bnx2x_func_cmd cmd)
5232{
5233 unsigned long cur_pending = o->pending;
5234
5235 if (!test_and_clear_bit(cmd, &cur_pending)) {
5236 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5237 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5238 o->state, cur_pending, o->next_state);
5239 return -EINVAL;
5240 }
5241
5242 DP(BNX2X_MSG_SP,
5243 "Completing command %d for func %d, setting state to %d\n",
5244 cmd, BP_FUNC(bp), o->next_state);
5245
5246 o->state = o->next_state;
5247 o->next_state = BNX2X_F_STATE_MAX;
5248
5249 /* It's important that o->state and o->next_state are
5250 * updated before o->pending.
5251 */
5252 wmb();
5253
5254 clear_bit(cmd, &o->pending);
5255 smp_mb__after_clear_bit();
5256
5257 return 0;
5258}
5259
5260/**
5261 * bnx2x_func_comp_cmd - complete the state change command
5262 *
5263 * @bp: device handle
5264 * @o:
5265 * @cmd:
5266 *
5267 * Checks that the arrived completion is expected.
5268 */
5269static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5270 struct bnx2x_func_sp_obj *o,
5271 enum bnx2x_func_cmd cmd)
5272{
5273 /* Complete the state machine part first, check if it's a
5274 * legal completion.
5275 */
5276 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5277 return rc;
5278}
5279
5280/**
5281 * bnx2x_func_chk_transition - perform function state machine transition
5282 *
5283 * @bp: device handle
5284 * @o:
5285 * @params:
5286 *
5287 * It both checks if the requested command is legal in a current
5288 * state and, if it's legal, sets a `next_state' in the object
5289 * that will be used in the completion flow to set the `state'
5290 * of the object.
5291 *
5292 * returns 0 if a requested command is a legal transition,
5293 * -EINVAL otherwise.
5294 */
5295static int bnx2x_func_chk_transition(struct bnx2x *bp,
5296 struct bnx2x_func_sp_obj *o,
5297 struct bnx2x_func_state_params *params)
5298{
5299 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5300 enum bnx2x_func_cmd cmd = params->cmd;
5301
5302 /*
5303 * Forget all pending for completion commands if a driver only state
5304 * transition has been requested.
5305 */
5306 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5307 o->pending = 0;
5308 o->next_state = BNX2X_F_STATE_MAX;
5309 }
5310
5311 /*
5312 * Don't allow a next state transition if we are in the middle of
5313 * the previous one.
5314 */
5315 if (o->pending)
5316 return -EBUSY;
5317
5318 switch (state) {
5319 case BNX2X_F_STATE_RESET:
5320 if (cmd == BNX2X_F_CMD_HW_INIT)
5321 next_state = BNX2X_F_STATE_INITIALIZED;
5322
5323 break;
5324 case BNX2X_F_STATE_INITIALIZED:
5325 if (cmd == BNX2X_F_CMD_START)
5326 next_state = BNX2X_F_STATE_STARTED;
5327
5328 else if (cmd == BNX2X_F_CMD_HW_RESET)
5329 next_state = BNX2X_F_STATE_RESET;
5330
5331 break;
5332 case BNX2X_F_STATE_STARTED:
5333 if (cmd == BNX2X_F_CMD_STOP)
5334 next_state = BNX2X_F_STATE_INITIALIZED;
5335 else if (cmd == BNX2X_F_CMD_TX_STOP)
5336 next_state = BNX2X_F_STATE_TX_STOPPED;
5337
5338 break;
5339 case BNX2X_F_STATE_TX_STOPPED:
5340 if (cmd == BNX2X_F_CMD_TX_START)
5341 next_state = BNX2X_F_STATE_STARTED;
5342
5343 break;
5344 default:
5345 BNX2X_ERR("Unknown state: %d\n", state);
5346 }
5347
5348 /* Transition is assured */
5349 if (next_state != BNX2X_F_STATE_MAX) {
5350 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5351 state, cmd, next_state);
5352 o->next_state = next_state;
5353 return 0;
5354 }
5355
5356 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5357 state, cmd);
5358
5359 return -EINVAL;
5360}
5361
5362/**
5363 * bnx2x_func_init_func - performs HW init at function stage
5364 *
5365 * @bp: device handle
5366 * @drv:
5367 *
5368 * Init HW when the current phase is
5369 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5370 * HW blocks.
5371 */
5372static inline int bnx2x_func_init_func(struct bnx2x *bp,
5373 const struct bnx2x_func_sp_drv_ops *drv)
5374{
5375 return drv->init_hw_func(bp);
5376}
5377
5378/**
5379 * bnx2x_func_init_port - performs HW init at port stage
5380 *
5381 * @bp: device handle
5382 * @drv:
5383 *
5384 * Init HW when the current phase is
5385 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5386 * FUNCTION-only HW blocks.
5387 *
5388 */
5389static inline int bnx2x_func_init_port(struct bnx2x *bp,
5390 const struct bnx2x_func_sp_drv_ops *drv)
5391{
5392 int rc = drv->init_hw_port(bp);
5393 if (rc)
5394 return rc;
5395
5396 return bnx2x_func_init_func(bp, drv);
5397}
5398
5399/**
5400 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5401 *
5402 * @bp: device handle
5403 * @drv:
5404 *
5405 * Init HW when the current phase is
5406 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5407 * PORT-only and FUNCTION-only HW blocks.
5408 */
5409static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5410 const struct bnx2x_func_sp_drv_ops *drv)
5411{
5412 int rc = drv->init_hw_cmn_chip(bp);
5413 if (rc)
5414 return rc;
5415
5416 return bnx2x_func_init_port(bp, drv);
5417}
5418
5419/**
5420 * bnx2x_func_init_cmn - performs HW init at common stage
5421 *
5422 * @bp: device handle
5423 * @drv:
5424 *
5425 * Init HW when the current phase is
5426 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5427 * PORT-only and FUNCTION-only HW blocks.
5428 */
5429static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5430 const struct bnx2x_func_sp_drv_ops *drv)
5431{
5432 int rc = drv->init_hw_cmn(bp);
5433 if (rc)
5434 return rc;
5435
5436 return bnx2x_func_init_port(bp, drv);
5437}
5438
5439static int bnx2x_func_hw_init(struct bnx2x *bp,
5440 struct bnx2x_func_state_params *params)
5441{
5442 u32 load_code = params->params.hw_init.load_phase;
5443 struct bnx2x_func_sp_obj *o = params->f_obj;
5444 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5445 int rc = 0;
5446
5447 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5448 BP_ABS_FUNC(bp), load_code);
5449
5450 /* Prepare buffers for unzipping the FW */
5451 rc = drv->gunzip_init(bp);
5452 if (rc)
5453 return rc;
5454
5455 /* Prepare FW */
5456 rc = drv->init_fw(bp);
5457 if (rc) {
5458 BNX2X_ERR("Error loading firmware\n");
5459 goto init_err;
5460 }
5461
5462 /* Handle the beginning of COMMON_XXX phases separately... */
5463 switch (load_code) {
5464 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5465 rc = bnx2x_func_init_cmn_chip(bp, drv);
5466 if (rc)
5467 goto init_err;
5468
5469 break;
5470 case FW_MSG_CODE_DRV_LOAD_COMMON:
5471 rc = bnx2x_func_init_cmn(bp, drv);
5472 if (rc)
5473 goto init_err;
5474
5475 break;
5476 case FW_MSG_CODE_DRV_LOAD_PORT:
5477 rc = bnx2x_func_init_port(bp, drv);
5478 if (rc)
5479 goto init_err;
5480
5481 break;
5482 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5483 rc = bnx2x_func_init_func(bp, drv);
5484 if (rc)
5485 goto init_err;
5486
5487 break;
5488 default:
5489 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5490 rc = -EINVAL;
5491 }
5492
5493init_err:
5494 drv->gunzip_end(bp);
5495
5496 /* In case of success, complete the command immediately: no ramrods
5497 * have been sent.
5498 */
5499 if (!rc)
5500 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5501
5502 return rc;
5503}
5504
5505/**
5506 * bnx2x_func_reset_func - reset HW at function stage
5507 *
5508 * @bp: device handle
5509 * @drv:
5510 *
5511 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5512 * FUNCTION-only HW blocks.
5513 */
5514static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5515 const struct bnx2x_func_sp_drv_ops *drv)
5516{
5517 drv->reset_hw_func(bp);
5518}
5519
5520/**
5521 * bnx2x_func_reset_port - reset HW at port stage
5522 *
5523 * @bp: device handle
5524 * @drv:
5525 *
5526 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5527 * FUNCTION-only and PORT-only HW blocks.
5528 *
5529 * !!!IMPORTANT!!!
5530 *
5531 * It's important to call reset_port before reset_func() as the last thing
5532 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5533 * makes impossible any DMAE transactions.
5534 */
5535static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5536 const struct bnx2x_func_sp_drv_ops *drv)
5537{
5538 drv->reset_hw_port(bp);
5539 bnx2x_func_reset_func(bp, drv);
5540}
5541
5542/**
5543 * bnx2x_func_reset_cmn - reset HW at common stage
5544 *
5545 * @bp: device handle
5546 * @drv:
5547 *
5548 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5549 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5550 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5551 */
5552static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5553 const struct bnx2x_func_sp_drv_ops *drv)
5554{
5555 bnx2x_func_reset_port(bp, drv);
5556 drv->reset_hw_cmn(bp);
5557}
5558
5559
5560static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5561 struct bnx2x_func_state_params *params)
5562{
5563 u32 reset_phase = params->params.hw_reset.reset_phase;
5564 struct bnx2x_func_sp_obj *o = params->f_obj;
5565 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5566
5567 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5568 reset_phase);
5569
5570 switch (reset_phase) {
5571 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5572 bnx2x_func_reset_cmn(bp, drv);
5573 break;
5574 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5575 bnx2x_func_reset_port(bp, drv);
5576 break;
5577 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5578 bnx2x_func_reset_func(bp, drv);
5579 break;
5580 default:
5581 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5582 reset_phase);
5583 break;
5584 }
5585
5586 /* Complete the command immediately: no ramrods have been sent. */
5587 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5588
5589 return 0;
5590}
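/*
 * Example (not part of the original file): a sketch of the matching
 * unload path, assuming reset_code came back from the MCP's UNLOAD
 * request. As with HW_INIT, no ramrod is sent and the command
 * completes immediately.
 */
static void example_reset_hw(struct bnx2x *bp, u32 reset_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_RESET;
	func_params.params.hw_reset.reset_phase = reset_code;

	bnx2x_func_state_change(bp, &func_params);
}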
5591
5592static inline int bnx2x_func_send_start(struct bnx2x *bp,
5593 struct bnx2x_func_state_params *params)
5594{
5595 struct bnx2x_func_sp_obj *o = params->f_obj;
5596 struct function_start_data *rdata =
5597 (struct function_start_data *)o->rdata;
5598 dma_addr_t data_mapping = o->rdata_mapping;
5599 struct bnx2x_func_start_params *start_params = &params->params.start;
5600
5601 memset(rdata, 0, sizeof(*rdata));
5602
5603 /* Fill the ramrod data with provided parameters */
5604 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5605 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5606 rdata->path_id = BP_PATH(bp);
5607 rdata->network_cos_mode = start_params->network_cos_mode;
5608
5609 /*
5610 * No need for an explicit memory barrier here as long as we
5611 * ensure the ordering of writing to the SPQ element and
5612 * updating of the SPQ producer, which involves a memory read.
5613 * If that read is ever removed, a full memory barrier will be
5614 * needed there (inside bnx2x_sp_post()).
5615 */
5616
5617 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5618 U64_HI(data_mapping),
5619 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5620}
5621
5622static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5623 struct bnx2x_func_state_params *params)
5624{
5625 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5626 NONE_CONNECTION_TYPE);
5627}
5628
5629static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5630 struct bnx2x_func_state_params *params)
5631{
5632 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5633 NONE_CONNECTION_TYPE);
5634}
5635static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5636 struct bnx2x_func_state_params *params)
5637{
5638 struct bnx2x_func_sp_obj *o = params->f_obj;
5639 struct flow_control_configuration *rdata =
5640 (struct flow_control_configuration *)o->rdata;
5641 dma_addr_t data_mapping = o->rdata_mapping;
5642 struct bnx2x_func_tx_start_params *tx_start_params =
5643 &params->params.tx_start;
5644 int i;
5645
5646 memset(rdata, 0, sizeof(*rdata));
5647
5648 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5649 rdata->dcb_version = tx_start_params->dcb_version;
5650 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5651
5652 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5653 rdata->traffic_type_to_priority_cos[i] =
5654 tx_start_params->traffic_type_to_priority_cos[i];
5655
5656 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5657 U64_HI(data_mapping),
5658 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5659}
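/*
 * Example (not part of the original file): a sketch of how TX_STOP and
 * TX_START might be paired to pause traffic around a COS/priority
 * reconfiguration (the DCB code is the real user of these commands).
 * The caller-supplied tx parameters are an assumption of this sketch;
 * RAMROD_COMP_WAIT is set so each ramrod completes before we continue.
 */
static int example_pause_resume_tx(struct bnx2x *bp,
				   struct bnx2x_func_tx_start_params *tx)
{
	struct bnx2x_func_state_params func_params = {NULL};
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;

	func_params.cmd = BNX2X_F_CMD_TX_STOP;
	rc = bnx2x_func_state_change(bp, &func_params);
	if (rc)
		return rc;

	/* ... apply the new traffic-type-to-COS mapping here ... */

	func_params.cmd = BNX2X_F_CMD_TX_START;
	func_params.params.tx_start = *tx;
	return bnx2x_func_state_change(bp, &func_params);
}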
5660
5661static int bnx2x_func_send_cmd(struct bnx2x *bp,
5662 struct bnx2x_func_state_params *params)
5663{
5664 switch (params->cmd) {
5665 case BNX2X_F_CMD_HW_INIT:
5666 return bnx2x_func_hw_init(bp, params);
5667 case BNX2X_F_CMD_START:
5668 return bnx2x_func_send_start(bp, params);
5669 case BNX2X_F_CMD_STOP:
5670 return bnx2x_func_send_stop(bp, params);
5671 case BNX2X_F_CMD_HW_RESET:
5672 return bnx2x_func_hw_reset(bp, params);
5673 case BNX2X_F_CMD_TX_STOP:
5674 return bnx2x_func_send_tx_stop(bp, params);
5675 case BNX2X_F_CMD_TX_START:
5676 return bnx2x_func_send_tx_start(bp, params);
5677 default:
5678 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5679 return -EINVAL;
5680 }
5681}
5682
5683void bnx2x_init_func_obj(struct bnx2x *bp,
5684 struct bnx2x_func_sp_obj *obj,
5685 void *rdata, dma_addr_t rdata_mapping,
5686 struct bnx2x_func_sp_drv_ops *drv_iface)
5687{
5688 memset(obj, 0, sizeof(*obj));
5689
5690 mutex_init(&obj->one_pending_mutex);
5691
5692 obj->rdata = rdata;
5693 obj->rdata_mapping = rdata_mapping;
5694
5695 obj->send_cmd = bnx2x_func_send_cmd;
5696 obj->check_transition = bnx2x_func_chk_transition;
5697 obj->complete_cmd = bnx2x_func_comp_cmd;
5698 obj->wait_comp = bnx2x_func_wait_comp;
5699
5700 obj->drv = drv_iface;
5701}
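/*
 * Example (not part of the original file): typical one-time setup of
 * the function object during driver init. The slow-path buffer
 * accessors and the bnx2x_func_sp_drv ops table live in the main
 * driver file; their names here are assumptions of this sketch.
 */
static void example_setup_func_obj(struct bnx2x *bp)
{
	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    &bnx2x_func_sp_drv);
}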
5702
5703/**
5704 * bnx2x_func_state_change - perform Function state change transition
5705 *
5706 * @bp: device handle
5707 * @params: parameters to perform the transaction
5708 *
5709 * Returns 0 in case of a successfully completed transition,
5710 * a negative error code in case of failure, or a positive
5711 * (EBUSY) value if there is a completion that is still
5712 * pending (possible only if RAMROD_COMP_WAIT is not
5713 * set in params->ramrod_flags for asynchronous
5714 * commands).
5715 */
5716int bnx2x_func_state_change(struct bnx2x *bp,
5717 struct bnx2x_func_state_params *params)
5718{
5719 struct bnx2x_func_sp_obj *o = params->f_obj;
5720 int rc;
5721 enum bnx2x_func_cmd cmd = params->cmd;
5722 unsigned long *pending = &o->pending;
5723
5724 mutex_lock(&o->one_pending_mutex);
5725
5726 /* Check that the requested transition is legal */
5727 if (o->check_transition(bp, o, params)) {
5728 mutex_unlock(&o->one_pending_mutex);
5729 return -EINVAL;
5730 }
5731
5732 /* Set "pending" bit */
5733 set_bit(cmd, pending);
5734
5735 /* Don't send a command if only driver cleanup was requested */
5736 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5737 bnx2x_func_state_change_comp(bp, o, cmd);
5738 mutex_unlock(&o->one_pending_mutex);
5739 } else {
5740 /* Send a ramrod */
5741 rc = o->send_cmd(bp, params);
5742
5743 mutex_unlock(&o->one_pending_mutex);
5744
5745 if (rc) {
5746 o->next_state = BNX2X_F_STATE_MAX;
5747 clear_bit(cmd, pending);
5748 smp_mb__after_clear_bit();
5749 return rc;
5750 }
5751
5752 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5753 rc = o->wait_comp(bp, o, cmd);
5754 if (rc)
5755 return rc;
5756
5757 return 0;
5758 }
5759 }
5760
5761 return !!test_bit(cmd, pending);
5762}
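/*
 * Example (not part of the original file): a synchronous FUNCTION_START
 * request. mf_mode and sd_vlan_tag are assumed to come from the bp
 * multi-function configuration (bp->mf_mode / bp->mf_ov). Setting
 * RAMROD_COMP_WAIT makes the call wait for the ramrod completion
 * instead of returning with the command still pending.
 */
static int example_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	return bnx2x_func_state_change(bp, &func_params);
}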