bnx2x: Parity errors recovery for 578xx
drivers/net/bnx2x/bnx2x_sp.c
/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI        16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
                                        struct bnx2x_exe_queue_obj *o,
                                        int exe_len,
                                        union bnx2x_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
{
        memset(o, 0, sizeof(*o));

        INIT_LIST_HEAD(&o->exe_queue);
        INIT_LIST_HEAD(&o->pending_comp);

        spin_lock_init(&o->lock);

        o->exe_chunk_len = exe_len;
        o->owner         = owner;

        /* Owner specific callbacks */
        o->validate = validate;
        o->optimize = optimize;
        o->execute  = exec;
        o->get      = get;

        DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
                         "length of %d\n", exe_len);
}
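
/*
 * Illustrative wiring (a sketch only; it mirrors the way bnx2x_init_mac_obj()
 * below sets up its queue for E1x chips, where the chunk length is 1):
 *
 *      bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1,
 *                           (union bnx2x_qable_obj *)mac_obj,
 *                           bnx2x_validate_vlan_mac,
 *                           bnx2x_optimize_vlan_mac,
 *                           bnx2x_execute_vlan_mac,
 *                           bnx2x_exeq_get_mac);
 */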

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
                                             struct bnx2x_exeq_elem *elem)
{
        DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
        kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;
        int cnt = 0;

        spin_lock_bh(&o->lock);

        list_for_each_entry(elem, &o->exe_queue, link)
                cnt++;

        spin_unlock_bh(&o->lock);

        return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:         driver handle
 * @o:          queue
 * @elem:       new command to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
                                      struct bnx2x_exe_queue_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      bool restore)
{
        int rc;

        spin_lock_bh(&o->lock);

        if (!restore) {
                /* Try to cancel this element queue */
                rc = o->optimize(bp, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(bp, o->owner, elem);
                if (rc) {
                        BNX2X_ERR("Preamble failed: %d\n", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        list_add_tail(&elem->link, &o->exe_queue);

        spin_unlock_bh(&o->lock);

        return 0;

free_and_exit:
        bnx2x_exe_queue_free_elem(bp, elem);

        spin_unlock_bh(&o->lock);

        return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
        struct bnx2x *bp,
        struct bnx2x_exe_queue_obj *o)
{
        struct bnx2x_exeq_elem *elem;

        while (!list_empty(&o->pending_comp)) {
                elem = list_first_entry(&o->pending_comp,
                                        struct bnx2x_exeq_elem, link);

                list_del(&elem->link);
                bnx2x_exe_queue_free_elem(bp, elem);
        }
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
                                                 struct bnx2x_exe_queue_obj *o)
{
        spin_lock_bh(&o->lock);

        __bnx2x_exe_queue_reset_pending(bp, o);

        spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
                                       struct bnx2x_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        memset(&spacer, 0, sizeof(spacer));

        spin_lock_bh(&o->lock);

        /*
         * Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the
         * FW which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!list_empty(&o->pending_comp)) {
                if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
                                         "resetting pending_comp\n");
                        __bnx2x_exe_queue_reset_pending(bp, o);
                } else {
                        spin_unlock_bh(&o->lock);
                        return 1;
                }
        }

        /*
         * Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!list_empty(&o->exe_queue)) {
                elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
                                        link);
                WARN_ON(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /*
                         * Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * bnx2x_exe_queue_empty() without locking.
                         */
                        list_add_tail(&spacer.link, &o->pending_comp);
                        mb();
                        list_del(&elem->link);
                        list_add_tail(&elem->link, &o->pending_comp);
                        list_del(&spacer.link);
                } else
                        break;
        }

        /* Sanity check */
        if (!cur_len) {
                spin_unlock_bh(&o->lock);
                return 0;
        }

        rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /*
                 * In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                list_splice_init(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /*
                 * If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __bnx2x_exe_queue_reset_pending(bp, o);

        spin_unlock_bh(&o->lock);
        return rc;
}
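
/*
 * Caller contract (a sketch, not new driver logic): a negative return means
 * the chunk failed and the commands were put back on exe_queue; 0 means the
 * chunk completed with nothing left pending; a positive value means a ramrod
 * is in flight and its completion must arrive before the next step, e.g.:
 *
 *      rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
 *      if (rc < 0)
 *              return rc;
 *      if (rc > 0)
 *              rc = raw->wait_comp(bp, raw);
 */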

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
        bool empty = list_empty(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
        struct bnx2x *bp)
{
        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
        return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
        return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        clear_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
        smp_mb__before_clear_bit();
        set_bit(o->state, o->pstate);
        smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
                                   unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(bp))
                cnt *= 20;

        DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

        might_sleep();
        while (cnt--) {
                if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
                        DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
                        return 0;
                }

                usleep_range(1000, 1000);

                if (bp->panic)
                        return -EIO;
        }

        /* timeout! */
        BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}
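
/*
 * Pending-bit protocol in one place (an assumed flow, matching the raw_obj
 * callbacks above): the submitter sets the state bit, posts a ramrod, and the
 * slow-path completion handler clears the bit, releasing any waiter:
 *
 *      r->set_pending(r);
 *      ... post the ramrod to the FW ...
 *      ... completion path: r->clear_pending(r) ...
 *      rc = bnx2x_state_wait(bp, r->state, r->pstate);
 */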

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
        return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        WARN_ON(!mp);

        return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        WARN_ON(!vp);

        return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->get(mp, 1))
                return false;

        if (!vp->get(vp, 1)) {
                mp->put(mp, 1);
                return false;
        }

        return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
        struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

        if (!mp->put(mp, 1))
                return false;

        if (!vp->put(vp, 1)) {
                mp->get(mp, 1);
                return false;
        }

        return true;
}
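
/*
 * Design note with a small sketch (an observation on the code above, nothing
 * new): the combined VLAN-MAC variants take one credit from each pool and
 * roll the MAC side back if the VLAN side fails, so callers such as the
 * 'validate' functions below see an all-or-nothing result:
 *
 *      if (!o->get_credit(o))
 *              return -EINVAL;
 *      ...
 *      o->put_credit(o);
 */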

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
                               union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        if (!is_valid_ether_addr(data->mac.mac))
                return -EINVAL;

        /* Check if a requested MAC already exists */
        list_for_each_entry(pos, &o->head, link)
                if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
                                union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return -EEXIST;

        return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
                                union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                             ETH_ALEN)))
                        return -EEXIST;

        return 0;
}

/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
                            union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        list_for_each_entry(pos, &o->head, link)
                if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        list_for_each_entry(pos, &o->head, link)
                if (data->vlan.vlan == pos->u.vlan.vlan)
                        return pos;

        return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
                                 union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;

        list_for_each_entry(pos, &o->head, link)
                if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
                    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
                             ETH_ALEN)))
                        return pos;

        return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
                             struct bnx2x_vlan_mac_obj *dst_o,
                             union bnx2x_classification_ramrod_data *data)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(src_o, data);

        /* check if configuration can be added */
        rc = dst_o->check_add(dst_o, data);

        /* If this classification can not be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return false;

        return true;
}

static bool bnx2x_check_move_always_err(
        struct bnx2x_vlan_mac_obj *src_o,
        struct bnx2x_vlan_mac_obj *dst_o,
        union bnx2x_classification_ramrod_data *data)
{
        return false;
}


static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        u8 rx_tx_flag = 0;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
            (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

/* LLH CAM line allocations */
enum {
        LLH_CAM_ISCSI_ETH_LINE = 0,
        LLH_CAM_ETH_LINE,
        LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                                 bool add, unsigned char *dev_addr, int index)
{
        u32 wb_data[2];
        u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                         NIG_REG_LLH0_FUNC_MEM;

        if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
                return;

        DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
           (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a u64 WB register */
                reg_offset += 8*index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

                REG_WR_DMAE(bp, reg_offset, wb_data, 2);
        }

        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
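
/*
 * Worked example of the packing above (MAC value hypothetical): for
 * aa:bb:cc:dd:ee:ff the u64 WB register is written as
 *
 *      wb_data[0] = 0xccddeeff;        bytes 2..5 of the address
 *      wb_data[1] = 0x0000aabb;        bytes 0..1 of the address
 */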

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue for which we want to configure this rule
 * @add:        if true the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
        struct eth_classify_cmd_header *hdr)
{
        struct bnx2x_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx or/and Tx (internal switching) configuration ? */
        hdr->cmd_general_data |=
                bnx2x_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       BNX2X_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:
 *
 * Currently we always configure one rule and set the echo field to contain a
 * CID and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
                                struct eth_classify_header *hdr, int rule_cnt)
{
        hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
        hdr->rule_cnt = (u8)rule_cnt;
}
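
/*
 * Echo encoding sketch (numbers illustrative; BNX2X_SWCID_SHIFT is assumed
 * to be 17 here, per its definition in bnx2x.h): for cid 5 and type 1,
 *
 *      hdr->echo = (5 & BNX2X_SWCID_MASK) | (1 << 17) = 0x00020005
 *
 * so the completion handler can recover both the connection id and the
 * pending command type from a single field.
 */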

/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
                                 struct bnx2x_vlan_mac_obj *o,
                                 struct bnx2x_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /*
         * Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When multiple unicast ETH MACs PF configuration in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of 8 per function MAC
         * entries in the LLH register. There is also
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
         * total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != BNX2X_VLAN_MAC_MOVE) {
                if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac,
                                             LLH_CAM_ISCSI_ETH_LINE);
                else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
                        bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Setup a command header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
                         "Queue %d\n", (add ? "add" : "delete"),
           BNX2X_MAC_PRN_LIST(mac), raw->cl_id);

        /* Set a MAC itself */
        bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                        true, CLASSIFY_RULE_OPCODE_MAC,
                                        &rule_entry->mac.header);

                /* Set a MAC itself */
                bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
           writing */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:         device handle
 * @o:          queue
 * @type:
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
        struct mac_configuration_hdr *hdr)
{
        struct bnx2x_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (u8)cam_offset;
        hdr->client_id = 0xff;
        hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
        u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
        struct bnx2x_raw_obj *r = &o->raw;
        u32 cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = cpu_to_le16(vlan_id);

        if (add) {
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
                SET_FLAG(cfg_entry->flags,
                         MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

                /* Set a MAC in a ramrod data */
                bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else
                SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
        u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];
        struct bnx2x_raw_obj *raw = &o->raw;

        bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
                                         &config->hdr);
        bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
           (add ? "setting" : "clearing"),
           BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       bnx2x_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /*
         * 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  struct bnx2x_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        int cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
                                      &rule_entry->vlan.header);

        DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
           vlan);

        /* Set a VLAN itself */
        rule_entry->vlan.vlan = cpu_to_le16(vlan);

        /* MOVE: Add a rule that will add this VLAN to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                        true, CLASSIFY_RULE_OPCODE_VLAN,
                                        &rule_entry->vlan.header);

                /* Set a VLAN itself */
                rule_entry->vlan.vlan = cpu_to_le16(vlan);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
           writing */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
                                      struct bnx2x_vlan_mac_obj *o,
                                      struct bnx2x_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        int cmd = elem->cmd_data.vlan_mac.cmd;
        bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
        u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
        u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;


        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                memset(data, 0, sizeof(*data));

        /* Set a rule header */
        bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
                                      &rule_entry->pair.header);

        /* Set VLAN and MAC themselves */
        rule_entry->pair.vlan = cpu_to_le16(vlan);
        bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                              &rule_entry->pair.mac_mid,
                              &rule_entry->pair.mac_lsb, mac);

        /* MOVE: Add a rule that will add this pair to the target Queue */
        if (cmd == BNX2X_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
                                        elem->cmd_data.vlan_mac.target_obj,
                                        true, CLASSIFY_RULE_OPCODE_PAIR,
                                        &rule_entry->pair.header);

                /* Set the VLAN and MAC themselves */
                rule_entry->pair.vlan = cpu_to_le16(vlan);
                bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
                                      &rule_entry->pair.mac_mid,
                                      &rule_entry->pair.mac_lsb, mac);
        }

        /* Set the ramrod data header */
        /* TODO: take this to the higher level in order to prevent multiple
           writing */
        bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 * @elem:       bnx2x_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
                                       struct bnx2x_vlan_mac_obj *o,
                                       struct bnx2x_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
{
        struct bnx2x_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /*
         * 57710 and 57711 do not support MOVE command,
         * so it's either ADD or DEL
         */
        bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                true : false;

        /* Reset the ramrod data buffer */
        memset(config, 0, sizeof(*config));

        bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
                                     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
                                     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
        list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
                           struct bnx2x_vlan_mac_ramrod_params *p,
                           struct bnx2x_vlan_mac_registry_elem **ppos)
{
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (list_empty(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = list_first_entry(&o->head,
                                         struct bnx2x_vlan_mac_registry_elem,
                                         link);
        else
                *ppos = list_next_entry(*ppos, link);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (list_is_last(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        __set_bit(RAMROD_RESTORE, &p->ramrod_flags);

        return bnx2x_config_vlan_mac(bp, p);
}
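
/*
 * Restore-iteration sketch (assumed caller shape, in the spirit of the flows
 * that re-apply classification after e.g. a parity error recovery):
 *
 *      struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *      int rc;
 *
 *      do {
 *              rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *              if (rc < 0)
 *                      return rc;
 *      } while (pos != NULL);
 */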

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
        struct bnx2x_exe_queue_obj *o,
        struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem *pos;
        struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

        /* Check pending for execution commands */
        list_for_each_entry(pos, &o->exe_queue, link)
                if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
                            sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
        struct bnx2x_exe_queue_obj *o,
        struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem *pos;
        struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

        /* Check pending for execution commands */
        list_for_each_entry(pos, &o->exe_queue, link)
                if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
                            sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
        struct bnx2x_exe_queue_obj *o,
        struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem *pos;
        struct bnx2x_vlan_mac_ramrod_data *data =
                &elem->cmd_data.vlan_mac.u.vlan_mac;

        /* Check pending for execution commands */
        list_for_each_entry(pos, &o->exe_queue, link)
                if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
                            sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:         device handle
 * @qo:         bnx2x_qable_obj
 * @elem:       bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
                                 "current registry state\n");
                return rc;
        }

        /*
         * Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
                return -EEXIST;
        }

        /*
         * TODO: Check the pending MOVE from other objects where this
         * object is a destination object.
         */

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->get_credit(o)))
                return -EINVAL;

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:         device handle
 * @qo:         quable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
                                              union bnx2x_qable_obj *qo,
                                              struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_registry_elem *pos;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return -EEXIST.
         */
        pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
                                 "current registry state\n");
                return -EEXIST;
        }

        /*
         * Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                BNX2X_ERR("There is a pending MOVE command already\n");
                return -EINVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
                return -EEXIST;
        }

        /* Return the credit to the credit pool if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->put_credit(o))) {
                BNX2X_ERR("Failed to return a credit\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:         device handle
 * @qo:         quable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
                                               union bnx2x_qable_obj *qo,
                                               struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct bnx2x_exeq_elem query_elem;
        struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /*
         * Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
                DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
                                 "current registry state\n");
                return -EINVAL;
        }

        /*
         * Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        memcpy(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending DEL command on the source "
                          "queue already\n");
                return -EINVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
                return -EEXIST;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                BNX2X_ERR("There is a pending ADD command on the "
                          "destination queue already\n");
                return -EINVAL;
        }

        /* Consume the credit if not requested not to */
        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              dest_o->get_credit(dest_o)))
                return -EINVAL;

        if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return -EINVAL;
        }

        return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
                                   union bnx2x_qable_obj *qo,
                                   struct bnx2x_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
                return bnx2x_validate_vlan_mac_add(bp, qo, elem);
        case BNX2X_VLAN_MAC_DEL:
                return bnx2x_validate_vlan_mac_del(bp, qo, elem);
        case BNX2X_VLAN_MAC_MOVE:
                return bnx2x_validate_vlan_mac_move(bp, qo, elem);
        default:
                return -EINVAL;
        }
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:         device handle
 * @o:          bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
                               struct bnx2x_vlan_mac_obj *o)
{
        int cnt = 5000, rc;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_raw_obj *raw = &o->raw;

        while (cnt--) {
                /* Wait for the current command to complete */
                rc = raw->wait_comp(bp, raw);
                if (rc)
                        return rc;

                /* Wait until there are no pending commands */
                if (!bnx2x_exe_queue_empty(exeq))
                        usleep_range(1000, 1000);
                else
                        return 0;
        }

        return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:                 device handle
 * @o:                  bnx2x_vlan_mac_obj
 * @cqe:                completion element
 * @ramrod_flags:       if RAMROD_CONT is set, schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct bnx2x_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return -EINVAL;

        /* Run the next bulk of pending commands if requested */
        if (test_bit(RAMROD_CONT, ramrod_flags)) {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
                return 1;

        return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:         device handle
 * @qo:         bnx2x_qable_obj
 * @elem:       bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
                                   union bnx2x_qable_obj *qo,
                                   struct bnx2x_exeq_elem *elem)
{
        struct bnx2x_exeq_elem query, *pos;
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

        memcpy(&query, elem, sizeof(query));

        switch (elem->cmd_data.vlan_mac.cmd) {
        case BNX2X_VLAN_MAC_ADD:
                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
                break;
        case BNX2X_VLAN_MAC_DEL:
                query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
                break;
        default:
                /* Don't handle anything other than ADD or DEL */
                return 0;
        }

        /* If we found the appropriate element - delete it */
        pos = exeq->get(exeq, &query);
        if (pos) {

                /* Return the credit of the optimized command */
                if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
                              &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
                        if ((query.cmd_data.vlan_mac.cmd ==
                             BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
                                BNX2X_ERR("Failed to return the credit for the "
                                          "optimized ADD command\n");
                                return -EINVAL;
                        } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
                                BNX2X_ERR("Failed to recover the credit from "
                                          "the optimized DEL command\n");
                                return -EINVAL;
                        }
                }

                DP(BNX2X_MSG_SP, "Optimizing %s command\n",
                   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
                   "ADD" : "DEL");

                list_del(&pos->link);
                bnx2x_exe_queue_free_elem(bp, pos);
                return 1;
        }

        return 0;
}
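
/*
 * Optimization example (a scenario, not new code): if an ADD for some MAC is
 * still waiting unexecuted on the exe_queue and a DEL for the same MAC
 * arrives, the lookup above finds the pending ADD, frees it, settles the CAM
 * credit and returns 1 - neither command ever reaches the FW.
 */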

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:         device handle
 * @o:
 * @elem:
 * @restore:
 * @re:
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_obj *o,
        struct bnx2x_exeq_elem *elem,
        bool restore,
        struct bnx2x_vlan_mac_registry_elem **re)
{
        int cmd = elem->cmd_data.vlan_mac.cmd;
        struct bnx2x_vlan_mac_registry_elem *reg_elem;

        /* Allocate a new registry element if needed. */
        if (!restore &&
            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
                reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
                if (!reg_elem)
                        return -ENOMEM;

                /* Get a new CAM offset */
                if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
                        /*
                         * This should never happen, because we have checked
                         * the CAM availability in the 'validate'.
                         */
                        WARN_ON(1);
                        kfree(reg_elem);
                        return -EINVAL;
                }

                DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

                /* Set a VLAN-MAC data */
                memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
                       sizeof(reg_elem->u));

                /* Copy the flags (needed for DEL and RESTORE flows) */
                reg_elem->vlan_mac_flags =
                        elem->cmd_data.vlan_mac.vlan_mac_flags;
        } else /* DEL, RESTORE */
                reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

        *re = reg_elem;
        return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:                 device handle
 * @qo:
 * @exe_chunk:
 * @ramrod_flags:
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
                                  union bnx2x_qable_obj *qo,
                                  struct list_head *exe_chunk,
                                  unsigned long *ramrod_flags)
{
        struct bnx2x_exeq_elem *elem;
        struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
        struct bnx2x_raw_obj *r = &o->raw;
        int rc, idx = 0;
        bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
        bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
        struct bnx2x_vlan_mac_registry_elem *reg_elem;
        int cmd;

        /*
         * If DRIVER_ONLY execution is requested, cleanup a registry
         * and exit. Otherwise send a ramrod to FW.
         */
        if (!drv_only) {
                WARN_ON(r->check_pending(r));

                /* Set pending */
                r->set_pending(r);

                /* Fill the ramrod data */
                list_for_each_entry(elem, exe_chunk, link) {
                        cmd = elem->cmd_data.vlan_mac.cmd;
                        /*
                         * We will add to the target object in MOVE command, so
                         * change the object for a CAM search.
                         */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                cam_obj = elem->cmd_data.vlan_mac.target_obj;
                        else
                                cam_obj = o;

                        rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
                                                              elem, restore,
                                                              &reg_elem);
                        if (rc)
                                goto error_exit;

                        WARN_ON(!reg_elem);

                        /* Push a new entry into the registry */
                        if (!restore &&
                            ((cmd == BNX2X_VLAN_MAC_ADD) ||
                             (cmd == BNX2X_VLAN_MAC_MOVE)))
                                list_add(&reg_elem->link, &cam_obj->head);

                        /* Configure a single command in a ramrod data buffer */
                        o->set_one_rule(bp, o, elem, idx,
                                        reg_elem->cam_offset);

                        /* MOVE command consumes 2 entries in the ramrod data */
                        if (cmd == BNX2X_VLAN_MAC_MOVE)
                                idx += 2;
                        else
                                idx++;
                }

                /*
                 * No need for an explicit memory barrier here as long as we
                 * would need to ensure the ordering of writing to the SPQ
                 * element and updating of the SPQ producer which involves a
                 * memory read and we will have to put a full memory barrier
                 * there (inside bnx2x_sp_post()).
                 */

                rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
                                   U64_HI(r->rdata_mapping),
                                   U64_LO(r->rdata_mapping),
                                   ETH_CONNECTION_TYPE);
                if (rc)
                        goto error_exit;
        }

        /* Now, when we are done with the ramrod - clean up the registry */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;
                if ((cmd == BNX2X_VLAN_MAC_DEL) ||
                    (cmd == BNX2X_VLAN_MAC_MOVE)) {
                        reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

                        WARN_ON(!reg_elem);

                        o->put_cam_offset(o, reg_elem->cam_offset);
                        list_del(&reg_elem->link);
                        kfree(reg_elem);
                }
        }

        if (!drv_only)
                return 1;
        else
                return 0;

error_exit:
        r->clear_pending(r);

        /* Cleanup a registry in case of a failure */
        list_for_each_entry(elem, exe_chunk, link) {
                cmd = elem->cmd_data.vlan_mac.cmd;

                if (cmd == BNX2X_VLAN_MAC_MOVE)
                        cam_obj = elem->cmd_data.vlan_mac.target_obj;
                else
                        cam_obj = o;

                /* Delete all newly added above entries */
                if (!restore &&
                    ((cmd == BNX2X_VLAN_MAC_ADD) ||
                     (cmd == BNX2X_VLAN_MAC_MOVE))) {
                        reg_elem = o->check_del(cam_obj,
                                                &elem->cmd_data.vlan_mac.u);
                        if (reg_elem) {
                                list_del(&reg_elem->link);
                                kfree(reg_elem);
                        }
                }
        }

        return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_ramrod_params *p)
{
        struct bnx2x_exeq_elem *elem;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
        bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

        /* Allocate the execution queue element */
        elem = bnx2x_exe_queue_alloc_elem(bp);
        if (!elem)
                return -ENOMEM;

        /* Set the command 'length' */
        switch (p->user_req.cmd) {
        case BNX2X_VLAN_MAC_MOVE:
                elem->cmd_len = 2;
                break;
        default:
                elem->cmd_len = 1;
        }

        /* Fill the object specific info */
        memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

        /* Try to add a new command to the pending list */
        return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:         device handle
 * @p:
 *
 */
int bnx2x_config_vlan_mac(
        struct bnx2x *bp,
        struct bnx2x_vlan_mac_ramrod_params *p)
{
        int rc = 0;
        struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
        unsigned long *ramrod_flags = &p->ramrod_flags;
        bool cont = test_bit(RAMROD_CONT, ramrod_flags);
        struct bnx2x_raw_obj *raw = &o->raw;

        /*
         * Add new elements to the execution list for commands that require it.
         */
        if (!cont) {
                rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
                if (rc)
                        return rc;
        }

        /*
         * If nothing will be executed further in this iteration we want to
         * return PENDING if there are pending commands
         */
        if (!bnx2x_exe_queue_empty(&o->exe_queue))
                rc = 1;

        /* Execute commands if required */
        if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
            test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
                rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /*
         * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
         * then the user wants to wait until the last command is done.
         */
        if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
                /*
                 * Wait maximum for the current exe_queue length iterations plus
                 * one (for the current pending command).
                 */
                int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

                while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
                       max_iterations--) {

                        /* Wait for the current command to complete */
                        rc = raw->wait_comp(bp, raw);
                        if (rc)
                                return rc;

                        /* Make a next step */
                        rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
                                                  ramrod_flags);
                        if (rc < 0)
                                return rc;
                }

                return 0;
        }

        return rc;
}
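
/*
 * Typical synchronous call (a sketch; the flag choice mirrors the driver's
 * usual flows and the MAC object/address below are hypothetical):
 *
 *      struct bnx2x_vlan_mac_ramrod_params p;
 *
 *      memset(&p, 0, sizeof(p));
 *      p.vlan_mac_obj = &bp->fp->mac_obj;
 *      p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *      memcpy(p.user_req.u.mac.mac, bp->dev->dev_addr, ETH_ALEN);
 *      __set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
 *      __set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *      rc = bnx2x_config_vlan_mac(bp, &p);
 */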


/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:                 device handle
 * @o:
 * @vlan_mac_flags:
 * @ramrod_flags:       execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
                                  struct bnx2x_vlan_mac_obj *o,
                                  unsigned long *vlan_mac_flags,
                                  unsigned long *ramrod_flags)
{
        struct bnx2x_vlan_mac_registry_elem *pos = NULL;
        int rc = 0;
        struct bnx2x_vlan_mac_ramrod_params p;
        struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
        struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

        /* Clear pending commands first */

        spin_lock_bh(&exeq->lock);

        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
                if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
                    *vlan_mac_flags)
                        list_del(&exeq_pos->link);
        }

        spin_unlock_bh(&exeq->lock);

        /* Prepare a command request */
        memset(&p, 0, sizeof(p));
        p.vlan_mac_obj = o;
        p.ramrod_flags = *ramrod_flags;
        p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

        /*
         * Add all but the last VLAN-MAC to the execution queue without actually
         * executing anything.
         */
        __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
        __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
        __clear_bit(RAMROD_CONT, &p.ramrod_flags);

        list_for_each_entry(pos, &o->head, link) {
                if (pos->vlan_mac_flags == *vlan_mac_flags) {
                        p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
                        memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
                        rc = bnx2x_config_vlan_mac(bp, &p);
                        if (rc < 0) {
                                BNX2X_ERR("Failed to add a new DEL command\n");
                                return rc;
                        }
                }
        }

        p.ramrod_flags = *ramrod_flags;
        __set_bit(RAMROD_CONT, &p.ramrod_flags);

        return bnx2x_config_vlan_mac(bp, &p);
}

static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
        u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
        unsigned long *pstate, bnx2x_obj_type type)
{
        raw->func_id = func_id;
        raw->cid = cid;
        raw->cl_id = cl_id;
        raw->rdata = rdata;
        raw->rdata_mapping = rdata_mapping;
        raw->state = state;
        raw->pstate = pstate;
        raw->obj_type = type;
        raw->check_pending = bnx2x_raw_check_pending;
        raw->clear_pending = bnx2x_raw_clear_pending;
        raw->set_pending = bnx2x_raw_set_pending;
        raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
        u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
        int state, unsigned long *pstate, bnx2x_obj_type type,
        struct bnx2x_credit_pool_obj *macs_pool,
        struct bnx2x_credit_pool_obj *vlans_pool)
{
        INIT_LIST_HEAD(&o->head);

        o->macs_pool = macs_pool;
        o->vlans_pool = vlans_pool;

        o->delete_all = bnx2x_vlan_mac_del_all;
        o->restore = bnx2x_vlan_mac_restore;
        o->complete = bnx2x_complete_vlan_mac;
        o->wait = bnx2x_wait_vlan_mac;

        bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
                           state, pstate, type);
}


void bnx2x_init_mac_obj(struct bnx2x *bp,
                        struct bnx2x_vlan_mac_obj *mac_obj,
                        u8 cl_id, u32 cid, u8 func_id, void *rdata,
                        dma_addr_t rdata_mapping, int state,
                        unsigned long *pstate, bnx2x_obj_type type,
                        struct bnx2x_credit_pool_obj *macs_pool)
{
        union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

        bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
                                   rdata_mapping, state, pstate, type,
                                   macs_pool, NULL);

        /* CAM credit pool handling */
        mac_obj->get_credit = bnx2x_get_credit_mac;
        mac_obj->put_credit = bnx2x_put_credit_mac;
        mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
        mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

        if (CHIP_IS_E1x(bp)) {
                mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
                mac_obj->check_del = bnx2x_check_mac_del;
                mac_obj->check_add = bnx2x_check_mac_add;
                mac_obj->check_move = bnx2x_check_move_always_err;
                mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

                /* Exe Queue */
                bnx2x_exe_queue_init(bp,
                                     &mac_obj->exe_queue, 1, qable_obj,
                                     bnx2x_validate_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_mac);
        } else {
                mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
                mac_obj->check_del = bnx2x_check_mac_del;
                mac_obj->check_add = bnx2x_check_mac_add;
                mac_obj->check_move = bnx2x_check_move;
                mac_obj->ramrod_cmd =
                        RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

                /* Exe Queue */
                bnx2x_exe_queue_init(bp,
                                     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
                                     qable_obj, bnx2x_validate_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_mac);
        }
}
1891
1892void bnx2x_init_vlan_obj(struct bnx2x *bp,
1893 struct bnx2x_vlan_mac_obj *vlan_obj,
1894 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1895 dma_addr_t rdata_mapping, int state,
1896 unsigned long *pstate, bnx2x_obj_type type,
1897 struct bnx2x_credit_pool_obj *vlans_pool)
1898{
1899 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1900
1901 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1902 rdata_mapping, state, pstate, type, NULL,
1903 vlans_pool);
1904
1905 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1906 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1907 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1908 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1909
1910 if (CHIP_IS_E1x(bp)) {
1911 BNX2X_ERR("Do not support chips other than E2 and newer\n");
1912 BUG();
1913 } else {
1914 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1915 vlan_obj->check_del = bnx2x_check_vlan_del;
1916 vlan_obj->check_add = bnx2x_check_vlan_add;
1917 vlan_obj->check_move = bnx2x_check_move;
1918 vlan_obj->ramrod_cmd =
1919 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1920
1921 /* Exe Queue */
1922 bnx2x_exe_queue_init(bp,
1923 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1924 qable_obj, bnx2x_validate_vlan_mac,
1925 bnx2x_optimize_vlan_mac,
1926 bnx2x_execute_vlan_mac,
1927 bnx2x_exeq_get_vlan);
1928 }
1929}
1930
1931void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1932 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1933 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1934 dma_addr_t rdata_mapping, int state,
1935 unsigned long *pstate, bnx2x_obj_type type,
1936 struct bnx2x_credit_pool_obj *macs_pool,
1937 struct bnx2x_credit_pool_obj *vlans_pool)
1938{
1939 union bnx2x_qable_obj *qable_obj =
1940 (union bnx2x_qable_obj *)vlan_mac_obj;
1941
1942 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1943 rdata_mapping, state, pstate, type,
1944 macs_pool, vlans_pool);
1945
1946 /* CAM pool handling */
1947 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1948 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1949 /*
1950 * CAM offset is relevant only for 57710 and 57711 chips, which have a
1951 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1952 * will be taken from MACs' pool object only.
1953 */
1954 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1955 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1956
1957 if (CHIP_IS_E1(bp)) {
1958 BNX2X_ERR("Do not support chips other than E2\n");
1959 BUG();
1960 } else if (CHIP_IS_E1H(bp)) {
1961 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1962 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1963 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1964 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1965 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1966
1967 /* Exe Queue */
1968 bnx2x_exe_queue_init(bp,
1969 &vlan_mac_obj->exe_queue, 1, qable_obj,
1970 bnx2x_validate_vlan_mac,
1971 bnx2x_optimize_vlan_mac,
1972 bnx2x_execute_vlan_mac,
1973 bnx2x_exeq_get_vlan_mac);
1974 } else {
1975 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1976 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1977 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1978 vlan_mac_obj->check_move = bnx2x_check_move;
1979 vlan_mac_obj->ramrod_cmd =
1980 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1981
1982 /* Exe Queue */
1983 bnx2x_exe_queue_init(bp,
1984 &vlan_mac_obj->exe_queue,
1985 CLASSIFY_RULES_COUNT,
1986 qable_obj, bnx2x_validate_vlan_mac,
1987 bnx2x_optimize_vlan_mac,
1988 bnx2x_execute_vlan_mac,
1989 bnx2x_exeq_get_vlan_mac);
1990 }
1991
1992}
1993
1994/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1995static inline void __storm_memset_mac_filters(struct bnx2x *bp,
1996 struct tstorm_eth_mac_filter_config *mac_filters,
1997 u16 pf_id)
1998{
1999 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2000
2001 u32 addr = BAR_TSTRORM_INTMEM +
2002 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2003
2004 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2005}
2006
2007static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2008 struct bnx2x_rx_mode_ramrod_params *p)
2009{
2010 /* update the bp MAC filter structure */
2011 u32 mask = (1 << p->cl_id);
2012
2013 struct tstorm_eth_mac_filter_config *mac_filters =
2014 (struct tstorm_eth_mac_filter_config *)p->rdata;
2015
2016 /* initial setting is drop-all */
2017 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2018 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2019 u8 unmatched_unicast = 0;
2020
2021 /* In E1x we only take the Rx accept flags into account since Tx switching
2022 * isn't enabled. */
2023 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2024 /* accept matched ucast */
2025 drop_all_ucast = 0;
2026
2027 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2028 /* accept matched mcast */
2029 drop_all_mcast = 0;
2030
2031 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2032 /* accept all ucast */
2033 drop_all_ucast = 0;
2034 accp_all_ucast = 1;
2035 }
2036 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2037 /* accept all mcast */
2038 drop_all_mcast = 0;
2039 accp_all_mcast = 1;
2040 }
2041 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2042 /* accept (all) bcast */
2043 accp_all_bcast = 1;
2044 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2045 /* accept unmatched unicasts */
2046 unmatched_unicast = 1;
2047
2048 mac_filters->ucast_drop_all = drop_all_ucast ?
2049 mac_filters->ucast_drop_all | mask :
2050 mac_filters->ucast_drop_all & ~mask;
2051
2052 mac_filters->mcast_drop_all = drop_all_mcast ?
2053 mac_filters->mcast_drop_all | mask :
2054 mac_filters->mcast_drop_all & ~mask;
2055
2056 mac_filters->ucast_accept_all = accp_all_ucast ?
2057 mac_filters->ucast_accept_all | mask :
2058 mac_filters->ucast_accept_all & ~mask;
2059
2060 mac_filters->mcast_accept_all = accp_all_mcast ?
2061 mac_filters->mcast_accept_all | mask :
2062 mac_filters->mcast_accept_all & ~mask;
2063
2064 mac_filters->bcast_accept_all = accp_all_bcast ?
2065 mac_filters->bcast_accept_all | mask :
2066 mac_filters->bcast_accept_all & ~mask;
2067
2068 mac_filters->unmatched_unicast = unmatched_unicast ?
2069 mac_filters->unmatched_unicast | mask :
2070 mac_filters->unmatched_unicast & ~mask;
2071
2072 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2073 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2074 mac_filters->ucast_drop_all,
2075 mac_filters->mcast_drop_all,
2076 mac_filters->ucast_accept_all,
2077 mac_filters->mcast_accept_all,
2078 mac_filters->bcast_accept_all);
2079
2080 /* write the MAC filter structure */
2081 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2082
2083 /* The operation is completed */
2084 clear_bit(p->state, p->pstate);
2085 smp_mb__after_clear_bit();
2086
2087 return 0;
2088}
2089
2090/* Setup ramrod data */
2091static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2092 struct eth_classify_header *hdr,
2093 u8 rule_cnt)
2094{
2095 hdr->echo = cid;
2096 hdr->rule_cnt = rule_cnt;
2097}
2098
2099static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2100 unsigned long accept_flags,
2101 struct eth_filter_rules_cmd *cmd,
2102 bool clear_accept_all)
2103{
2104 u16 state;
2105
2106 /* start with 'drop-all' */
2107 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2108 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2109
2110 if (accept_flags) {
2111 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2112 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2113
2114 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2115 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2116
2117 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2118 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2119 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2120 }
2121
2122 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2123 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2124 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2125 }
2126 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2127 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2128
2129 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2130 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2131 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2132 }
2133 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2134 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2135 }
2136
2137 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2138 if (clear_accept_all) {
2139 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2140 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2141 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2142 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2143 }
2144
2145 cmd->state = cpu_to_le16(state);
2146
2147}
2148
2149static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2150 struct bnx2x_rx_mode_ramrod_params *p)
2151{
2152 struct eth_filter_rules_ramrod_data *data = p->rdata;
2153 int rc;
2154 u8 rule_idx = 0;
2155
2156 /* Reset the ramrod data buffer */
2157 memset(data, 0, sizeof(*data));
2158
2159 /* Setup ramrod data */
2160
2161 /* Tx (internal switching) */
2162 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2163 data->rules[rule_idx].client_id = p->cl_id;
2164 data->rules[rule_idx].func_id = p->func_id;
2165
2166 data->rules[rule_idx].cmd_general_data =
2167 ETH_FILTER_RULES_CMD_TX_CMD;
2168
2169 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2170 &(data->rules[rule_idx++]), false);
2171 }
2172
2173 /* Rx */
2174 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2175 data->rules[rule_idx].client_id = p->cl_id;
2176 data->rules[rule_idx].func_id = p->func_id;
2177
2178 data->rules[rule_idx].cmd_general_data =
2179 ETH_FILTER_RULES_CMD_RX_CMD;
2180
2181 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2182 &(data->rules[rule_idx++]), false);
2183 }
2184
2185
2186 /*
2187 * If FCoE Queue configuration has been requested, configure the Rx and
2188 * internal switching modes for this queue in separate rules.
2189 *
2190 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2191 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2192 */
2193 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2194 /* Tx (internal switching) */
2195 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2196 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2197 data->rules[rule_idx].func_id = p->func_id;
2198
2199 data->rules[rule_idx].cmd_general_data =
2200 ETH_FILTER_RULES_CMD_TX_CMD;
2201
2202 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2203 &(data->rules[rule_idx++]),
2204 true);
2205 }
2206
2207 /* Rx */
2208 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2209 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2210 data->rules[rule_idx].func_id = p->func_id;
2211
2212 data->rules[rule_idx].cmd_general_data =
2213 ETH_FILTER_RULES_CMD_RX_CMD;
2214
2215 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2216 &(data->rules[rule_idx++]),
2217 true);
2218 }
2219 }
2220
2221 /*
2222 * Set the ramrod header (most importantly - number of rules to
2223 * configure).
2224 */
2225 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2226
2227 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2228 "tx_accept_flags 0x%lx\n",
2229 data->header.rule_cnt, p->rx_accept_flags,
2230 p->tx_accept_flags);
2231
2232 /*
2233 * No need for an explicit memory barrier here: what must be
2234 * ordered is the write to the SPQ element against the update
2235 * of the SPQ producer, which involves a memory read, and
2236 * bnx2x_sp_post() already contains a full memory barrier
2237 * that provides this ordering.
2238 */
2239
2240 /* Send a ramrod */
2241 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2242 U64_HI(p->rdata_mapping),
2243 U64_LO(p->rdata_mapping),
2244 ETH_CONNECTION_TYPE);
2245 if (rc)
2246 return rc;
2247
2248 /* Ramrod completion is pending */
2249 return 1;
2250}
2251
2252static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2253 struct bnx2x_rx_mode_ramrod_params *p)
2254{
2255 return bnx2x_state_wait(bp, p->state, p->pstate);
2256}
2257
2258static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2259 struct bnx2x_rx_mode_ramrod_params *p)
2260{
2261 /* Do nothing */
2262 return 0;
2263}
2264
2265int bnx2x_config_rx_mode(struct bnx2x *bp,
2266 struct bnx2x_rx_mode_ramrod_params *p)
2267{
2268 int rc;
2269
2270 /* Configure the new classification in the chip */
2271 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2272 if (rc < 0)
2273 return rc;
2274
2275 /* Wait for a ramrod completion if was requested */
2276 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2277 rc = p->rx_mode_obj->wait_comp(bp, p);
2278 if (rc)
2279 return rc;
2280 }
2281
2282 return rc;
2283}
2284
2285void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2286 struct bnx2x_rx_mode_obj *o)
2287{
2288 if (CHIP_IS_E1x(bp)) {
2289 o->wait_comp = bnx2x_empty_rx_mode_wait;
2290 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2291 } else {
2292 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2293 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2294 }
2295}
2296
2297/********************* Multicast verbs: SET, CLEAR ****************************/
2298static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2299{
2300 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2301}
2302
2303struct bnx2x_mcast_mac_elem {
2304 struct list_head link;
2305 u8 mac[ETH_ALEN];
2306 u8 pad[2]; /* For a natural alignment of the following buffer */
2307};
2308
2309struct bnx2x_pending_mcast_cmd {
2310 struct list_head link;
2311 int type; /* BNX2X_MCAST_CMD_X */
2312 union {
2313 struct list_head macs_head;
2314 u32 macs_num; /* Needed for DEL command */
2315 int next_bin; /* Needed for RESTORE flow with approx match */
2316 } data;
2317
2318 bool done; /* set to true, when the command has been handled,
2319 * practically used in 57712 handling only, where one pending
2320 * command may be handled in a few operations. As long as for
2321 * other chips every operation handling is completed in a
2322 * single ramrod, there is no need to utilize this field.
2323 */
2324};
2325
2326static int bnx2x_mcast_wait(struct bnx2x *bp,
2327 struct bnx2x_mcast_obj *o)
2328{
2329 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2330 o->raw.wait_comp(bp, &o->raw))
2331 return -EBUSY;
2332
2333 return 0;
2334}
2335
2336static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2337 struct bnx2x_mcast_obj *o,
2338 struct bnx2x_mcast_ramrod_params *p,
2339 int cmd)
2340{
2341 int total_sz;
2342 struct bnx2x_pending_mcast_cmd *new_cmd;
2343 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2344 struct bnx2x_mcast_list_elem *pos;
2345 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2346 p->mcast_list_len : 0);
2347
2348 /* If the command is empty ("handle pending commands only"), return */
2349 if (!p->mcast_list_len)
2350 return 0;
2351
2352 total_sz = sizeof(*new_cmd) +
2353 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2354
2355 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2356 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2357
2358 if (!new_cmd)
2359 return -ENOMEM;
2360
2361 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2362 "macs_list_len=%d\n", cmd, macs_list_len);
2363
2364 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2365
2366 new_cmd->type = cmd;
2367 new_cmd->done = false;
2368
2369 switch (cmd) {
2370 case BNX2X_MCAST_CMD_ADD:
2371 cur_mac = (struct bnx2x_mcast_mac_elem *)
2372 ((u8 *)new_cmd + sizeof(*new_cmd));
2373
2374 /* Push the MACs of the current command into the pending command
2375 * MACs list: FIFO
2376 */
2377 list_for_each_entry(pos, &p->mcast_list, link) {
2378 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2379 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2380 cur_mac++;
2381 }
2382
2383 break;
2384
2385 case BNX2X_MCAST_CMD_DEL:
2386 new_cmd->data.macs_num = p->mcast_list_len;
2387 break;
2388
2389 case BNX2X_MCAST_CMD_RESTORE:
2390 new_cmd->data.next_bin = 0;
2391 break;
2392
2393 default:
2394 BNX2X_ERR("Unknown command: %d\n", cmd);
2395 return -EINVAL;
2396 }
2397
2398 /* Push the new pending command to the tail of the pending list: FIFO */
2399 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2400
2401 o->set_sched(o);
2402
2403 return 1;
2404}
2405
2406/**
2407 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2408 *
2409 * @o: multicast object
2410 * @last: index to start looking from (including)
2411 *
2412 * returns the next found (set) bin or a negative value if none is found.
2413 */
2414static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2415{
2416 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2417
2418 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2419 if (o->registry.aprox_match.vec[i])
2420 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2421 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2422 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2423 vec, cur_bit)) {
2424 return cur_bit;
2425 }
2426 }
2427 inner_start = 0;
2428 }
2429
2430 /* None found */
2431 return -1;
2432}
2433
2434/**
2435 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2436 *
2437 * @o: multicast object
2438 *
2439 * returns the index of the found bin or -1 if none is found
2440 */
2441static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2442{
2443 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2444
2445 if (cur_bit >= 0)
2446 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2447
2448 return cur_bit;
2449}
2450
2451static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2452{
2453 struct bnx2x_raw_obj *raw = &o->raw;
2454 u8 rx_tx_flag = 0;
2455
2456 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2457 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2458 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2459
2460 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2461 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2462 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2463
2464 return rx_tx_flag;
2465}
2466
2467static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2468 struct bnx2x_mcast_obj *o, int idx,
2469 union bnx2x_mcast_config_data *cfg_data,
2470 int cmd)
2471{
2472 struct bnx2x_raw_obj *r = &o->raw;
2473 struct eth_multicast_rules_ramrod_data *data =
2474 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2475 u8 func_id = r->func_id;
2476 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2477 int bin;
2478
2479 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2480 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2481
2482 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2483
2484 /* Get a bin and update a bins' vector */
2485 switch (cmd) {
2486 case BNX2X_MCAST_CMD_ADD:
2487 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2488 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2489 break;
2490
2491 case BNX2X_MCAST_CMD_DEL:
2492 /* If there were no more bins to clear
2493 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2494 * clear any (0xff) bin.
2495 * See bnx2x_mcast_validate_e2() for explanation when it may
2496 * happen.
2497 */
2498 bin = bnx2x_mcast_clear_first_bin(o);
2499 break;
2500
2501 case BNX2X_MCAST_CMD_RESTORE:
2502 bin = cfg_data->bin;
2503 break;
2504
2505 default:
2506 BNX2X_ERR("Unknown command: %d\n", cmd);
2507 return;
2508 }
2509
2510 DP(BNX2X_MSG_SP, "%s bin %d\n",
2511 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2512 "Setting" : "Clearing"), bin);
2513
2514 data->rules[idx].bin_id = (u8)bin;
2515 data->rules[idx].func_id = func_id;
2516 data->rules[idx].engine_id = o->engine_id;
2517}
2518
2519/**
2520 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2521 *
2522 * @bp: device handle
2523 * @o: multicast object
2524 * @start_bin: index in the registry to start from (including)
2525 * @rdata_idx: index in the ramrod data to start from
2526 *
2527 * returns last handled bin index or -1 if all bins have been handled
2528 */
2529static inline int bnx2x_mcast_handle_restore_cmd_e2(
2530 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2531 int *rdata_idx)
2532{
2533 int cur_bin, cnt = *rdata_idx;
2534 union bnx2x_mcast_config_data cfg_data = {0};
2535
2536 /* go through the registry and configure the bins from it */
2537 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2538 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2539
2540 cfg_data.bin = (u8)cur_bin;
2541 o->set_one_rule(bp, o, cnt, &cfg_data,
2542 BNX2X_MCAST_CMD_RESTORE);
2543
2544 cnt++;
2545
2546 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2547
2548 /* Break if we reached the maximum number
2549 * of rules.
2550 */
2551 if (cnt >= o->max_cmd_len)
2552 break;
2553 }
2554
2555 *rdata_idx = cnt;
2556
2557 return cur_bin;
2558}
2559
2560static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2561 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2562 int *line_idx)
2563{
2564 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2565 int cnt = *line_idx;
2566 union bnx2x_mcast_config_data cfg_data = {0};
2567
2568 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2569 link) {
2570
2571 cfg_data.mac = &pmac_pos->mac[0];
2572 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2573
2574 cnt++;
2575
2576 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2577 " mcast MAC\n",
2578 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2579
2580 list_del(&pmac_pos->link);
2581
2582 /* Break if we reached the maximum number
2583 * of rules.
2584 */
2585 if (cnt >= o->max_cmd_len)
2586 break;
2587 }
2588
2589 *line_idx = cnt;
2590
2591 /* if no more MACs to configure - we are done */
2592 if (list_empty(&cmd_pos->data.macs_head))
2593 cmd_pos->done = true;
2594}
2595
2596static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2597 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2598 int *line_idx)
2599{
2600 int cnt = *line_idx;
2601
2602 while (cmd_pos->data.macs_num) {
2603 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2604
2605 cnt++;
2606
2607 cmd_pos->data.macs_num--;
2608
2609 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2610 cmd_pos->data.macs_num, cnt);
2611
2612 /* Break if we reached the maximum
2613 * number of rules.
2614 */
2615 if (cnt >= o->max_cmd_len)
2616 break;
2617 }
2618
2619 *line_idx = cnt;
2620
2621 /* If we cleared all bins - we are done */
2622 if (!cmd_pos->data.macs_num)
2623 cmd_pos->done = true;
2624}
2625
2626static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2627 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2628 int *line_idx)
2629{
2630 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2631 line_idx);
2632
2633 if (cmd_pos->data.next_bin < 0)
2634 /* If o->set_restore returned -1 we are done */
2635 cmd_pos->done = true;
2636 else
2637 /* Start from the next bin next time */
2638 cmd_pos->data.next_bin++;
2639}
2640
2641static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2642 struct bnx2x_mcast_ramrod_params *p)
2643{
2644 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2645 int cnt = 0;
2646 struct bnx2x_mcast_obj *o = p->mcast_obj;
2647
2648 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2649 link) {
2650 switch (cmd_pos->type) {
2651 case BNX2X_MCAST_CMD_ADD:
2652 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2653 break;
2654
2655 case BNX2X_MCAST_CMD_DEL:
2656 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2657 break;
2658
2659 case BNX2X_MCAST_CMD_RESTORE:
2660 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2661 &cnt);
2662 break;
2663
2664 default:
2665 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2666 return -EINVAL;
2667 }
2668
2669 /* If the command has been completed - remove it from the list
2670 * and free the memory
2671 */
2672 if (cmd_pos->done) {
2673 list_del(&cmd_pos->link);
2674 kfree(cmd_pos);
2675 }
2676
2677 /* Break if we reached the maximum number of rules */
2678 if (cnt >= o->max_cmd_len)
2679 break;
2680 }
2681
2682 return cnt;
2683}
2684
2685static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2686 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2687 int *line_idx)
2688{
2689 struct bnx2x_mcast_list_elem *mlist_pos;
2690 union bnx2x_mcast_config_data cfg_data = {0};
2691 int cnt = *line_idx;
2692
2693 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2694 cfg_data.mac = mlist_pos->mac;
2695 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2696
2697 cnt++;
2698
2699 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2700 " mcast MAC\n",
2701 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2702 }
2703
2704 *line_idx = cnt;
2705}
2706
2707static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2708 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2709 int *line_idx)
2710{
2711 int cnt = *line_idx, i;
2712
2713 for (i = 0; i < p->mcast_list_len; i++) {
2714 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2715
2716 cnt++;
2717
2718 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2719 p->mcast_list_len - i - 1);
2720 }
2721
2722 *line_idx = cnt;
2723}
2724
2725/**
2726 * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2727 *
2728 * @bp: device handle
2729 * @p: mcast ramrod parameters
2730 * @cmd: command to handle (BNX2X_MCAST_CMD_X)
2731 * @start_cnt: first line in the ramrod data that may be used
2732 *
2733 * This function is called iff there is enough room for the current command in
2734 * the ramrod data.
2735 * Returns number of lines filled in the ramrod data in total.
2736 */
2737static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2738 struct bnx2x_mcast_ramrod_params *p, int cmd,
2739 int start_cnt)
2740{
2741 struct bnx2x_mcast_obj *o = p->mcast_obj;
2742 int cnt = start_cnt;
2743
2744 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2745
2746 switch (cmd) {
2747 case BNX2X_MCAST_CMD_ADD:
2748 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2749 break;
2750
2751 case BNX2X_MCAST_CMD_DEL:
2752 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2753 break;
2754
2755 case BNX2X_MCAST_CMD_RESTORE:
2756 o->hdl_restore(bp, o, 0, &cnt);
2757 break;
2758
2759 default:
2760 BNX2X_ERR("Unknown command: %d\n", cmd);
2761 return -EINVAL;
2762 }
2763
2764 /* The current command has been handled */
2765 p->mcast_list_len = 0;
2766
2767 return cnt;
2768}
2769
2770static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2771 struct bnx2x_mcast_ramrod_params *p,
2772 int cmd)
2773{
2774 struct bnx2x_mcast_obj *o = p->mcast_obj;
2775 int reg_sz = o->get_registry_size(o);
2776
2777 switch (cmd) {
2778 /* DEL command deletes all currently configured MACs */
2779 case BNX2X_MCAST_CMD_DEL:
2780 o->set_registry_size(o, 0);
2781 /* Don't break */
2782
2783 /* RESTORE command will restore the entire multicast configuration */
2784 case BNX2X_MCAST_CMD_RESTORE:
2785 /* Here we set the approximate amount of work to do, which in
2786 * fact may only be less, since some MACs in postponed ADD
2787 * command(s) scheduled before this command may fall into
2788 * the same bin and the actual number of bins set in the
2789 * registry would be less than we estimated here. See
2790 * bnx2x_mcast_set_one_rule_e2() for further details.
2791 */
2792 p->mcast_list_len = reg_sz;
2793 break;
2794
2795 case BNX2X_MCAST_CMD_ADD:
2796 case BNX2X_MCAST_CMD_CONT:
2797 /* Here we assume that all new MACs will fall into new bins.
2798 * However we will correct the real registry size after we
2799 * handle all pending commands.
2800 */
2801 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2802 break;
2803
2804 default:
2805 BNX2X_ERR("Unknown command: %d\n", cmd);
2806 return -EINVAL;
2807
2808 }
2809
2810 /* Increase the total number of MACs pending to be configured */
2811 o->total_pending_num += p->mcast_list_len;
2812
2813 return 0;
2814}
2815
2816static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2817 struct bnx2x_mcast_ramrod_params *p,
2818 int old_num_bins)
2819{
2820 struct bnx2x_mcast_obj *o = p->mcast_obj;
2821
2822 o->set_registry_size(o, old_num_bins);
2823 o->total_pending_num -= p->mcast_list_len;
2824}
2825
2826/**
2827 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2828 *
2829 * @bp: device handle
2830 * @p: mcast ramrod parameters
2831 * @len: number of rules to handle
2832 */
2833static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2834 struct bnx2x_mcast_ramrod_params *p,
2835 u8 len)
2836{
2837 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2838 struct eth_multicast_rules_ramrod_data *data =
2839 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2840
2841 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2842 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2843 data->header.rule_cnt = len;
2844}
2845
2846/**
2847 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2848 *
2849 * @bp: device handle
2850 * @o: multicast object
2851 *
2852 * Recalculate the actual number of set bins in the registry using Brian
2853 * Kernighan's algorithm: its execution complexity is proportional to the number of set bins.
2854 *
2855 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2856 */
2857static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2858 struct bnx2x_mcast_obj *o)
2859{
2860 int i, cnt = 0;
2861 u64 elem;
2862
2863 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2864 elem = o->registry.aprox_match.vec[i];
2865 for (; elem; cnt++)
2866 elem &= elem - 1;
2867 }
2868
2869 o->set_registry_size(o, cnt);
2870
2871 return 0;
2872}
2873
2874static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2875 struct bnx2x_mcast_ramrod_params *p,
2876 int cmd)
2877{
2878 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2879 struct bnx2x_mcast_obj *o = p->mcast_obj;
2880 struct eth_multicast_rules_ramrod_data *data =
2881 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2882 int cnt = 0, rc;
2883
2884 /* Reset the ramrod data buffer */
2885 memset(data, 0, sizeof(*data));
2886
2887 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2888
2889 /* If there are no more pending commands - clear SCHEDULED state */
2890 if (list_empty(&o->pending_cmds_head))
2891 o->clear_sched(o);
2892
2893 /* The below may be true iff there was enough room in ramrod
2894 * data for all pending commands and for the current
2895 * command. Otherwise the current command would have been added
2896 * to the pending commands and p->mcast_list_len would have been
2897 * zeroed.
2898 */
2899 if (p->mcast_list_len > 0)
2900 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2901
2902 /* We've pulled out some MACs - update the total number of
2903 * outstanding.
2904 */
2905 o->total_pending_num -= cnt;
2906
2907 /* send a ramrod */
2908 WARN_ON(o->total_pending_num < 0);
2909 WARN_ON(cnt > o->max_cmd_len);
2910
2911 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2912
2913 /* Update a registry size if there are no more pending operations.
2914 *
2915 * We don't want to change the value of the registry size if there are
2916 * pending operations because we want it to always be equal to the
2917 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2918 * set bins after the last requested operation in order to properly
2919 * evaluate the size of the next DEL/RESTORE operation.
2920 *
2921 * Note that we update the registry itself during command(s) handling
2922 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2923 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2924 * with a limited amount of update commands (per MAC/bin) and we don't
2925 * know in this scope what the actual state of bins configuration is
2926 * going to be after this ramrod.
2927 */
2928 if (!o->total_pending_num)
2929 bnx2x_mcast_refresh_registry_e2(bp, o);
2930
2931 /*
2932 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2933 * RAMROD_PENDING status immediately.
2934 */
2935 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2936 raw->clear_pending(raw);
2937 return 0;
2938 } else {
2939 /*
2940 * No need for an explicit memory barrier here: what must be
2941 * ordered is the write to the SPQ element against the update
2942 * of the SPQ producer, which involves a memory read, and
2943 * bnx2x_sp_post() already contains a full memory barrier
2944 * that provides this ordering.
2945 */
2946
2947 /* Send a ramrod */
2948 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2949 raw->cid, U64_HI(raw->rdata_mapping),
2950 U64_LO(raw->rdata_mapping),
2951 ETH_CONNECTION_TYPE);
2952 if (rc)
2953 return rc;
2954
2955 /* Ramrod completion is pending */
2956 return 1;
2957 }
2958}
2959
2960static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2961 struct bnx2x_mcast_ramrod_params *p,
2962 int cmd)
2963{
2964 /* Mark, that there is a work to do */
2965 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2966 p->mcast_list_len = 1;
2967
2968 return 0;
2969}
2970
2971static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2972 struct bnx2x_mcast_ramrod_params *p,
2973 int old_num_bins)
2974{
2975 /* Do nothing */
2976}
2977
2978#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2979do { \
2980 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2981} while (0)
2982
2983static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2984 struct bnx2x_mcast_obj *o,
2985 struct bnx2x_mcast_ramrod_params *p,
2986 u32 *mc_filter)
2987{
2988 struct bnx2x_mcast_list_elem *mlist_pos;
2989 int bit;
2990
2991 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2992 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2993 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
2994
2995 DP(BNX2X_MSG_SP, "About to configure "
2996 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
2997 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
2998
2999 /* bookkeeping... */
3000 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3001 bit);
3002 }
3003}
3004
3005static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3006 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3007 u32 *mc_filter)
3008{
3009 int bit;
3010
3011 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3012 bit >= 0;
3013 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3014 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3015 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3016 }
3017}
3018
3019/* On 57711 we write the multicast MACs' approximate match
3020 * table directly into the TSTORM's internal RAM, so we don't
3021 * need any tricks to make it work.
3022 */
3023static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3024 struct bnx2x_mcast_ramrod_params *p,
3025 int cmd)
3026{
3027 int i;
3028 struct bnx2x_mcast_obj *o = p->mcast_obj;
3029 struct bnx2x_raw_obj *r = &o->raw;
3030
3031 /* If CLEAR_ONLY has been requested - clear the registry
3032 * and clear a pending bit.
3033 */
3034 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3035 u32 mc_filter[MC_HASH_SIZE] = {0};
3036
3037 /* Set the multicast filter bits before writing it into
3038 * the internal memory.
3039 */
3040 switch (cmd) {
3041 case BNX2X_MCAST_CMD_ADD:
3042 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3043 break;
3044
3045 case BNX2X_MCAST_CMD_DEL:
3046 DP(BNX2X_MSG_SP, "Invalidating multicast "
3047 "MACs configuration\n");
3048
3049 /* clear the registry */
3050 memset(o->registry.aprox_match.vec, 0,
3051 sizeof(o->registry.aprox_match.vec));
3052 break;
3053
3054 case BNX2X_MCAST_CMD_RESTORE:
3055 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3056 break;
3057
3058 default:
3059 BNX2X_ERR("Unknown command: %d\n", cmd);
3060 return -EINVAL;
3061 }
3062
3063 /* Set the mcast filter in the internal memory */
3064 for (i = 0; i < MC_HASH_SIZE; i++)
3065 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3066 } else
3067 /* clear the registry */
3068 memset(o->registry.aprox_match.vec, 0,
3069 sizeof(o->registry.aprox_match.vec));
3070
3071 /* We are done */
3072 r->clear_pending(r);
3073
3074 return 0;
3075}
3076
3077static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3078 struct bnx2x_mcast_ramrod_params *p,
3079 int cmd)
3080{
3081 struct bnx2x_mcast_obj *o = p->mcast_obj;
3082 int reg_sz = o->get_registry_size(o);
3083
3084 switch (cmd) {
3085 /* DEL command deletes all currently configured MACs */
3086 case BNX2X_MCAST_CMD_DEL:
3087 o->set_registry_size(o, 0);
3088 /* Don't break */
3089
3090 /* RESTORE command will restore the entire multicast configuration */
3091 case BNX2X_MCAST_CMD_RESTORE:
3092 p->mcast_list_len = reg_sz;
3093 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3094 cmd, p->mcast_list_len);
3095 break;
3096
3097 case BNX2X_MCAST_CMD_ADD:
3098 case BNX2X_MCAST_CMD_CONT:
3099 /* Multicast MACs on 57710 are configured as unicast MACs and
3100 * there is only a limited number of CAM entries for that
3101 * matter.
3102 */
3103 if (p->mcast_list_len > o->max_cmd_len) {
3104 BNX2X_ERR("Can't configure more than %d multicast MACs "
3105 "on 57710\n", o->max_cmd_len);
3106 return -EINVAL;
3107 }
3108 /* Every configured MAC should be cleared if DEL command is
3109 * called. Only the last ADD command is relevant as long as
3110 * every ADD command overrides the previous configuration.
3111 */
3112 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3113 if (p->mcast_list_len > 0)
3114 o->set_registry_size(o, p->mcast_list_len);
3115
3116 break;
3117
3118 default:
3119 BNX2X_ERR("Unknown command: %d\n", cmd);
3120 return -EINVAL;
3121
3122 }
3123
3124 /* We want to ensure that commands are executed one by one for 57710.
3125 * Therefore each non-empty command will consume o->max_cmd_len.
3126 */
3127 if (p->mcast_list_len)
3128 o->total_pending_num += o->max_cmd_len;
3129
3130 return 0;
3131}
3132
3133static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3134 struct bnx2x_mcast_ramrod_params *p,
3135 int old_num_macs)
3136{
3137 struct bnx2x_mcast_obj *o = p->mcast_obj;
3138
3139 o->set_registry_size(o, old_num_macs);
3140
3141 /* If the current command hasn't been handled yet and we are
3142 * here, it means it's meant to be dropped and we have to
3143 * update the number of outstanding MACs accordingly.
3144 */
3145 if (p->mcast_list_len)
3146 o->total_pending_num -= o->max_cmd_len;
3147}
3148
3149static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3150 struct bnx2x_mcast_obj *o, int idx,
3151 union bnx2x_mcast_config_data *cfg_data,
3152 int cmd)
3153{
3154 struct bnx2x_raw_obj *r = &o->raw;
3155 struct mac_configuration_cmd *data =
3156 (struct mac_configuration_cmd *)(r->rdata);
3157
3158 /* copy mac */
3159 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3160 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3161 &data->config_table[idx].middle_mac_addr,
3162 &data->config_table[idx].lsb_mac_addr,
3163 cfg_data->mac);
3164
3165 data->config_table[idx].vlan_id = 0;
3166 data->config_table[idx].pf_id = r->func_id;
3167 data->config_table[idx].clients_bit_vector =
3168 cpu_to_le32(1 << r->cl_id);
3169
3170 SET_FLAG(data->config_table[idx].flags,
3171 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3172 T_ETH_MAC_COMMAND_SET);
3173 }
3174}
3175
3176/**
3177 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3178 *
3179 * @bp: device handle
3180 * @p: mcast ramrod parameters
3181 * @len: number of rules to handle
3182 */
3183static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3184 struct bnx2x_mcast_ramrod_params *p,
3185 u8 len)
3186{
3187 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3188 struct mac_configuration_cmd *data =
3189 (struct mac_configuration_cmd *)(r->rdata);
3190
3191 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3192 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3193 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3194
3195 data->hdr.offset = offset;
3196 data->hdr.client_id = 0xff;
3197 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3198 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3199 data->hdr.length = len;
3200}
3201
3202/**
3203 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3204 *
3205 * @bp: device handle
3206 * @o: multicast object
3207 * @start_idx: index in the registry to start from
3208 * @rdata_idx: index in the ramrod data to start from
3209 *
3210 * restore command for 57710 is like all other commands - always a standalone
3211 * command - start_idx and rdata_idx will always be 0. This function will always
3212 * succeed.
3213 * returns -1 to comply with 57712 variant.
3214 */
3215static inline int bnx2x_mcast_handle_restore_cmd_e1(
3216 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3217 int *rdata_idx)
3218{
3219 struct bnx2x_mcast_mac_elem *elem;
3220 int i = 0;
3221 union bnx2x_mcast_config_data cfg_data = {0};
3222
3223 /* go through the registry and configure the MACs from it. */
3224 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3225 cfg_data.mac = &elem->mac[0];
3226 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3227
3228 i++;
3229
3230 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3231 " mcast MAC\n",
3232 BNX2X_MAC_PRN_LIST(cfg_data.mac));
3233 }
3234
3235 *rdata_idx = i;
3236
3237 return -1;
3238}
3239
3240
3241static inline int bnx2x_mcast_handle_pending_cmds_e1(
3242 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3243{
3244 struct bnx2x_pending_mcast_cmd *cmd_pos;
3245 struct bnx2x_mcast_mac_elem *pmac_pos;
3246 struct bnx2x_mcast_obj *o = p->mcast_obj;
3247 union bnx2x_mcast_config_data cfg_data = {0};
3248 int cnt = 0;
3249
3250
3251 /* If nothing to be done - return */
3252 if (list_empty(&o->pending_cmds_head))
3253 return 0;
3254
3255 /* Handle the first command */
3256 cmd_pos = list_first_entry(&o->pending_cmds_head,
3257 struct bnx2x_pending_mcast_cmd, link);
3258
3259 switch (cmd_pos->type) {
3260 case BNX2X_MCAST_CMD_ADD:
3261 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3262 cfg_data.mac = &pmac_pos->mac[0];
3263 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3264
3265 cnt++;
3266
3267 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3268 " mcast MAC\n",
3269 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3270 }
3271 break;
3272
3273 case BNX2X_MCAST_CMD_DEL:
3274 cnt = cmd_pos->data.macs_num;
3275 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3276 break;
3277
3278 case BNX2X_MCAST_CMD_RESTORE:
3279 o->hdl_restore(bp, o, 0, &cnt);
3280 break;
3281
3282 default:
3283 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3284 return -EINVAL;
3285 }
3286
3287 list_del(&cmd_pos->link);
3288 kfree(cmd_pos);
3289
3290 return cnt;
3291}
3292
3293/**
3294 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3295 *
3296 * @fw_hi: upper FW MAC word
3297 * @fw_mid: middle FW MAC word
3298 * @fw_lo: lower FW MAC word
3299 * @mac: output buffer for the 6-byte MAC address
3300 */
3301static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3302 __le16 *fw_lo, u8 *mac)
3303{
3304 mac[1] = ((u8 *)fw_hi)[0];
3305 mac[0] = ((u8 *)fw_hi)[1];
3306 mac[3] = ((u8 *)fw_mid)[0];
3307 mac[2] = ((u8 *)fw_mid)[1];
3308 mac[5] = ((u8 *)fw_lo)[0];
3309 mac[4] = ((u8 *)fw_lo)[1];
3310}
3311
3312/**
3313 * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3314 *
3315 * @bp: device handle
3316 * @o: multicast object
3317 *
3318 * Check the flag of the first ramrod data entry to see if it's a DELETE or an
3319 * ADD command and update the registry correspondingly: if ADD - allocate memory and add
3320 * the entries to the registry (list), if DELETE - clear the registry and free
3321 * the memory.
3322 */
3323static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3324 struct bnx2x_mcast_obj *o)
3325{
3326 struct bnx2x_raw_obj *raw = &o->raw;
3327 struct bnx2x_mcast_mac_elem *elem;
3328 struct mac_configuration_cmd *data =
3329 (struct mac_configuration_cmd *)(raw->rdata);
3330
3331 /* If first entry contains a SET bit - the command was ADD,
3332 * otherwise - DEL_ALL
3333 */
3334 if (GET_FLAG(data->config_table[0].flags,
3335 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3336 int i, len = data->hdr.length;
3337
3338 /* Break if it was a RESTORE command */
3339 if (!list_empty(&o->registry.exact_match.macs))
3340 return 0;
3341
3342 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3343 if (!elem) {
3344 BNX2X_ERR("Failed to allocate registry memory\n");
3345 return -ENOMEM;
3346 }
3347
3348 for (i = 0; i < len; i++, elem++) {
3349 bnx2x_get_fw_mac_addr(
3350 &data->config_table[i].msb_mac_addr,
3351 &data->config_table[i].middle_mac_addr,
3352 &data->config_table[i].lsb_mac_addr,
3353 elem->mac);
3354 DP(BNX2X_MSG_SP, "Adding registry entry for ["
3355 BNX2X_MAC_FMT"]\n",
3356 BNX2X_MAC_PRN_LIST(elem->mac));
3357 list_add_tail(&elem->link,
3358 &o->registry.exact_match.macs);
3359 }
3360 } else {
3361 elem = list_first_entry(&o->registry.exact_match.macs,
3362 struct bnx2x_mcast_mac_elem, link);
3363 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3364 kfree(elem);
3365 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3366 }
3367
3368 return 0;
3369}
3370
3371static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3372 struct bnx2x_mcast_ramrod_params *p,
3373 int cmd)
3374{
3375 struct bnx2x_mcast_obj *o = p->mcast_obj;
3376 struct bnx2x_raw_obj *raw = &o->raw;
3377 struct mac_configuration_cmd *data =
3378 (struct mac_configuration_cmd *)(raw->rdata);
3379 int cnt = 0, i, rc;
3380
3381 /* Reset the ramrod data buffer */
3382 memset(data, 0, sizeof(*data));
3383
3384 /* First set all entries as invalid */
3385 for (i = 0; i < o->max_cmd_len ; i++)
3386 SET_FLAG(data->config_table[i].flags,
3387 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3388 T_ETH_MAC_COMMAND_INVALIDATE);
3389
3390 /* Handle pending commands first */
3391 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3392
3393 /* If there are no more pending commands - clear SCHEDULED state */
3394 if (list_empty(&o->pending_cmds_head))
3395 o->clear_sched(o);
3396
3397 /* The below may be true iff there were no pending commands */
3398 if (!cnt)
3399 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3400
3401 /* For 57710 every command has o->max_cmd_len length to ensure that
3402 * commands are done one at a time.
3403 */
3404 o->total_pending_num -= o->max_cmd_len;
3405
3406 /* send a ramrod */
3407
3408 WARN_ON(cnt > o->max_cmd_len);
3409
3410 /* Set ramrod header (in particular, a number of entries to update) */
3411 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3412
3413 /* update a registry: we need the registry contents to be always up
3414 * to date in order to be able to execute a RESTORE opcode. Here
3415 * we use the fact that for 57710 we send one command at a time,
3416 * hence we may take the registry update out of the command handling
3417 * and do it in a simpler way here.
3418 */
3419 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3420 if (rc)
3421 return rc;
3422
3423 /*
3424 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3425 * RAMROD_PENDING status immediately.
3426 */
3427 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3428 raw->clear_pending(raw);
3429 return 0;
3430 } else {
3431 /*
3432 * No need for an explicit memory barrier here: what must be
3433 * ordered is the write to the SPQ element against the update
3434 * of the SPQ producer, which involves a memory read, and
3435 * bnx2x_sp_post() already contains a full memory barrier
3436 * that provides this ordering.
3437 */
3438
3439 /* Send a ramrod */
3440 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3441 U64_HI(raw->rdata_mapping),
3442 U64_LO(raw->rdata_mapping),
3443 ETH_CONNECTION_TYPE);
3444 if (rc)
3445 return rc;
3446
3447 /* Ramrod completion is pending */
3448 return 1;
3449 }
3450
3451}
3452
3453static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3454{
3455 return o->registry.exact_match.num_macs_set;
3456}
3457
3458static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3459{
3460 return o->registry.aprox_match.num_bins_set;
3461}
3462
3463static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3464 int n)
3465{
3466 o->registry.exact_match.num_macs_set = n;
3467}
3468
3469static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3470 int n)
3471{
3472 o->registry.aprox_match.num_bins_set = n;
3473}
3474
3475int bnx2x_config_mcast(struct bnx2x *bp,
3476 struct bnx2x_mcast_ramrod_params *p,
3477 int cmd)
3478{
3479 struct bnx2x_mcast_obj *o = p->mcast_obj;
3480 struct bnx2x_raw_obj *r = &o->raw;
3481 int rc = 0, old_reg_size;
3482
3483 /* This is needed to recover the number of currently configured mcast MACs
3484 * in case of failure.
3485 */
3486 old_reg_size = o->get_registry_size(o);
3487
3488 /* Do some calculations and checks */
3489 rc = o->validate(bp, p, cmd);
3490 if (rc)
3491 return rc;
3492
3493 /* Return if there is no work to do */
3494 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3495 return 0;
3496
3497 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3498 "o->max_cmd_len=%d\n", o->total_pending_num,
3499 p->mcast_list_len, o->max_cmd_len);
3500
3501 /* Enqueue the current command to the pending list if we can't complete
3502 * it in the current iteration
3503 */
3504 if (r->check_pending(r) ||
3505 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3506 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3507 if (rc < 0)
3508 goto error_exit1;
3509
3510 /* As long as the current command is in a command list we
3511 * don't need to handle it separately.
3512 */
3513 p->mcast_list_len = 0;
3514 }
3515
3516 if (!r->check_pending(r)) {
3517
3518 /* Set 'pending' state */
3519 r->set_pending(r);
3520
3521 /* Configure the new classification in the chip */
3522 rc = o->config_mcast(bp, p, cmd);
3523 if (rc < 0)
3524 goto error_exit2;
3525
3526 /* Wait for a ramrod completion if was requested */
3527 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3528 rc = o->wait_comp(bp, o);
3529 }
3530
3531 return rc;
3532
3533error_exit2:
3534 r->clear_pending(r);
3535
3536error_exit1:
3537 o->revert(bp, p, old_reg_size);
3538
3539 return rc;
3540}
3541
3542static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3543{
3544 smp_mb__before_clear_bit();
3545 clear_bit(o->sched_state, o->raw.pstate);
3546 smp_mb__after_clear_bit();
3547}
3548
3549static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3550{
3551 smp_mb__before_clear_bit();
3552 set_bit(o->sched_state, o->raw.pstate);
3553 smp_mb__after_clear_bit();
3554}
3555
3556static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3557{
3558 return !!test_bit(o->sched_state, o->raw.pstate);
3559}
3560
3561static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3562{
3563 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3564}
3565
3566void bnx2x_init_mcast_obj(struct bnx2x *bp,
3567 struct bnx2x_mcast_obj *mcast_obj,
3568 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3569 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3570 int state, unsigned long *pstate, bnx2x_obj_type type)
3571{
3572 memset(mcast_obj, 0, sizeof(*mcast_obj));
3573
3574 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3575 rdata, rdata_mapping, state, pstate, type);
3576
3577 mcast_obj->engine_id = engine_id;
3578
3579 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3580
3581 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3582 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3583 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3584 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3585
3586 if (CHIP_IS_E1(bp)) {
3587 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3588 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3589 mcast_obj->hdl_restore =
3590 bnx2x_mcast_handle_restore_cmd_e1;
3591 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3592
3593 if (CHIP_REV_IS_SLOW(bp))
3594 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3595 else
3596 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3597
3598 mcast_obj->wait_comp = bnx2x_mcast_wait;
3599 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3600 mcast_obj->validate = bnx2x_mcast_validate_e1;
3601 mcast_obj->revert = bnx2x_mcast_revert_e1;
3602 mcast_obj->get_registry_size =
3603 bnx2x_mcast_get_registry_size_exact;
3604 mcast_obj->set_registry_size =
3605 bnx2x_mcast_set_registry_size_exact;
3606
3607 /* 57710 is the only chip that uses the exact match for mcast
3608 * at the moment.
3609 */
3610 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3611
3612 } else if (CHIP_IS_E1H(bp)) {
3613 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3614 mcast_obj->enqueue_cmd = NULL;
3615 mcast_obj->hdl_restore = NULL;
3616 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3617
3618 /* 57711 doesn't send a ramrod, so it has unlimited credit
3619 * for one command.
3620 */
3621 mcast_obj->max_cmd_len = -1;
3622 mcast_obj->wait_comp = bnx2x_mcast_wait;
3623 mcast_obj->set_one_rule = NULL;
3624 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3625 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3626 mcast_obj->get_registry_size =
3627 bnx2x_mcast_get_registry_size_aprox;
3628 mcast_obj->set_registry_size =
3629 bnx2x_mcast_set_registry_size_aprox;
3630 } else {
3631 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3632 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3633 mcast_obj->hdl_restore =
3634 bnx2x_mcast_handle_restore_cmd_e2;
3635 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3636 /* TODO: There should be a proper HSI define for this number!!!
3637 */
3638 mcast_obj->max_cmd_len = 16;
3639 mcast_obj->wait_comp = bnx2x_mcast_wait;
3640 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3641 mcast_obj->validate = bnx2x_mcast_validate_e2;
3642 mcast_obj->revert = bnx2x_mcast_revert_e2;
3643 mcast_obj->get_registry_size =
3644 bnx2x_mcast_get_registry_size_aprox;
3645 mcast_obj->set_registry_size =
3646 bnx2x_mcast_set_registry_size_aprox;
3647 }
3648}
3649
3650/*************************** Credit handling **********************************/
3651
3652/**
3653 * __atomic_add_ifless - add if the result is less than a given value.
3654 *
3655 * @v: pointer of type atomic_t
3656 * @a: the amount to add to v...
3657 * @u: ...if (v + a) is less than u.
3658 *
3659 * returns true if (v + a) was less than u, and false otherwise.
3660 *
3661 */
3662static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3663{
3664 int c, old;
3665
3666 c = atomic_read(v);
3667 for (;;) {
3668 if (unlikely(c + a >= u))
3669 return false;
3670
3671 old = atomic_cmpxchg((v), c, c + a);
3672 if (likely(old == c))
3673 break;
3674 c = old;
3675 }
3676
3677 return true;
3678}
3679
3680/**
3681 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3682 *
3683 * @v: pointer of type atomic_t
3684 * @a: the amount to dec from v...
3685 * @u: ...if (v - a) is greater than or equal to u.
3686 *
3687 * returns true if (v - a) was greater than or equal to u, and false
3688 * otherwise.
3689 */
3690static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3691{
3692 int c, old;
3693
3694 c = atomic_read(v);
3695 for (;;) {
3696 if (unlikely(c - a < u))
3697 return false;
3698
3699 old = atomic_cmpxchg((v), c, c - a);
3700 if (likely(old == c))
3701 break;
3702 c = old;
3703 }
3704
3705 return true;
3706}
3707
3708static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3709{
3710 bool rc;
3711
3712 smp_mb();
3713 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3714 smp_mb();
3715
3716 return rc;
3717}
3718
3719static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3720{
3721 bool rc;
3722
3723 smp_mb();
3724
3725 /* Don't allow a refill if credit + cnt > pool_sz */
3726 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3727
3728 smp_mb();
3729
3730 return rc;
3731}
3732
3733static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3734{
3735 int cur_credit;
3736
3737 smp_mb();
3738 cur_credit = atomic_read(&o->credit);
3739
3740 return cur_credit;
3741}
3742
3743static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3744 int cnt)
3745{
3746 return true;
3747}
3748
3749
3750static bool bnx2x_credit_pool_get_entry(
3751 struct bnx2x_credit_pool_obj *o,
3752 int *offset)
3753{
3754 int idx, vec, i;
3755
3756 *offset = -1;
3757
3758 /* Find "internal cam-offset" then add to base for this object... */
3759 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3760
3761 /* Skip the current vector if there are no free entries in it */
3762 if (!o->pool_mirror[vec])
3763 continue;
3764
3765 /* If we've got here we are going to find a free entry */
3766 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3767 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3768
3769 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3770 /* Got one!! */
3771 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3772 *offset = o->base_pool_offset + idx;
3773 return true;
3774 }
3775 }
3776
3777 return false;
3778}
3779
3780static bool bnx2x_credit_pool_put_entry(
3781 struct bnx2x_credit_pool_obj *o,
3782 int offset)
3783{
3784 if (offset < o->base_pool_offset)
3785 return false;
3786
3787 offset -= o->base_pool_offset;
3788
3789 if (offset >= o->pool_sz)
3790 return false;
3791
3792 /* Return the entry to the pool */
3793 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3794
3795 return true;
3796}
3797
3798static bool bnx2x_credit_pool_put_entry_always_true(
3799 struct bnx2x_credit_pool_obj *o,
3800 int offset)
3801{
3802 return true;
3803}
3804
3805static bool bnx2x_credit_pool_get_entry_always_true(
3806 struct bnx2x_credit_pool_obj *o,
3807 int *offset)
3808{
3809 *offset = -1;
3810 return true;
3811}
3812/**
3813 * bnx2x_init_credit_pool - initialize credit pool internals.
3814 *
3815 * @p: credit pool object to initialize
3816 * @base: Base entry in the CAM to use.
3817 * @credit: pool size.
3818 *
3819 * If base is negative no CAM entries handling will be performed.
3820 * If credit is negative pool operations will always succeed (unlimited pool).
3821 *
3822 */
3823static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3824 int base, int credit)
3825{
3826 /* Zero the object first */
3827 memset(p, 0, sizeof(*p));
3828
3829 /* Set the table to all 1s */
3830 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3831
3832 /* Init a pool as full */
3833 atomic_set(&p->credit, credit);
3834
3835 /* The total pool size */
3836 p->pool_sz = credit;
3837
3838 p->base_pool_offset = base;
3839
3840 /* Commit the change */
3841 smp_mb();
3842
3843 p->check = bnx2x_credit_pool_check;
3844
3845 /* if pool credit is negative - disable the checks */
3846 if (credit >= 0) {
3847 p->put = bnx2x_credit_pool_put;
3848 p->get = bnx2x_credit_pool_get;
3849 p->put_entry = bnx2x_credit_pool_put_entry;
3850 p->get_entry = bnx2x_credit_pool_get_entry;
3851 } else {
3852 p->put = bnx2x_credit_pool_always_true;
3853 p->get = bnx2x_credit_pool_always_true;
3854 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3855 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3856 }
3857
3858 /* If base is negative - disable entries handling */
3859 if (base < 0) {
3860 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3861 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3862 }
3863}
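
/*
 * An illustrative sketch (not part of the driver) of the three pool
 * flavours the negative-argument convention above produces; the
 * base/size values are hypothetical:
 *
 *	bnx2x_init_credit_pool(p, 16, 32); // 32 credits, CAM entries 16..47
 *	bnx2x_init_credit_pool(p, -1, 32); // 32 credits, no CAM entry handling
 *	bnx2x_init_credit_pool(p, 0, -1);  // unlimited: get/put always succeed
 */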
3864
3865void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3866 struct bnx2x_credit_pool_obj *p, u8 func_id,
3867 u8 func_num)
3868{
3869/* TODO: this will be defined in consts as well... */
3870#define BNX2X_CAM_SIZE_EMUL 5
3871
3872 int cam_sz;
3873
3874 if (CHIP_IS_E1(bp)) {
3875 /* In E1, Multicast is saved in cam... */
3876 if (!CHIP_REV_IS_SLOW(bp))
3877 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3878 else
3879 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3880
3881 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3882
3883 } else if (CHIP_IS_E1H(bp)) {
3884 /* CAM credit is equally divided between all active functions
3885 * on the PORT.
3886 */
3887 if (func_num > 0) {
3888 if (!CHIP_REV_IS_SLOW(bp))
3889 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3890 else
3891 cam_sz = BNX2X_CAM_SIZE_EMUL;
3892 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3893 } else {
3894 /* this should never happen! Block MAC operations. */
3895 bnx2x_init_credit_pool(p, 0, 0);
3896 }
3897
3898 } else {
3899
3900 /*
3901 * CAM credit is equally divided between all active functions
3902 * on the PATH.
3903 */
3904 if (func_num > 0) {
3905 if (!CHIP_REV_IS_SLOW(bp))
3906 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3907 else
3908 cam_sz = BNX2X_CAM_SIZE_EMUL;
3909
3910 /*
3911 * No need for CAM entries handling for 57712 and
3912 * newer.
3913 */
3914 bnx2x_init_credit_pool(p, -1, cam_sz);
3915 } else {
3916 /* this should never happen! Block MAC operations. */
3917 bnx2x_init_credit_pool(p, 0, 0);
3918 }
3919
3920 }
3921}
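
/*
 * A worked example (numbers hypothetical) of the sizing above: on an
 * E1H ASIC with func_num = 4, each function gets
 * MAX_MAC_CREDIT_E1H / (2 * 4) CAM entries, and function func_id's
 * window starts at func_id * cam_sz. On E2 and newer only the credit
 * count matters, so the pool is created with base = -1 and no
 * per-function CAM windows are maintained.
 */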
3922
3923void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3924 struct bnx2x_credit_pool_obj *p,
3925 u8 func_id,
3926 u8 func_num)
3927{
3928 if (CHIP_IS_E1x(bp)) {
3929 /*
3930 * There is no VLAN credit in HW on 57710 and 57711; only
3931 * MAC / MAC-VLAN pairs can be configured
3932 */
3933 bnx2x_init_credit_pool(p, 0, -1);
3934 } else {
3935 /*
3936 * CAM credit is equally divided between all active functions
3937 * on the PATH.
3938 */
3939 if (func_num > 0) {
3940 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3941 bnx2x_init_credit_pool(p, func_id * credit, credit);
3942 } else
3943 /* this should never happen! Block VLAN operations. */
3944 bnx2x_init_credit_pool(p, 0, 0);
3945 }
3946}
3947
3948/****************** RSS Configuration ******************/
3949/**
3950 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3951 *
3952 * @bp: driver handle
3953 * @p: pointer to rss configuration
3954 *
3955 * Prints it when NETIF_MSG_IFUP debug level is configured.
3956 */
3957static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3958 struct bnx2x_config_rss_params *p)
3959{
3960 int i;
3961
3962 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3963 DP(BNX2X_MSG_SP, "0x0000: ");
3964 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3965 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3966
3967 /* Print 4 bytes in a line */
3968 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3969 (((i + 1) & 0x3) == 0)) {
3970 DP_CONT(BNX2X_MSG_SP, "\n");
3971 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3972 }
3973 }
3974
3975 DP_CONT(BNX2X_MSG_SP, "\n");
3976}
3977
3978/**
3979 * bnx2x_setup_rss - configure RSS
3980 *
3981 * @bp: device handle
3982 * @p: rss configuration
3983 *
3984 * Sends an RSS UPDATE ramrod.
3985 */
3986static int bnx2x_setup_rss(struct bnx2x *bp,
3987 struct bnx2x_config_rss_params *p)
3988{
3989 struct bnx2x_rss_config_obj *o = p->rss_obj;
3990 struct bnx2x_raw_obj *r = &o->raw;
3991 struct eth_rss_update_ramrod_data *data =
3992 (struct eth_rss_update_ramrod_data *)(r->rdata);
3993 u8 rss_mode = 0;
3994 int rc;
3995
3996 memset(data, 0, sizeof(*data));
3997
3998 DP(BNX2X_MSG_SP, "Configuring RSS\n");
3999
4000 /* Set an echo field */
4001 data->echo = (r->cid & BNX2X_SWCID_MASK) |
4002 (r->state << BNX2X_SWCID_SHIFT);
4003
4004 /* RSS mode */
4005 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4006 rss_mode = ETH_RSS_MODE_DISABLED;
4007 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4008 rss_mode = ETH_RSS_MODE_REGULAR;
4009 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4010 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4011 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4012 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4013 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4014 rss_mode = ETH_RSS_MODE_IP_DSCP;
4015
4016 data->rss_mode = rss_mode;
4017
4018 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4019
4020 /* RSS capabilities */
4021 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4022 data->capabilities |=
4023 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4024
4025 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4026 data->capabilities |=
4027 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4028
4029 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4030 data->capabilities |=
4031 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4032
4033 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4034 data->capabilities |=
4035 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4036
4037 /* Hashing mask */
4038 data->rss_result_mask = p->rss_result_mask;
4039
4040 /* RSS engine ID */
4041 data->rss_engine_id = o->engine_id;
4042
4043 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4044
4045 /* Indirection table */
4046 memcpy(data->indirection_table, p->ind_table,
4047 T_ETH_INDIRECTION_TABLE_SIZE);
4048
4049 /* Remember the last configuration */
4050 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4051
4052 /* Print the indirection table */
4053 if (netif_msg_ifup(bp))
4054 bnx2x_debug_print_ind_table(bp, p);
4055
4056 /* RSS keys */
4057 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4058 memcpy(&data->rss_key[0], &p->rss_key[0],
4059 sizeof(data->rss_key));
4060 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4061 }
4062
4063 /*
4064 * No need for an explicit memory barrier here: we only need to
4065 * order the write of the SPQ element against the update of the
4066 * SPQ producer. The producer update involves a memory read, and
4067 * the full memory barrier required for that is already placed
4068 * inside bnx2x_sp_post().
4069 */
4070
4071 /* Send a ramrod */
4072 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4073 U64_HI(r->rdata_mapping),
4074 U64_LO(r->rdata_mapping),
4075 ETH_CONNECTION_TYPE);
4076
4077 if (rc < 0)
4078 return rc;
4079
4080 return 1;
4081}
4082
4083void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4084 u8 *ind_table)
4085{
4086 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4087}
4088
4089int bnx2x_config_rss(struct bnx2x *bp,
4090 struct bnx2x_config_rss_params *p)
4091{
4092 int rc;
4093 struct bnx2x_rss_config_obj *o = p->rss_obj;
4094 struct bnx2x_raw_obj *r = &o->raw;
4095
4096 /* Do nothing if only driver cleanup was requested */
4097 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4098 return 0;
4099
4100 r->set_pending(r);
4101
4102 rc = o->config_rss(bp, p);
4103 if (rc < 0) {
4104 r->clear_pending(r);
4105 return rc;
4106 }
4107
4108 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4109 rc = r->wait_comp(bp, r);
4110
4111 return rc;
4112}
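
/*
 * An illustrative sketch (not part of the driver) of a synchronous RSS
 * configuration through bnx2x_config_rss(); the object pointer, mask
 * value and "tbl" array are hypothetical:
 *
 *	struct bnx2x_config_rss_params params = {0};
 *
 *	params.rss_obj = rss_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
 *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
 *	params.rss_result_mask = 0x7f;
 *	memcpy(params.ind_table, tbl, T_ETH_INDIRECTION_TABLE_SIZE);
 *	rc = bnx2x_config_rss(bp, &params);
 */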
4113
4114
4115void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4116 struct bnx2x_rss_config_obj *rss_obj,
4117 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4118 void *rdata, dma_addr_t rdata_mapping,
4119 int state, unsigned long *pstate,
4120 bnx2x_obj_type type)
4121{
4122 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4123 rdata_mapping, state, pstate, type);
4124
4125 rss_obj->engine_id = engine_id;
4126 rss_obj->config_rss = bnx2x_setup_rss;
4127}
4128
4129/********************** Queue state object ***********************************/
4130
4131/**
4132 * bnx2x_queue_state_change - perform Queue state change transition
4133 *
4134 * @bp: device handle
4135 * @params: parameters to perform the transition
4136 *
4137 * returns 0 in case of a successfully completed transition, a negative
4138 * error code in case of failure, or a positive (EBUSY) value if there is
4139 * a completion still pending (possible only if RAMROD_COMP_WAIT is
4140 * not set in params->ramrod_flags for asynchronous commands).
4141 *
4142 */
4143int bnx2x_queue_state_change(struct bnx2x *bp,
4144 struct bnx2x_queue_state_params *params)
4145{
4146 struct bnx2x_queue_sp_obj *o = params->q_obj;
4147 int rc, pending_bit;
4148 unsigned long *pending = &o->pending;
4149
4150 /* Check that the requested transition is legal */
4151 if (o->check_transition(bp, o, params))
4152 return -EINVAL;
4153
4154 /* Set "pending" bit */
4155 pending_bit = o->set_pending(o, params);
4156
4157 /* Don't send a command if only driver cleanup was requested */
4158 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4159 o->complete_cmd(bp, o, pending_bit);
4160 else {
4161 /* Send a ramrod */
4162 rc = o->send_cmd(bp, params);
4163 if (rc) {
4164 o->next_state = BNX2X_Q_STATE_MAX;
4165 clear_bit(pending_bit, pending);
4166 smp_mb__after_clear_bit();
4167 return rc;
4168 }
4169
4170 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4171 rc = o->wait_comp(bp, o, pending_bit);
4172 if (rc)
4173 return rc;
4174
4175 return 0;
4176 }
4177 }
4178
4179 return !!test_bit(pending_bit, pending);
4180}
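
/*
 * An illustrative sketch (not part of the driver) of a synchronous
 * queue command issued through the state machine above; the queue
 * object pointer is hypothetical:
 *
 *	struct bnx2x_queue_state_params q_params = {0};
 *
 *	q_params.q_obj = q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_HALT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params); // 0 when completed
 */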
4181
4182
4183static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4184 struct bnx2x_queue_state_params *params)
4185{
4186 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4187
4188 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4189 * the UPDATE command.
4190 */
4191 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4192 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4193 bit = BNX2X_Q_CMD_UPDATE;
4194 else
4195 bit = cmd;
4196
4197 set_bit(bit, &obj->pending);
4198 return bit;
4199}
4200
4201static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4202 struct bnx2x_queue_sp_obj *o,
4203 enum bnx2x_queue_cmd cmd)
4204{
4205 return bnx2x_state_wait(bp, cmd, &o->pending);
4206}
4207
4208/**
4209 * bnx2x_queue_comp_cmd - complete the state change command.
4210 *
4211 * @bp: device handle
4212 * @o: queue state object
4213 * @cmd: command that has completed
4214 *
4215 * Checks that the arrived completion is expected.
4216 */
4217static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4218 struct bnx2x_queue_sp_obj *o,
4219 enum bnx2x_queue_cmd cmd)
4220{
4221 unsigned long cur_pending = o->pending;
4222
4223 if (!test_and_clear_bit(cmd, &cur_pending)) {
4224 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4225 "pending 0x%lx, next_state %d\n", cmd,
4226 o->cids[BNX2X_PRIMARY_CID_INDEX],
4227 o->state, cur_pending, o->next_state);
4228 return -EINVAL;
4229 }
4230
4231 if (o->next_tx_only >= o->max_cos)
4232 /* >= because tx only must always be smaller than cos since the
4233 * primary connection supports COS 0
4234 */
4235 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4236 o->next_tx_only, o->max_cos);
4237
4238 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4239 "setting state to %d\n", cmd,
4240 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4241
4242 if (o->next_tx_only) /* print num tx-only if any exist */
4243 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
4244 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4245
4246 o->state = o->next_state;
4247 o->num_tx_only = o->next_tx_only;
4248 o->next_state = BNX2X_Q_STATE_MAX;
4249
4250 /* It's important that o->state and o->next_state are
4251 * updated before o->pending.
4252 */
4253 wmb();
4254
4255 clear_bit(cmd, &o->pending);
4256 smp_mb__after_clear_bit();
4257
4258 return 0;
4259}
4260
4261static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4262 struct bnx2x_queue_state_params *cmd_params,
4263 struct client_init_ramrod_data *data)
4264{
4265 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4266
4267 /* Rx data */
4268
4269 /* IPv6 TPA supported for E2 and above only */
4270 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4271 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4272}
4273
4274static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4275 struct bnx2x_queue_sp_obj *o,
4276 struct bnx2x_general_setup_params *params,
4277 struct client_init_general_data *gen_data,
4278 unsigned long *flags)
4279{
4280 gen_data->client_id = o->cl_id;
4281
4282 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4283 gen_data->statistics_counter_id =
4284 params->stat_id;
4285 gen_data->statistics_en_flg = 1;
4286 gen_data->statistics_zero_flg =
4287 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4288 } else
4289 gen_data->statistics_counter_id =
4290 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4291
4292 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4293 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4294 gen_data->sp_client_id = params->spcl_id;
4295 gen_data->mtu = cpu_to_le16(params->mtu);
4296 gen_data->func_id = o->func_id;
4297
4298
4299 gen_data->cos = params->cos;
4300
4301 gen_data->traffic_type =
4302 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4303 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4304
4305 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
4306 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4307}
4308
4309static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4310 struct bnx2x_txq_setup_params *params,
4311 struct client_init_tx_data *tx_data,
4312 unsigned long *flags)
4313{
4314 tx_data->enforce_security_flg =
4315 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4316 tx_data->default_vlan =
4317 cpu_to_le16(params->default_vlan);
4318 tx_data->default_vlan_flg =
4319 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4320 tx_data->tx_switching_flg =
4321 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4322 tx_data->anti_spoofing_flg =
4323 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4324 tx_data->tx_status_block_id = params->fw_sb_id;
4325 tx_data->tx_sb_index_number = params->sb_cq_index;
4326 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4327
4328 tx_data->tx_bd_page_base.lo =
4329 cpu_to_le32(U64_LO(params->dscr_map));
4330 tx_data->tx_bd_page_base.hi =
4331 cpu_to_le32(U64_HI(params->dscr_map));
4332
4333 /* Don't configure any Tx switching mode during queue SETUP */
4334 tx_data->state = 0;
4335}
4336
4337static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4338 struct rxq_pause_params *params,
4339 struct client_init_rx_data *rx_data)
4340{
4341 /* flow control data */
4342 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4343 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4344 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4345 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4346 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4347 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4348 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4349}
4350
4351static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4352 struct bnx2x_rxq_setup_params *params,
4353 struct client_init_rx_data *rx_data,
4354 unsigned long *flags)
4355{
4356 /* Rx data */
4357 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4358 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4359 rx_data->vmqueue_mode_en_flg = 0;
4360
4361 rx_data->cache_line_alignment_log_size =
4362 params->cache_line_log;
4363 rx_data->enable_dynamic_hc =
4364 test_bit(BNX2X_Q_FLG_DHC, flags);
4365 rx_data->max_sges_for_packet = params->max_sges_pkt;
4366 rx_data->client_qzone_id = params->cl_qzone_id;
4367 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4368
4369 /* Always start in DROP_ALL mode */
4370 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4371 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4372
4373 /* We don't set drop flags */
4374 rx_data->drop_ip_cs_err_flg = 0;
4375 rx_data->drop_tcp_cs_err_flg = 0;
4376 rx_data->drop_ttl0_flg = 0;
4377 rx_data->drop_udp_cs_err_flg = 0;
4378 rx_data->inner_vlan_removal_enable_flg =
4379 test_bit(BNX2X_Q_FLG_VLAN, flags);
4380 rx_data->outer_vlan_removal_enable_flg =
4381 test_bit(BNX2X_Q_FLG_OV, flags);
4382 rx_data->status_block_id = params->fw_sb_id;
4383 rx_data->rx_sb_index_number = params->sb_cq_index;
4384 rx_data->max_tpa_queues = params->max_tpa_queues;
4385 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4386 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4387 rx_data->bd_page_base.lo =
4388 cpu_to_le32(U64_LO(params->dscr_map));
4389 rx_data->bd_page_base.hi =
4390 cpu_to_le32(U64_HI(params->dscr_map));
4391 rx_data->sge_page_base.lo =
4392 cpu_to_le32(U64_LO(params->sge_map));
4393 rx_data->sge_page_base.hi =
4394 cpu_to_le32(U64_HI(params->sge_map));
4395 rx_data->cqe_page_base.lo =
4396 cpu_to_le32(U64_LO(params->rcq_map));
4397 rx_data->cqe_page_base.hi =
4398 cpu_to_le32(U64_HI(params->rcq_map));
4399 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4400
4401 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4402 rx_data->approx_mcast_engine_id = o->func_id;
4403 rx_data->is_approx_mcast = 1;
4404 }
4405
4406 rx_data->rss_engine_id = params->rss_engine_id;
4407
4408 /* silent vlan removal */
4409 rx_data->silent_vlan_removal_flg =
4410 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4411 rx_data->silent_vlan_value =
4412 cpu_to_le16(params->silent_removal_value);
4413 rx_data->silent_vlan_mask =
4414 cpu_to_le16(params->silent_removal_mask);
4415
4416}
4417
4418/* initialize the general, tx and rx parts of a queue object */
4419static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4420 struct bnx2x_queue_state_params *cmd_params,
4421 struct client_init_ramrod_data *data)
4422{
4423 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4424 &cmd_params->params.setup.gen_params,
4425 &data->general,
4426 &cmd_params->params.setup.flags);
4427
4428 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4429 &cmd_params->params.setup.txq_params,
4430 &data->tx,
4431 &cmd_params->params.setup.flags);
4432
4433 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4434 &cmd_params->params.setup.rxq_params,
4435 &data->rx,
4436 &cmd_params->params.setup.flags);
4437
4438 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4439 &cmd_params->params.setup.pause_params,
4440 &data->rx);
4441}
4442
4443/* initialize the general and tx parts of a tx-only queue object */
4444static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4445 struct bnx2x_queue_state_params *cmd_params,
4446 struct tx_queue_init_ramrod_data *data)
4447{
4448 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4449 &cmd_params->params.tx_only.gen_params,
4450 &data->general,
4451 &cmd_params->params.tx_only.flags);
4452
4453 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4454 &cmd_params->params.tx_only.txq_params,
4455 &data->tx,
4456 &cmd_params->params.tx_only.flags);
4457
4458 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0],
4459 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4460}
4461
4462/**
4463 * bnx2x_q_init - init HW/FW queue
4464 *
4465 * @bp: device handle
4466 * @params: queue state parameters
4467 *
4468 * HW/FW initial Queue configuration:
4469 * - HC: Rx and Tx
4470 * - CDU context validation
4471 *
4472 */
4473static inline int bnx2x_q_init(struct bnx2x *bp,
4474 struct bnx2x_queue_state_params *params)
4475{
4476 struct bnx2x_queue_sp_obj *o = params->q_obj;
4477 struct bnx2x_queue_init_params *init = &params->params.init;
4478 u16 hc_usec;
4479 u8 cos;
4480
4481 /* Tx HC configuration */
4482 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4483 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4484 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4485
4486 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4487 init->tx.sb_cq_index,
4488 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4489 hc_usec);
4490 }
4491
4492 /* Rx HC configuration */
4493 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4494 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4495 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4496
4497 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4498 init->rx.sb_cq_index,
4499 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4500 hc_usec);
4501 }
4502
4503 /* Set CDU context validation values */
4504 for (cos = 0; cos < o->max_cos; cos++) {
4505 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
4506 o->cids[cos], cos);
4507 DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
4508 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4509 }
4510
4511 /* As no ramrod is sent, complete the command immediately */
4512 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4513
4514 mmiowb();
4515 smp_mb();
4516
4517 return 0;
4518}
4519
4520static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4521 struct bnx2x_queue_state_params *params)
4522{
4523 struct bnx2x_queue_sp_obj *o = params->q_obj;
4524 struct client_init_ramrod_data *rdata =
4525 (struct client_init_ramrod_data *)o->rdata;
4526 dma_addr_t data_mapping = o->rdata_mapping;
4527 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4528
4529 /* Clear the ramrod data */
4530 memset(rdata, 0, sizeof(*rdata));
4531
4532 /* Fill the ramrod data */
4533 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4534
4535 /*
4536 * No need for an explicit memory barrier here: we only need to
4537 * order the write of the SPQ element against the update of the
4538 * SPQ producer. The producer update involves a memory read, and
4539 * the full memory barrier required for that is already placed
4540 * inside bnx2x_sp_post().
4541 */
4542
4543 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4544 U64_HI(data_mapping),
4545 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4546}
4547
4548static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4549 struct bnx2x_queue_state_params *params)
4550{
4551 struct bnx2x_queue_sp_obj *o = params->q_obj;
4552 struct client_init_ramrod_data *rdata =
4553 (struct client_init_ramrod_data *)o->rdata;
4554 dma_addr_t data_mapping = o->rdata_mapping;
4555 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4556
4557 /* Clear the ramrod data */
4558 memset(rdata, 0, sizeof(*rdata));
4559
4560 /* Fill the ramrod data */
4561 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4562 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4563
4564 /*
4565 * No need for an explicit memory barrier here: we only need to
4566 * order the write of the SPQ element against the update of the
4567 * SPQ producer. The producer update involves a memory read, and
4568 * the full memory barrier required for that is already placed
4569 * inside bnx2x_sp_post().
4570 */
4571
4572 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4573 U64_HI(data_mapping),
4574 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4575}
4576
4577static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4578 struct bnx2x_queue_state_params *params)
4579{
4580 struct bnx2x_queue_sp_obj *o = params->q_obj;
4581 struct tx_queue_init_ramrod_data *rdata =
4582 (struct tx_queue_init_ramrod_data *)o->rdata;
4583 dma_addr_t data_mapping = o->rdata_mapping;
4584 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4585 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4586 &params->params.tx_only;
4587 u8 cid_index = tx_only_params->cid_index;
4588
4589
4590 if (cid_index >= o->max_cos) {
4591 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4592 o->cl_id, cid_index);
4593 return -EINVAL;
4594 }
4595
4596 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
4597 tx_only_params->gen_params.cos,
4598 tx_only_params->gen_params.spcl_id);
4599
4600 /* Clear the ramrod data */
4601 memset(rdata, 0, sizeof(*rdata));
4602
4603 /* Fill the ramrod data */
4604 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4605
4606 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
4607 "sp-client id %d, cos %d",
4608 o->cids[cid_index],
4609 rdata->general.client_id,
4610 rdata->general.sp_client_id, rdata->general.cos);
4611
4612 /*
4613 * No need for an explicit memory barrier here: we only need to
4614 * order the write of the SPQ element against the update of the
4615 * SPQ producer. The producer update involves a memory read, and
4616 * the full memory barrier required for that is already placed
4617 * inside bnx2x_sp_post().
4618 */
4619
4620 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4621 U64_HI(data_mapping),
4622 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4623}
4624
4625static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4626 struct bnx2x_queue_sp_obj *obj,
4627 struct bnx2x_queue_update_params *params,
4628 struct client_update_ramrod_data *data)
4629{
4630 /* Client ID of the client to update */
4631 data->client_id = obj->cl_id;
4632
4633 /* Function ID of the client to update */
4634 data->func_id = obj->func_id;
4635
4636 /* Default VLAN value */
4637 data->default_vlan = cpu_to_le16(params->def_vlan);
4638
4639 /* Inner VLAN stripping */
4640 data->inner_vlan_removal_enable_flg =
4641 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4642 data->inner_vlan_removal_change_flg =
4643 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4644 &params->update_flags);
4645
4646 /* Outer VLAN stripping */
4647 data->outer_vlan_removal_enable_flg =
4648 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4649 data->outer_vlan_removal_change_flg =
4650 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4651 &params->update_flags);
4652
4653 /* Drop packets that have source MAC that doesn't belong to this
4654 * Queue.
4655 */
4656 data->anti_spoofing_enable_flg =
4657 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4658 data->anti_spoofing_change_flg =
4659 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4660
4661 /* Activate/Deactivate */
4662 data->activate_flg =
4663 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4664 data->activate_change_flg =
4665 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4666
4667 /* Enable default VLAN */
4668 data->default_vlan_enable_flg =
4669 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4670 data->default_vlan_change_flg =
4671 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4672 &params->update_flags);
4673
4674 /* silent vlan removal */
4675 data->silent_vlan_change_flg =
4676 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4677 &params->update_flags);
4678 data->silent_vlan_removal_flg =
4679 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4680 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4681 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4682}
4683
4684static inline int bnx2x_q_send_update(struct bnx2x *bp,
4685 struct bnx2x_queue_state_params *params)
4686{
4687 struct bnx2x_queue_sp_obj *o = params->q_obj;
4688 struct client_update_ramrod_data *rdata =
4689 (struct client_update_ramrod_data *)o->rdata;
4690 dma_addr_t data_mapping = o->rdata_mapping;
4691 struct bnx2x_queue_update_params *update_params =
4692 &params->params.update;
4693 u8 cid_index = update_params->cid_index;
4694
4695 if (cid_index >= o->max_cos) {
4696 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4697 o->cl_id, cid_index);
4698 return -EINVAL;
4699 }
4700
4701
4702 /* Clear the ramrod data */
4703 memset(rdata, 0, sizeof(*rdata));
4704
4705 /* Fill the ramrod data */
4706 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4707
4708 /*
4709 * No need for an explicit memory barrier here: we only need to
4710 * order the write of the SPQ element against the update of the
4711 * SPQ producer. The producer update involves a memory read, and
4712 * the full memory barrier required for that is already placed
4713 * inside bnx2x_sp_post().
4714 */
4715
4716 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4717 o->cids[cid_index], U64_HI(data_mapping),
4718 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4719}
4720
4721/**
4722 * bnx2x_q_send_deactivate - send DEACTIVATE command
4723 *
4724 * @bp: device handle
4725 * @params: queue state parameters
4726 *
4727 * implemented using the UPDATE command.
4728 */
4729static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4730 struct bnx2x_queue_state_params *params)
4731{
4732 struct bnx2x_queue_update_params *update = &params->params.update;
4733
4734 memset(update, 0, sizeof(*update));
4735
4736 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4737
4738 return bnx2x_q_send_update(bp, params);
4739}
4740
4741/**
4742 * bnx2x_q_send_activate - send ACTIVATE command
4743 *
4744 * @bp: device handle
4745 * @params:
4746 *
4747 * implemented using the UPDATE command.
4748 */
4749static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4750 struct bnx2x_queue_state_params *params)
4751{
4752 struct bnx2x_queue_update_params *update = &params->params.update;
4753
4754 memset(update, 0, sizeof(*update));
4755
4756 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4757 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4758
4759 return bnx2x_q_send_update(bp, params);
4760}
4761
4762static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4763 struct bnx2x_queue_state_params *params)
4764{
4765 /* TODO: Not implemented yet. */
4766 return -1;
4767}
4768
4769static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4770 struct bnx2x_queue_state_params *params)
4771{
4772 struct bnx2x_queue_sp_obj *o = params->q_obj;
4773
4774 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4775 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4776 ETH_CONNECTION_TYPE);
4777}
4778
4779static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4780 struct bnx2x_queue_state_params *params)
4781{
4782 struct bnx2x_queue_sp_obj *o = params->q_obj;
4783 u8 cid_idx = params->params.cfc_del.cid_index;
4784
4785 if (cid_idx >= o->max_cos) {
4786 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4787 o->cl_id, cid_idx);
4788 return -EINVAL;
4789 }
4790
4791 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4792 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4793}
4794
4795static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4796 struct bnx2x_queue_state_params *params)
4797{
4798 struct bnx2x_queue_sp_obj *o = params->q_obj;
4799 u8 cid_index = params->params.terminate.cid_index;
4800
4801 if (cid_index >= o->max_cos) {
4802 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4803 o->cl_id, cid_index);
4804 return -EINVAL;
4805 }
4806
4807 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4808 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4809}
4810
4811static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4812 struct bnx2x_queue_state_params *params)
4813{
4814 struct bnx2x_queue_sp_obj *o = params->q_obj;
4815
4816 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4817 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4818 ETH_CONNECTION_TYPE);
4819}
4820
4821static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4822 struct bnx2x_queue_state_params *params)
4823{
4824 switch (params->cmd) {
4825 case BNX2X_Q_CMD_INIT:
4826 return bnx2x_q_init(bp, params);
4827 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4828 return bnx2x_q_send_setup_tx_only(bp, params);
4829 case BNX2X_Q_CMD_DEACTIVATE:
4830 return bnx2x_q_send_deactivate(bp, params);
4831 case BNX2X_Q_CMD_ACTIVATE:
4832 return bnx2x_q_send_activate(bp, params);
4833 case BNX2X_Q_CMD_UPDATE:
4834 return bnx2x_q_send_update(bp, params);
4835 case BNX2X_Q_CMD_UPDATE_TPA:
4836 return bnx2x_q_send_update_tpa(bp, params);
4837 case BNX2X_Q_CMD_HALT:
4838 return bnx2x_q_send_halt(bp, params);
4839 case BNX2X_Q_CMD_CFC_DEL:
4840 return bnx2x_q_send_cfc_del(bp, params);
4841 case BNX2X_Q_CMD_TERMINATE:
4842 return bnx2x_q_send_terminate(bp, params);
4843 case BNX2X_Q_CMD_EMPTY:
4844 return bnx2x_q_send_empty(bp, params);
4845 default:
4846 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4847 return -EINVAL;
4848 }
4849}
4850
4851static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4852 struct bnx2x_queue_state_params *params)
4853{
4854 switch (params->cmd) {
4855 case BNX2X_Q_CMD_SETUP:
4856 return bnx2x_q_send_setup_e1x(bp, params);
4857 case BNX2X_Q_CMD_INIT:
4858 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4859 case BNX2X_Q_CMD_DEACTIVATE:
4860 case BNX2X_Q_CMD_ACTIVATE:
4861 case BNX2X_Q_CMD_UPDATE:
4862 case BNX2X_Q_CMD_UPDATE_TPA:
4863 case BNX2X_Q_CMD_HALT:
4864 case BNX2X_Q_CMD_CFC_DEL:
4865 case BNX2X_Q_CMD_TERMINATE:
4866 case BNX2X_Q_CMD_EMPTY:
4867 return bnx2x_queue_send_cmd_cmn(bp, params);
4868 default:
4869 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4870 return -EINVAL;
4871 }
4872}
4873
4874static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4875 struct bnx2x_queue_state_params *params)
4876{
4877 switch (params->cmd) {
4878 case BNX2X_Q_CMD_SETUP:
4879 return bnx2x_q_send_setup_e2(bp, params);
4880 case BNX2X_Q_CMD_INIT:
4881 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4882 case BNX2X_Q_CMD_DEACTIVATE:
4883 case BNX2X_Q_CMD_ACTIVATE:
4884 case BNX2X_Q_CMD_UPDATE:
4885 case BNX2X_Q_CMD_UPDATE_TPA:
4886 case BNX2X_Q_CMD_HALT:
4887 case BNX2X_Q_CMD_CFC_DEL:
4888 case BNX2X_Q_CMD_TERMINATE:
4889 case BNX2X_Q_CMD_EMPTY:
4890 return bnx2x_queue_send_cmd_cmn(bp, params);
4891 default:
4892 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4893 return -EINVAL;
4894 }
4895}
4896
4897/**
4898 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4899 *
4900 * @bp: device handle
4901 * @o: queue state object
4902 * @params: queue state parameters
4903 *
4904 * (not Forwarding)
4905 * It both checks if the requested command is legal in a current
4906 * state and, if it's legal, sets a `next_state' in the object
4907 * that will be used in the completion flow to set the `state'
4908 * of the object.
4909 *
4910 * returns 0 if a requested command is a legal transition,
4911 * -EINVAL otherwise.
4912 */
4913static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4914 struct bnx2x_queue_sp_obj *o,
4915 struct bnx2x_queue_state_params *params)
4916{
4917 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4918 enum bnx2x_queue_cmd cmd = params->cmd;
4919 struct bnx2x_queue_update_params *update_params =
4920 &params->params.update;
4921 u8 next_tx_only = o->num_tx_only;
4922
4923 /*
4924 * Forget all pending-for-completion commands if a driver-only state
4925 * transition has been requested.
4926 */
4927 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4928 o->pending = 0;
4929 o->next_state = BNX2X_Q_STATE_MAX;
4930 }
4931
4932 /*
4933 * Don't allow a next state transition if we are in the middle of
4934 * the previous one.
4935 */
4936 if (o->pending)
4937 return -EBUSY;
4938
4939 switch (state) {
4940 case BNX2X_Q_STATE_RESET:
4941 if (cmd == BNX2X_Q_CMD_INIT)
4942 next_state = BNX2X_Q_STATE_INITIALIZED;
4943
4944 break;
4945 case BNX2X_Q_STATE_INITIALIZED:
4946 if (cmd == BNX2X_Q_CMD_SETUP) {
4947 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4948 &params->params.setup.flags))
4949 next_state = BNX2X_Q_STATE_ACTIVE;
4950 else
4951 next_state = BNX2X_Q_STATE_INACTIVE;
4952 }
4953
4954 break;
4955 case BNX2X_Q_STATE_ACTIVE:
4956 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4957 next_state = BNX2X_Q_STATE_INACTIVE;
4958
4959 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4960 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4961 next_state = BNX2X_Q_STATE_ACTIVE;
4962
4963 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4964 next_state = BNX2X_Q_STATE_MULTI_COS;
4965 next_tx_only = 1;
4966 }
4967
4968 else if (cmd == BNX2X_Q_CMD_HALT)
4969 next_state = BNX2X_Q_STATE_STOPPED;
4970
4971 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4972 /* If "active" state change is requested, update the
4973 * state accordingly.
4974 */
4975 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4976 &update_params->update_flags) &&
4977 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4978 &update_params->update_flags))
4979 next_state = BNX2X_Q_STATE_INACTIVE;
4980 else
4981 next_state = BNX2X_Q_STATE_ACTIVE;
4982 }
4983
4984 break;
4985 case BNX2X_Q_STATE_MULTI_COS:
4986 if (cmd == BNX2X_Q_CMD_TERMINATE)
4987 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4988
4989 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4990 next_state = BNX2X_Q_STATE_MULTI_COS;
4991 next_tx_only = o->num_tx_only + 1;
4992 }
4993
4994 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4995 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4996 next_state = BNX2X_Q_STATE_MULTI_COS;
4997
4998 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4999 /* If "active" state change is requested, update the
5000 * state accordingly.
5001 */
5002 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5003 &update_params->update_flags) &&
5004 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5005 &update_params->update_flags))
5006 next_state = BNX2X_Q_STATE_INACTIVE;
5007 else
5008 next_state = BNX2X_Q_STATE_MULTI_COS;
5009 }
5010
5011 break;
5012 case BNX2X_Q_STATE_MCOS_TERMINATED:
5013 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5014 next_tx_only = o->num_tx_only - 1;
5015 if (next_tx_only == 0)
5016 next_state = BNX2X_Q_STATE_ACTIVE;
5017 else
5018 next_state = BNX2X_Q_STATE_MULTI_COS;
5019 }
5020
5021 break;
5022 case BNX2X_Q_STATE_INACTIVE:
5023 if (cmd == BNX2X_Q_CMD_ACTIVATE)
5024 next_state = BNX2X_Q_STATE_ACTIVE;
5025
5026 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5027 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5028 next_state = BNX2X_Q_STATE_INACTIVE;
5029
5030 else if (cmd == BNX2X_Q_CMD_HALT)
5031 next_state = BNX2X_Q_STATE_STOPPED;
5032
5033 else if (cmd == BNX2X_Q_CMD_UPDATE) {
5034 /* If "active" state change is requested, update the
5035 * state accordingly.
5036 */
5037 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5038 &update_params->update_flags) &&
5039 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5040 &update_params->update_flags)) {
5041 if (o->num_tx_only == 0)
5042 next_state = BNX2X_Q_STATE_ACTIVE;
5043 else /* tx only queues exist for this queue */
5044 next_state = BNX2X_Q_STATE_MULTI_COS;
5045 } else
5046 next_state = BNX2X_Q_STATE_INACTIVE;
5047 }
5048
5049 break;
5050 case BNX2X_Q_STATE_STOPPED:
5051 if (cmd == BNX2X_Q_CMD_TERMINATE)
5052 next_state = BNX2X_Q_STATE_TERMINATED;
5053
5054 break;
5055 case BNX2X_Q_STATE_TERMINATED:
5056 if (cmd == BNX2X_Q_CMD_CFC_DEL)
5057 next_state = BNX2X_Q_STATE_RESET;
5058
5059 break;
5060 default:
5061 BNX2X_ERR("Illegal state: %d\n", state);
5062 }
5063
5064 /* Transition is assured */
5065 if (next_state != BNX2X_Q_STATE_MAX) {
5066 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5067 state, cmd, next_state);
5068 o->next_state = next_state;
5069 o->next_tx_only = next_tx_only;
5070 return 0;
5071 }
5072
5073 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5074
5075 return -EINVAL;
5076}
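
/*
 * An illustrative command sequence (states simplified) that the checker
 * above accepts for a queue with tx-only companions:
 *
 *	RESET -INIT-> INITIALIZED -SETUP-> ACTIVE
 *	ACTIVE -SETUP_TX_ONLY-> MULTI_COS           (num_tx_only = 1)
 *	MULTI_COS -TERMINATE-> MCOS_TERMINATED
 *	MCOS_TERMINATED -CFC_DEL-> ACTIVE           (num_tx_only back to 0)
 *	ACTIVE -HALT-> STOPPED -TERMINATE-> TERMINATED -CFC_DEL-> RESET
 *
 * Any command not listed for the current state fails with -EINVAL, and
 * a new command is rejected with -EBUSY while one is still pending.
 */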
5077
5078void bnx2x_init_queue_obj(struct bnx2x *bp,
5079 struct bnx2x_queue_sp_obj *obj,
5080 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5081 void *rdata,
5082 dma_addr_t rdata_mapping, unsigned long type)
5083{
5084 memset(obj, 0, sizeof(*obj));
5085
5086 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5087 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5088
5089 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5090 obj->max_cos = cid_cnt;
5091 obj->cl_id = cl_id;
5092 obj->func_id = func_id;
5093 obj->rdata = rdata;
5094 obj->rdata_mapping = rdata_mapping;
5095 obj->type = type;
5096 obj->next_state = BNX2X_Q_STATE_MAX;
5097
5098 if (CHIP_IS_E1x(bp))
5099 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5100 else
5101 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5102
5103 obj->check_transition = bnx2x_queue_chk_transition;
5104
5105 obj->complete_cmd = bnx2x_queue_comp_cmd;
5106 obj->wait_comp = bnx2x_queue_wait_comp;
5107 obj->set_pending = bnx2x_queue_set_pending;
5108}
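
/*
 * An illustrative sketch (not part of the driver) of initializing a
 * queue object for a single-CoS Rx/Tx queue; "cid", "cl_id" and the
 * rdata buffer are hypothetical:
 *
 *	u32 cids[1] = { cid };
 *	unsigned long q_type = 0;
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, q_obj, cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 */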
5109
5110void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5111 struct bnx2x_queue_sp_obj *obj,
5112 u32 cid, u8 index)
5113{
5114 obj->cids[index] = cid;
5115}
5116
5117/********************** Function state object *********************************/
5118enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5119 struct bnx2x_func_sp_obj *o)
5120{
5121 /* in the middle of a transaction - return INVALID state */
5122 if (o->pending)
5123 return BNX2X_F_STATE_MAX;
5124
5125 /*
5126 * ensure the order of reading of o->pending and o->state:
5127 * o->pending should be read first
5128 */
5129 rmb();
5130
5131 return o->state;
5132}
5133
5134static int bnx2x_func_wait_comp(struct bnx2x *bp,
5135 struct bnx2x_func_sp_obj *o,
5136 enum bnx2x_func_cmd cmd)
5137{
5138 return bnx2x_state_wait(bp, cmd, &o->pending);
5139}
5140
5141/**
5142 * bnx2x_func_state_change_comp - complete the state machine transition
5143 *
5144 * @bp: device handle
5145 * @o: function state object
5146 * @cmd: command that has completed
5147 *
5148 * Called on state change transition. Completes the state
5149 * machine transition only - no HW interaction.
5150 */
5151static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5152 struct bnx2x_func_sp_obj *o,
5153 enum bnx2x_func_cmd cmd)
5154{
5155 unsigned long cur_pending = o->pending;
5156
5157 if (!test_and_clear_bit(cmd, &cur_pending)) {
5158 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5159 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5160 o->state, cur_pending, o->next_state);
5161 return -EINVAL;
5162 }
5163
5164 DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
5165 "%d\n", cmd, BP_FUNC(bp), o->next_state);
5166
5167 o->state = o->next_state;
5168 o->next_state = BNX2X_F_STATE_MAX;
5169
5170 /* It's important that o->state and o->next_state are
5171 * updated before o->pending.
5172 */
5173 wmb();
5174
5175 clear_bit(cmd, &o->pending);
5176 smp_mb__after_clear_bit();
5177
5178 return 0;
5179}
5180
5181/**
5182 * bnx2x_func_comp_cmd - complete the state change command
5183 *
5184 * @bp: device handle
5185 * @o: function state object
5186 * @cmd: command that has completed
5187 *
5188 * Checks that the arrived completion is expected.
5189 */
5190static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5191 struct bnx2x_func_sp_obj *o,
5192 enum bnx2x_func_cmd cmd)
5193{
5194 /* Complete the state machine part first, check if it's a
5195 * legal completion.
5196 */
5197 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5198 return rc;
5199}
5200
5201/**
5202 * bnx2x_func_chk_transition - perform function state machine transition
5203 *
5204 * @bp: device handle
5205 * @o: function state object
5206 * @params: function state parameters
5207 *
5208 * It both checks if the requested command is legal in a current
5209 * state and, if it's legal, sets a `next_state' in the object
5210 * that will be used in the completion flow to set the `state'
5211 * of the object.
5212 *
5213 * returns 0 if a requested command is a legal transition,
5214 * -EINVAL otherwise.
5215 */
5216static int bnx2x_func_chk_transition(struct bnx2x *bp,
5217 struct bnx2x_func_sp_obj *o,
5218 struct bnx2x_func_state_params *params)
5219{
5220 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5221 enum bnx2x_func_cmd cmd = params->cmd;
5222
5223 /*
5224 * Forget all pending-for-completion commands if a driver-only state
5225 * transition has been requested.
5226 */
5227 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5228 o->pending = 0;
5229 o->next_state = BNX2X_F_STATE_MAX;
5230 }
5231
5232 /*
5233 * Don't allow a next state transition if we are in the middle of
5234 * the previous one.
5235 */
5236 if (o->pending)
5237 return -EBUSY;
5238
5239 switch (state) {
5240 case BNX2X_F_STATE_RESET:
5241 if (cmd == BNX2X_F_CMD_HW_INIT)
5242 next_state = BNX2X_F_STATE_INITIALIZED;
5243
5244 break;
5245 case BNX2X_F_STATE_INITIALIZED:
5246 if (cmd == BNX2X_F_CMD_START)
5247 next_state = BNX2X_F_STATE_STARTED;
5248
5249 else if (cmd == BNX2X_F_CMD_HW_RESET)
5250 next_state = BNX2X_F_STATE_RESET;
5251
5252 break;
5253 case BNX2X_F_STATE_STARTED:
5254 if (cmd == BNX2X_F_CMD_STOP)
5255 next_state = BNX2X_F_STATE_INITIALIZED;
5256 else if (cmd == BNX2X_F_CMD_TX_STOP)
5257 next_state = BNX2X_F_STATE_TX_STOPPED;
5258
5259 break;
5260 case BNX2X_F_STATE_TX_STOPPED:
5261 if (cmd == BNX2X_F_CMD_TX_START)
5262 next_state = BNX2X_F_STATE_STARTED;
5263
5264 break;
5265 default:
5266 BNX2X_ERR("Unknown state: %d\n", state);
5267 }
5268
5269 /* Transition is assured */
5270 if (next_state != BNX2X_F_STATE_MAX) {
5271 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5272 state, cmd, next_state);
5273 o->next_state = next_state;
5274 return 0;
5275 }
5276
5277 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5278 state, cmd);
5279
5280 return -EINVAL;
5281}
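
/*
 * An illustrative command sequence accepted by the function state
 * machine above:
 *
 *	RESET -HW_INIT-> INITIALIZED -START-> STARTED
 *	STARTED -TX_STOP-> TX_STOPPED -TX_START-> STARTED
 *	STARTED -STOP-> INITIALIZED -HW_RESET-> RESET
 */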
5282
5283/**
5284 * bnx2x_func_init_func - performs HW init at function stage
5285 *
5286 * @bp: device handle
5287 * @drv: driver specific operations
5288 *
5289 * Init HW when the current phase is
5290 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5291 * HW blocks.
5292 */
5293static inline int bnx2x_func_init_func(struct bnx2x *bp,
5294 const struct bnx2x_func_sp_drv_ops *drv)
5295{
5296 return drv->init_hw_func(bp);
5297}
5298
5299/**
5300 * bnx2x_func_init_port - performs HW init at port stage
5301 *
5302 * @bp: device handle
5303 * @drv: driver specific operations
5304 *
5305 * Init HW when the current phase is
5306 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5307 * FUNCTION-only HW blocks.
5308 *
5309 */
5310static inline int bnx2x_func_init_port(struct bnx2x *bp,
5311 const struct bnx2x_func_sp_drv_ops *drv)
5312{
5313 int rc = drv->init_hw_port(bp);
5314 if (rc)
5315 return rc;
5316
5317 return bnx2x_func_init_func(bp, drv);
5318}
5319
5320/**
5321 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5322 *
5323 * @bp: device handle
5324 * @drv: driver specific operations
5325 *
5326 * Init HW when the current phase is
5327 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5328 * PORT-only and FUNCTION-only HW blocks.
5329 */
5330static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5331 const struct bnx2x_func_sp_drv_ops *drv)
5332{
5333 int rc = drv->init_hw_cmn_chip(bp);
5334 if (rc)
5335 return rc;
5336
5337 return bnx2x_func_init_port(bp, drv);
5338}
5339
5340/**
5341 * bnx2x_func_init_cmn - performs HW init at common stage
5342 *
5343 * @bp: device handle
5344 * @drv: driver specific operations
5345 *
5346 * Init HW when the current phase is
5347 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5348 * PORT-only and FUNCTION-only HW blocks.
5349 */
5350static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5351 const struct bnx2x_func_sp_drv_ops *drv)
5352{
5353 int rc = drv->init_hw_cmn(bp);
5354 if (rc)
5355 return rc;
5356
5357 return bnx2x_func_init_port(bp, drv);
5358}
5359
5360static int bnx2x_func_hw_init(struct bnx2x *bp,
5361 struct bnx2x_func_state_params *params)
5362{
5363 u32 load_code = params->params.hw_init.load_phase;
5364 struct bnx2x_func_sp_obj *o = params->f_obj;
5365 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5366 int rc = 0;
5367
5368 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5369 BP_ABS_FUNC(bp), load_code);
5370
5371 /* Prepare buffers for unzipping the FW */
5372 rc = drv->gunzip_init(bp);
5373 if (rc)
5374 return rc;
5375
5376 /* Prepare FW */
5377 rc = drv->init_fw(bp);
5378 if (rc) {
5379 BNX2X_ERR("Error loading firmware\n");
5380 goto fw_init_err;
5381 }
5382
5383 /* Handle the beginning of COMMON_XXX phases separately... */
5384 switch (load_code) {
5385 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5386 rc = bnx2x_func_init_cmn_chip(bp, drv);
5387 if (rc)
5388 goto init_hw_err;
5389
5390 break;
5391 case FW_MSG_CODE_DRV_LOAD_COMMON:
5392 rc = bnx2x_func_init_cmn(bp, drv);
5393 if (rc)
5394 goto init_hw_err;
5395
5396 break;
5397 case FW_MSG_CODE_DRV_LOAD_PORT:
5398 rc = bnx2x_func_init_port(bp, drv);
5399 if (rc)
5400 goto init_hw_err;
5401
5402 break;
5403 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5404 rc = bnx2x_func_init_func(bp, drv);
5405 if (rc)
5406 goto init_hw_err;
5407
5408 break;
5409 default:
5410 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5411 rc = -EINVAL;
5412 }
5413
5414init_hw_err:
5415 drv->release_fw(bp);
5416
5417fw_init_err:
5418 drv->gunzip_end(bp);
5419
5420 /* In case of success, complete the command immediately: no ramrods
5421 * have been sent.
5422 */
5423 if (!rc)
5424 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5425
5426 return rc;
5427}
5428
5429/**
5430 * bnx2x_func_reset_func - reset HW at function stage
5431 *
5432 * @bp: device handle
5433 * @drv: driver specific operations
5434 *
5435 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5436 * FUNCTION-only HW blocks.
5437 */
5438static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5439 const struct bnx2x_func_sp_drv_ops *drv)
5440{
5441 drv->reset_hw_func(bp);
5442}
5443
5444/**
5445 * bnx2x_func_reset_port - reset HW at port stage
5446 *
5447 * @bp: device handle
5448 * @drv: driver specific operations
5449 *
5450 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5451 * FUNCTION-only and PORT-only HW blocks.
5452 *
5453 * !!!IMPORTANT!!!
5454 *
5455 * It's important to call reset_port before reset_func() as the last thing
5456 * reset_func does is pf_disable(), thus disabling PGLUE_B, which
5457 * makes any DMAE transactions impossible.
5458 */
5459static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5460 const struct bnx2x_func_sp_drv_ops *drv)
5461{
5462 drv->reset_hw_port(bp);
5463 bnx2x_func_reset_func(bp, drv);
5464}
5465
5466/**
5467 * bnx2x_func_reset_cmn - reset HW at common stage
5468 *
5469 * @bp: device handle
5470 * @drv: driver specific operations
5471 *
5472 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5473 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5474 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5475 */
5476static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5477 const struct bnx2x_func_sp_drv_ops *drv)
5478{
5479 bnx2x_func_reset_port(bp, drv);
5480 drv->reset_hw_cmn(bp);
5481}
5482
5483
5484static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5485 struct bnx2x_func_state_params *params)
5486{
5487 u32 reset_phase = params->params.hw_reset.reset_phase;
5488 struct bnx2x_func_sp_obj *o = params->f_obj;
5489 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5490
5491 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5492 reset_phase);
5493
5494 switch (reset_phase) {
5495 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5496 bnx2x_func_reset_cmn(bp, drv);
5497 break;
5498 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5499 bnx2x_func_reset_port(bp, drv);
5500 break;
5501 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5502 bnx2x_func_reset_func(bp, drv);
5503 break;
5504 default:
5505 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5506 reset_phase);
5507 break;
5508 }
5509
5510 /* Complete the command immediately: no ramrods have been sent. */
5511 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5512
5513 return 0;
5514}
5515
5516static inline int bnx2x_func_send_start(struct bnx2x *bp,
5517 struct bnx2x_func_state_params *params)
5518{
5519 struct bnx2x_func_sp_obj *o = params->f_obj;
5520 struct function_start_data *rdata =
5521 (struct function_start_data *)o->rdata;
5522 dma_addr_t data_mapping = o->rdata_mapping;
5523 struct bnx2x_func_start_params *start_params = &params->params.start;
5524
5525 memset(rdata, 0, sizeof(*rdata));
5526
5527 /* Fill the ramrod data with provided parameters */
5528 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5529 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5530 rdata->path_id = BP_PATH(bp);
5531 rdata->network_cos_mode = start_params->network_cos_mode;
5532
5533 /*
5534 * No need for an explicit memory barrier here: we only need to
5535 * order the write of the SPQ element against the update of the
5536 * SPQ producer. The producer update involves a memory read, and
5537 * the full memory barrier required for that is already placed
5538 * inside bnx2x_sp_post().
5539 */
5540
5541 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5542 U64_HI(data_mapping),
5543 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5544}
5545
5546static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5547 struct bnx2x_func_state_params *params)
5548{
5549 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5550 NONE_CONNECTION_TYPE);
5551}
5552
5553static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
5554 struct bnx2x_func_state_params *params)
5555{
5556 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
5557 NONE_CONNECTION_TYPE);
5558}
5559static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5560 struct bnx2x_func_state_params *params)
5561{
5562 struct bnx2x_func_sp_obj *o = params->f_obj;
5563 struct flow_control_configuration *rdata =
5564 (struct flow_control_configuration *)o->rdata;
5565 dma_addr_t data_mapping = o->rdata_mapping;
5566 struct bnx2x_func_tx_start_params *tx_start_params =
5567 &params->params.tx_start;
5568 int i;
5569
5570 memset(rdata, 0, sizeof(*rdata));
5571
5572 rdata->dcb_enabled = tx_start_params->dcb_enabled;
5573 rdata->dcb_version = tx_start_params->dcb_version;
5574 rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
5575
5576 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5577 rdata->traffic_type_to_priority_cos[i] =
5578 tx_start_params->traffic_type_to_priority_cos[i];
5579
5580 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5581 U64_HI(data_mapping),
5582 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5583}
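
/* Caller sketch (illustrative only): the DCB code would pause traffic,
 * reconfigure, and restart it roughly as follows; the DCB field values are
 * placeholders:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_tx_start_params *tx = &func_params.params.tx_start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 *	bnx2x_func_state_change(bp, &func_params);
 *
 *	(... apply the new priority-to-COS configuration ...)
 *
 *	func_params.cmd = BNX2X_F_CMD_TX_START;
 *	tx->dcb_enabled = 1;
 *	tx->dcb_version = 0;
 *	tx->dont_add_pri_0_en = 0;
 *	bnx2x_func_state_change(bp, &func_params);
 */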

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;

	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
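
/* Initialization sketch (illustrative only): at load time the driver would
 * set up its single function object, passing DMA-able memory for the ramrod
 * data and a chip specific drv_ops table; the bnx2x_sp()/bnx2x_sp_mapping()
 * accessors and the bnx2x_func_sp_drv table are assumed here:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    &bnx2x_func_sp_drv);
 */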

/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp: device handle
 * @params: parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition,
 * negative error code in case of failure, positive
 * (EBUSY) value if there is a pending completion (possible
 * only if RAMROD_COMP_WAIT is not set in params->ramrod_flags
 * for asynchronous commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params)) {
		mutex_unlock(&o->one_pending_mutex);
		return -EINVAL;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
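
/* Caller sketch (illustrative only): handling the positive return value of
 * an asynchronous command issued without RAMROD_COMP_WAIT; 'func_params'
 * is assumed to be filled as in the earlier sketches:
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *	if (rc < 0)
 *		return rc;	(transition refused or ramrod post failed)
 *	if (rc > 0)
 *		;		(completion still pending; it will be
 *				 signalled later via complete_cmd() from
 *				 the ramrod completion path)
 */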