net/mlx4_core: Resource tracker for reg/unreg vlans
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID (1ull << 63)
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
58struct vlan_res {
59 struct list_head list;
60 u16 vlan;
61 int ref_count;
62 int vlan_index;
63 u8 port;
64};
65
66struct res_common {
67 struct list_head list;
68 struct rb_node node;
69 u64 res_id;
70 int owner;
71 int state;
72 int from_state;
73 int to_state;
74 int removing;
75};
76
77enum {
78 RES_ANY_BUSY = 1
79};
80
81struct res_gid {
82 struct list_head list;
83 u8 gid[16];
84 enum mlx4_protocol prot;
85 enum mlx4_steer_type steer;
86 u64 reg_id;
87};
88
89enum res_qp_states {
90 RES_QP_BUSY = RES_ANY_BUSY,
91
92 /* QP number was allocated */
93 RES_QP_RESERVED,
94
95 /* ICM memory for QP context was mapped */
96 RES_QP_MAPPED,
97
98 /* QP is in hw ownership */
99 RES_QP_HW
100};
101
102struct res_qp {
103 struct res_common com;
104 struct res_mtt *mtt;
105 struct res_cq *rcq;
106 struct res_cq *scq;
107 struct res_srq *srq;
108 struct list_head mcg_list;
109 spinlock_t mcg_spl;
110 int local_qpn;
111 atomic_t ref_count;
112 u32 qpc_flags;
113 u8 sched_queue;
114};
115
116enum res_mtt_states {
117 RES_MTT_BUSY = RES_ANY_BUSY,
118 RES_MTT_ALLOCATED,
119};
120
121static inline const char *mtt_states_str(enum res_mtt_states state)
122{
123 switch (state) {
124 case RES_MTT_BUSY: return "RES_MTT_BUSY";
125 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
126 default: return "Unknown";
127 }
128}
129
130struct res_mtt {
131 struct res_common com;
132 int order;
133 atomic_t ref_count;
134};
135
136enum res_mpt_states {
137 RES_MPT_BUSY = RES_ANY_BUSY,
138 RES_MPT_RESERVED,
139 RES_MPT_MAPPED,
140 RES_MPT_HW,
141};
142
143struct res_mpt {
144 struct res_common com;
145 struct res_mtt *mtt;
146 int key;
147};
148
149enum res_eq_states {
150 RES_EQ_BUSY = RES_ANY_BUSY,
151 RES_EQ_RESERVED,
152 RES_EQ_HW,
153};
154
155struct res_eq {
156 struct res_common com;
157 struct res_mtt *mtt;
158};
159
160enum res_cq_states {
161 RES_CQ_BUSY = RES_ANY_BUSY,
162 RES_CQ_ALLOCATED,
163 RES_CQ_HW,
164};
165
166struct res_cq {
167 struct res_common com;
168 struct res_mtt *mtt;
169 atomic_t ref_count;
170};
171
172enum res_srq_states {
173 RES_SRQ_BUSY = RES_ANY_BUSY,
174 RES_SRQ_ALLOCATED,
175 RES_SRQ_HW,
176};
177
178struct res_srq {
179 struct res_common com;
180 struct res_mtt *mtt;
181 struct res_cq *cq;
182 atomic_t ref_count;
183};
184
185enum res_counter_states {
186 RES_COUNTER_BUSY = RES_ANY_BUSY,
187 RES_COUNTER_ALLOCATED,
188};
189
190struct res_counter {
191 struct res_common com;
192 int port;
193};
194
195enum res_xrcdn_states {
196 RES_XRCD_BUSY = RES_ANY_BUSY,
197 RES_XRCD_ALLOCATED,
198};
199
200struct res_xrcdn {
201 struct res_common com;
202 int port;
203};
204
205enum res_fs_rule_states {
206 RES_FS_RULE_BUSY = RES_ANY_BUSY,
207 RES_FS_RULE_ALLOCATED,
208};
209
210struct res_fs_rule {
211 struct res_common com;
212 int qpn;
213};
214
215static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
216{
217 struct rb_node *node = root->rb_node;
218
219 while (node) {
220 struct res_common *res = container_of(node, struct res_common,
221 node);
222
223 if (res_id < res->res_id)
224 node = node->rb_left;
225 else if (res_id > res->res_id)
226 node = node->rb_right;
227 else
228 return res;
229 }
230 return NULL;
231}
232
233static int res_tracker_insert(struct rb_root *root, struct res_common *res)
234{
235 struct rb_node **new = &(root->rb_node), *parent = NULL;
236
237 /* Figure out where to put new node */
238 while (*new) {
239 struct res_common *this = container_of(*new, struct res_common,
240 node);
241
242 parent = *new;
243 if (res->res_id < this->res_id)
244 new = &((*new)->rb_left);
245 else if (res->res_id > this->res_id)
246 new = &((*new)->rb_right);
247 else
248 return -EEXIST;
249 }
250
251 /* Add new node and rebalance tree. */
252 rb_link_node(&res->node, parent, new);
253 rb_insert_color(&res->node, root);
254
255 return 0;
256}
257
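/* A note on the structures the two helpers above operate on: the tracker
 * keeps one rb_root per resource type (res_tree[RES_QP], res_tree[RES_MTT],
 * ...), and every tracked object embeds a struct res_common whose res_id is
 * the rb-tree key. Callers such as get_res(), add_res_range() and
 * rem_res_range() below take the tracker spinlock (mlx4_tlock(dev)) around
 * lookup, insert and erase.
 */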
258enum qp_transition {
259 QP_TRANS_INIT2RTR,
260 QP_TRANS_RTR2RTS,
261 QP_TRANS_RTS2RTS,
262 QP_TRANS_SQERR2RTS,
263 QP_TRANS_SQD2SQD,
264 QP_TRANS_SQD2RTS
265};
266
267/* For Debug uses */
268static const char *ResourceType(enum mlx4_resource rt)
269{
270 switch (rt) {
271 case RES_QP: return "RES_QP";
272 case RES_CQ: return "RES_CQ";
273 case RES_SRQ: return "RES_SRQ";
274 case RES_MPT: return "RES_MPT";
275 case RES_MTT: return "RES_MTT";
276 case RES_MAC: return "RES_MAC";
277 case RES_VLAN: return "RES_VLAN";
278 case RES_EQ: return "RES_EQ";
279 case RES_COUNTER: return "RES_COUNTER";
280 case RES_FS_RULE: return "RES_FS_RULE";
281 case RES_XRCD: return "RES_XRCD";
282 default: return "Unknown resource type !!!";
283 };
284}
285
286static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
287int mlx4_init_resource_tracker(struct mlx4_dev *dev)
288{
289 struct mlx4_priv *priv = mlx4_priv(dev);
290 int i;
291 int t;
292
293 priv->mfunc.master.res_tracker.slave_list =
294 kzalloc(dev->num_slaves * sizeof(struct slave_list),
295 GFP_KERNEL);
296 if (!priv->mfunc.master.res_tracker.slave_list)
297 return -ENOMEM;
298
299 for (i = 0 ; i < dev->num_slaves; i++) {
300 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
301 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
302 slave_list[i].res_list[t]);
303 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
304 }
305
306 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
307 dev->num_slaves);
308 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
309 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
310
311 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
312 return 0 ;
313}
314
315void mlx4_free_resource_tracker(struct mlx4_dev *dev,
316 enum mlx4_res_tracker_free_type type)
317{
318 struct mlx4_priv *priv = mlx4_priv(dev);
319 int i;
320
321 if (priv->mfunc.master.res_tracker.slave_list) {
322 if (type != RES_TR_FREE_STRUCTS_ONLY) {
323 for (i = 0; i < dev->num_slaves; i++) {
324 if (type == RES_TR_FREE_ALL ||
325 dev->caps.function != i)
326 mlx4_delete_all_resources_for_slave(dev, i);
327 }
328 /* free master's vlans */
329 i = dev->caps.function;
330 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
331 rem_slave_vlans(dev, i);
332 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
333 }
334
335 if (type != RES_TR_FREE_SLAVES_ONLY) {
336 kfree(priv->mfunc.master.res_tracker.slave_list);
337 priv->mfunc.master.res_tracker.slave_list = NULL;
338 }
339 }
340}
341
342static void update_pkey_index(struct mlx4_dev *dev, int slave,
343 struct mlx4_cmd_mailbox *inbox)
344{
345 u8 sched = *(u8 *)(inbox->buf + 64);
346 u8 orig_index = *(u8 *)(inbox->buf + 35);
347 u8 new_index;
348 struct mlx4_priv *priv = mlx4_priv(dev);
349 int port;
350
351 port = (sched >> 6 & 1) + 1;
352
353 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
354 *(u8 *)(inbox->buf + 35) = new_index;
355}
356
357static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
358 u8 slave)
359{
360 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
361 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
362 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
363
364 if (MLX4_QP_ST_UD == ts)
365 qp_ctx->pri_path.mgid_index = 0x80 | slave;
366
367 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
368 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
369 qp_ctx->pri_path.mgid_index = slave & 0x7F;
370 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
371 qp_ctx->alt_path.mgid_index = slave & 0x7F;
372 }
373}
374
375static int update_vport_qp_param(struct mlx4_dev *dev,
376 struct mlx4_cmd_mailbox *inbox,
377 u8 slave, u32 qpn)
378{
379 struct mlx4_qp_context *qpc = inbox->buf + 8;
380 struct mlx4_vport_oper_state *vp_oper;
381 struct mlx4_priv *priv;
382 u32 qp_type;
383 int port;
384
385 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
386 priv = mlx4_priv(dev);
387 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
388
389 if (MLX4_VGT != vp_oper->state.default_vlan) {
390 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
391 if (MLX4_QP_ST_RC == qp_type ||
392 (MLX4_QP_ST_UD == qp_type &&
393 !mlx4_is_qp_reserved(dev, qpn)))
394 return -EINVAL;
395
396 /* the reserved QPs (special, proxy, tunnel)
397 * do not operate over vlans
398 */
399 if (mlx4_is_qp_reserved(dev, qpn))
400 return 0;
401
402 /* force strip vlan by clear vsd */
403 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
404
405 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
406 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
407 qpc->pri_path.vlan_control =
408 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
409 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
410 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
411 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
412 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
413 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
414 } else if (0 != vp_oper->state.default_vlan) {
415 qpc->pri_path.vlan_control =
416 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
417 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
418 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
419 } else { /* priority tagged */
420 qpc->pri_path.vlan_control =
421 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
422 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
423 }
424
425 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
426 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
427 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
428 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
429 qpc->pri_path.sched_queue &= 0xC7;
430 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
431 }
432 if (vp_oper->state.spoofchk) {
433 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
434 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
435 }
436 return 0;
437}
438
439static int mpt_mask(struct mlx4_dev *dev)
440{
441 return dev->caps.num_mpts - 1;
442}
443
444static void *find_res(struct mlx4_dev *dev, u64 res_id,
445 enum mlx4_resource type)
446{
447 struct mlx4_priv *priv = mlx4_priv(dev);
448
449 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
450 res_id);
451}
452
453static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
454 enum mlx4_resource type,
455 void *res)
456{
457 struct res_common *r;
458 int err = 0;
459
460 spin_lock_irq(mlx4_tlock(dev));
461 r = find_res(dev, res_id, type);
462 if (!r) {
463 err = -ENONET;
464 goto exit;
465 }
466
467 if (r->state == RES_ANY_BUSY) {
468 err = -EBUSY;
469 goto exit;
470 }
471
472 if (r->owner != slave) {
473 err = -EPERM;
474 goto exit;
475 }
476
477 r->from_state = r->state;
478 r->state = RES_ANY_BUSY;
479
480 if (res)
481 *((struct res_common **)res) = r;
482
483exit:
484 spin_unlock_irq(mlx4_tlock(dev));
485 return err;
486}
487
488int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
489 enum mlx4_resource type,
490 u64 res_id, int *slave)
491{
492
493 struct res_common *r;
494 int err = -ENOENT;
495 int id = res_id;
496
497 if (type == RES_QP)
498 id &= 0x7fffff;
499 spin_lock(mlx4_tlock(dev));
500
501 r = find_res(dev, id, type);
502 if (r) {
503 *slave = r->owner;
504 err = 0;
505 }
506 spin_unlock(mlx4_tlock(dev));
507
508 return err;
509}
510
511static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
512 enum mlx4_resource type)
513{
514 struct res_common *r;
515
516 spin_lock_irq(mlx4_tlock(dev));
517 r = find_res(dev, res_id, type);
518 if (r)
519 r->state = r->from_state;
520 spin_unlock_irq(mlx4_tlock(dev));
521}
522
523static struct res_common *alloc_qp_tr(int id)
524{
525 struct res_qp *ret;
526
527 ret = kzalloc(sizeof *ret, GFP_KERNEL);
528 if (!ret)
529 return NULL;
530
531 ret->com.res_id = id;
532 ret->com.state = RES_QP_RESERVED;
533 ret->local_qpn = id;
534 INIT_LIST_HEAD(&ret->mcg_list);
535 spin_lock_init(&ret->mcg_spl);
536 atomic_set(&ret->ref_count, 0);
537
538 return &ret->com;
539}
540
541static struct res_common *alloc_mtt_tr(int id, int order)
542{
543 struct res_mtt *ret;
544
545 ret = kzalloc(sizeof *ret, GFP_KERNEL);
546 if (!ret)
547 return NULL;
548
549 ret->com.res_id = id;
550 ret->order = order;
551 ret->com.state = RES_MTT_ALLOCATED;
552 atomic_set(&ret->ref_count, 0);
553
554 return &ret->com;
555}
556
557static struct res_common *alloc_mpt_tr(int id, int key)
558{
559 struct res_mpt *ret;
560
561 ret = kzalloc(sizeof *ret, GFP_KERNEL);
562 if (!ret)
563 return NULL;
564
565 ret->com.res_id = id;
566 ret->com.state = RES_MPT_RESERVED;
567 ret->key = key;
568
569 return &ret->com;
570}
571
572static struct res_common *alloc_eq_tr(int id)
573{
574 struct res_eq *ret;
575
576 ret = kzalloc(sizeof *ret, GFP_KERNEL);
577 if (!ret)
578 return NULL;
579
580 ret->com.res_id = id;
581 ret->com.state = RES_EQ_RESERVED;
582
583 return &ret->com;
584}
585
586static struct res_common *alloc_cq_tr(int id)
587{
588 struct res_cq *ret;
589
590 ret = kzalloc(sizeof *ret, GFP_KERNEL);
591 if (!ret)
592 return NULL;
593
594 ret->com.res_id = id;
595 ret->com.state = RES_CQ_ALLOCATED;
596 atomic_set(&ret->ref_count, 0);
597
598 return &ret->com;
599}
600
601static struct res_common *alloc_srq_tr(int id)
602{
603 struct res_srq *ret;
604
605 ret = kzalloc(sizeof *ret, GFP_KERNEL);
606 if (!ret)
607 return NULL;
608
609 ret->com.res_id = id;
610 ret->com.state = RES_SRQ_ALLOCATED;
611 atomic_set(&ret->ref_count, 0);
612
613 return &ret->com;
614}
615
616static struct res_common *alloc_counter_tr(int id)
617{
618 struct res_counter *ret;
619
620 ret = kzalloc(sizeof *ret, GFP_KERNEL);
621 if (!ret)
622 return NULL;
623
624 ret->com.res_id = id;
625 ret->com.state = RES_COUNTER_ALLOCATED;
626
627 return &ret->com;
628}
629
630static struct res_common *alloc_xrcdn_tr(int id)
631{
632 struct res_xrcdn *ret;
633
634 ret = kzalloc(sizeof *ret, GFP_KERNEL);
635 if (!ret)
636 return NULL;
637
638 ret->com.res_id = id;
639 ret->com.state = RES_XRCD_ALLOCATED;
640
641 return &ret->com;
642}
643
644static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
645{
646 struct res_fs_rule *ret;
647
648 ret = kzalloc(sizeof *ret, GFP_KERNEL);
649 if (!ret)
650 return NULL;
651
652 ret->com.res_id = id;
653 ret->com.state = RES_FS_RULE_ALLOCATED;
654 ret->qpn = qpn;
655 return &ret->com;
656}
657
658static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
659 int extra)
660{
661 struct res_common *ret;
662
663 switch (type) {
664 case RES_QP:
665 ret = alloc_qp_tr(id);
666 break;
667 case RES_MPT:
668 ret = alloc_mpt_tr(id, extra);
669 break;
670 case RES_MTT:
671 ret = alloc_mtt_tr(id, extra);
672 break;
673 case RES_EQ:
674 ret = alloc_eq_tr(id);
675 break;
676 case RES_CQ:
677 ret = alloc_cq_tr(id);
678 break;
679 case RES_SRQ:
680 ret = alloc_srq_tr(id);
681 break;
682 case RES_MAC:
683 printk(KERN_ERR "implementation missing\n");
684 return NULL;
685 case RES_COUNTER:
686 ret = alloc_counter_tr(id);
687 break;
688 case RES_XRCD:
689 ret = alloc_xrcdn_tr(id);
690 break;
691 case RES_FS_RULE:
692 ret = alloc_fs_rule_tr(id, extra);
693 break;
694 default:
695 return NULL;
696 }
697 if (ret)
698 ret->owner = slave;
699
700 return ret;
701}
702
703static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
704 enum mlx4_resource type, int extra)
705{
706 int i;
707 int err;
708 struct mlx4_priv *priv = mlx4_priv(dev);
709 struct res_common **res_arr;
710 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
711 struct rb_root *root = &tracker->res_tree[type];
712
713 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
714 if (!res_arr)
715 return -ENOMEM;
716
717 for (i = 0; i < count; ++i) {
718 res_arr[i] = alloc_tr(base + i, type, slave, extra);
719 if (!res_arr[i]) {
720 for (--i; i >= 0; --i)
721 kfree(res_arr[i]);
722
723 kfree(res_arr);
724 return -ENOMEM;
725 }
726 }
727
728 spin_lock_irq(mlx4_tlock(dev));
729 for (i = 0; i < count; ++i) {
730 if (find_res(dev, base + i, type)) {
731 err = -EEXIST;
732 goto undo;
733 }
734 err = res_tracker_insert(root, res_arr[i]);
735 if (err)
736 goto undo;
737 list_add_tail(&res_arr[i]->list,
738 &tracker->slave_list[slave].res_list[type]);
739 }
740 spin_unlock_irq(mlx4_tlock(dev));
741 kfree(res_arr);
742
743 return 0;
744
745undo:
746 for (--i; i >= 0; --i)
747 rb_erase(&res_arr[i]->node, root);
748
749 spin_unlock_irq(mlx4_tlock(dev));
750
751 for (i = 0; i < count; ++i)
752 kfree(res_arr[i]);
753
754 kfree(res_arr);
755
756 return err;
757}
758
759static int remove_qp_ok(struct res_qp *res)
760{
761 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
762 !list_empty(&res->mcg_list)) {
763 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
764 res->com.state, atomic_read(&res->ref_count));
765 return -EBUSY;
766 } else if (res->com.state != RES_QP_RESERVED) {
767 return -EPERM;
768 }
769
770 return 0;
771}
772
773static int remove_mtt_ok(struct res_mtt *res, int order)
774{
775 if (res->com.state == RES_MTT_BUSY ||
776 atomic_read(&res->ref_count)) {
777 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
778 __func__, __LINE__,
779 mtt_states_str(res->com.state),
780 atomic_read(&res->ref_count));
781 return -EBUSY;
782 } else if (res->com.state != RES_MTT_ALLOCATED)
783 return -EPERM;
784 else if (res->order != order)
785 return -EINVAL;
786
787 return 0;
788}
789
790static int remove_mpt_ok(struct res_mpt *res)
791{
792 if (res->com.state == RES_MPT_BUSY)
793 return -EBUSY;
794 else if (res->com.state != RES_MPT_RESERVED)
795 return -EPERM;
796
797 return 0;
798}
799
800static int remove_eq_ok(struct res_eq *res)
801{
802 if (res->com.state == RES_EQ_BUSY)
803 return -EBUSY;
804 else if (res->com.state != RES_EQ_RESERVED)
805 return -EPERM;
806
807 return 0;
808}
809
810static int remove_counter_ok(struct res_counter *res)
811{
812 if (res->com.state == RES_COUNTER_BUSY)
813 return -EBUSY;
814 else if (res->com.state != RES_COUNTER_ALLOCATED)
815 return -EPERM;
816
817 return 0;
818}
819
820static int remove_xrcdn_ok(struct res_xrcdn *res)
821{
822 if (res->com.state == RES_XRCD_BUSY)
823 return -EBUSY;
824 else if (res->com.state != RES_XRCD_ALLOCATED)
825 return -EPERM;
826
827 return 0;
828}
829
830static int remove_fs_rule_ok(struct res_fs_rule *res)
831{
832 if (res->com.state == RES_FS_RULE_BUSY)
833 return -EBUSY;
834 else if (res->com.state != RES_FS_RULE_ALLOCATED)
835 return -EPERM;
836
837 return 0;
838}
839
840static int remove_cq_ok(struct res_cq *res)
841{
842 if (res->com.state == RES_CQ_BUSY)
843 return -EBUSY;
844 else if (res->com.state != RES_CQ_ALLOCATED)
845 return -EPERM;
846
847 return 0;
848}
849
850static int remove_srq_ok(struct res_srq *res)
851{
852 if (res->com.state == RES_SRQ_BUSY)
853 return -EBUSY;
854 else if (res->com.state != RES_SRQ_ALLOCATED)
855 return -EPERM;
856
857 return 0;
858}
859
860static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
861{
862 switch (type) {
863 case RES_QP:
864 return remove_qp_ok((struct res_qp *)res);
865 case RES_CQ:
866 return remove_cq_ok((struct res_cq *)res);
867 case RES_SRQ:
868 return remove_srq_ok((struct res_srq *)res);
869 case RES_MPT:
870 return remove_mpt_ok((struct res_mpt *)res);
871 case RES_MTT:
872 return remove_mtt_ok((struct res_mtt *)res, extra);
873 case RES_MAC:
874 return -ENOSYS;
875 case RES_EQ:
876 return remove_eq_ok((struct res_eq *)res);
877 case RES_COUNTER:
878 return remove_counter_ok((struct res_counter *)res);
879 case RES_XRCD:
880 return remove_xrcdn_ok((struct res_xrcdn *)res);
881 case RES_FS_RULE:
882 return remove_fs_rule_ok((struct res_fs_rule *)res);
883 default:
884 return -EINVAL;
885 }
886}
887
888static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
889 enum mlx4_resource type, int extra)
890{
891 u64 i;
892 int err;
893 struct mlx4_priv *priv = mlx4_priv(dev);
894 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
895 struct res_common *r;
896
897 spin_lock_irq(mlx4_tlock(dev));
898 for (i = base; i < base + count; ++i) {
899 r = res_tracker_lookup(&tracker->res_tree[type], i);
900 if (!r) {
901 err = -ENOENT;
902 goto out;
903 }
904 if (r->owner != slave) {
905 err = -EPERM;
906 goto out;
907 }
908 err = remove_ok(r, type, extra);
909 if (err)
910 goto out;
911 }
912
913 for (i = base; i < base + count; ++i) {
914 r = res_tracker_lookup(&tracker->res_tree[type], i);
915 rb_erase(&r->node, &tracker->res_tree[type]);
916 list_del(&r->list);
917 kfree(r);
918 }
919 err = 0;
920
921out:
922 spin_unlock_irq(mlx4_tlock(dev));
923
924 return err;
925}
926
927static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
928 enum res_qp_states state, struct res_qp **qp,
929 int alloc)
930{
931 struct mlx4_priv *priv = mlx4_priv(dev);
932 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
933 struct res_qp *r;
934 int err = 0;
935
936 spin_lock_irq(mlx4_tlock(dev));
937 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
938 if (!r)
939 err = -ENOENT;
940 else if (r->com.owner != slave)
941 err = -EPERM;
942 else {
943 switch (state) {
944 case RES_QP_BUSY:
945 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
946 __func__, r->com.res_id);
947 err = -EBUSY;
948 break;
949
950 case RES_QP_RESERVED:
951 if (r->com.state == RES_QP_MAPPED && !alloc)
952 break;
953
954 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
955 err = -EINVAL;
956 break;
957
958 case RES_QP_MAPPED:
959 if ((r->com.state == RES_QP_RESERVED && alloc) ||
960 r->com.state == RES_QP_HW)
961 break;
962 else {
963 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
964 r->com.res_id);
965 err = -EINVAL;
966 }
967
968 break;
969
970 case RES_QP_HW:
971 if (r->com.state != RES_QP_MAPPED)
972 err = -EINVAL;
973 break;
974 default:
975 err = -EINVAL;
976 }
977
978 if (!err) {
979 r->com.from_state = r->com.state;
980 r->com.to_state = state;
981 r->com.state = RES_QP_BUSY;
982 if (qp)
983 *qp = r;
984 }
985 }
986
987 spin_unlock_irq(mlx4_tlock(dev));
988
989 return err;
990}
991
992static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
993 enum res_mpt_states state, struct res_mpt **mpt)
994{
995 struct mlx4_priv *priv = mlx4_priv(dev);
996 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
997 struct res_mpt *r;
998 int err = 0;
999
1000 spin_lock_irq(mlx4_tlock(dev));
1001 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1002 if (!r)
1003 err = -ENOENT;
1004 else if (r->com.owner != slave)
1005 err = -EPERM;
1006 else {
1007 switch (state) {
1008 case RES_MPT_BUSY:
1009 err = -EINVAL;
1010 break;
1011
1012 case RES_MPT_RESERVED:
1013 if (r->com.state != RES_MPT_MAPPED)
1014 err = -EINVAL;
1015 break;
1016
1017 case RES_MPT_MAPPED:
1018 if (r->com.state != RES_MPT_RESERVED &&
1019 r->com.state != RES_MPT_HW)
1020 err = -EINVAL;
1021 break;
1022
1023 case RES_MPT_HW:
1024 if (r->com.state != RES_MPT_MAPPED)
1025 err = -EINVAL;
1026 break;
1027 default:
1028 err = -EINVAL;
1029 }
1030
1031 if (!err) {
1032 r->com.from_state = r->com.state;
1033 r->com.to_state = state;
1034 r->com.state = RES_MPT_BUSY;
1035 if (mpt)
1036 *mpt = r;
1037 }
1038 }
1039
1040 spin_unlock_irq(mlx4_tlock(dev));
1041
1042 return err;
1043}
1044
1045static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1046 enum res_eq_states state, struct res_eq **eq)
1047{
1048 struct mlx4_priv *priv = mlx4_priv(dev);
1049 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1050 struct res_eq *r;
1051 int err = 0;
1052
1053 spin_lock_irq(mlx4_tlock(dev));
1054 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1055 if (!r)
1056 err = -ENOENT;
1057 else if (r->com.owner != slave)
1058 err = -EPERM;
1059 else {
1060 switch (state) {
1061 case RES_EQ_BUSY:
1062 err = -EINVAL;
1063 break;
1064
1065 case RES_EQ_RESERVED:
1066 if (r->com.state != RES_EQ_HW)
1067 err = -EINVAL;
1068 break;
1069
1070 case RES_EQ_HW:
1071 if (r->com.state != RES_EQ_RESERVED)
1072 err = -EINVAL;
1073 break;
1074
1075 default:
1076 err = -EINVAL;
1077 }
1078
1079 if (!err) {
1080 r->com.from_state = r->com.state;
1081 r->com.to_state = state;
1082 r->com.state = RES_EQ_BUSY;
1083 if (eq)
1084 *eq = r;
1085 }
1086 }
1087
1088 spin_unlock_irq(mlx4_tlock(dev));
1089
1090 return err;
1091}
1092
1093static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1094 enum res_cq_states state, struct res_cq **cq)
1095{
1096 struct mlx4_priv *priv = mlx4_priv(dev);
1097 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1098 struct res_cq *r;
1099 int err;
1100
1101 spin_lock_irq(mlx4_tlock(dev));
1102 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1103 if (!r)
1104 err = -ENOENT;
1105 else if (r->com.owner != slave)
1106 err = -EPERM;
1107 else {
1108 switch (state) {
1109 case RES_CQ_BUSY:
1110 err = -EBUSY;
1111 break;
1112
1113 case RES_CQ_ALLOCATED:
1114 if (r->com.state != RES_CQ_HW)
1115 err = -EINVAL;
1116 else if (atomic_read(&r->ref_count))
1117 err = -EBUSY;
1118 else
1119 err = 0;
1120 break;
1121
1122 case RES_CQ_HW:
1123 if (r->com.state != RES_CQ_ALLOCATED)
1124 err = -EINVAL;
1125 else
1126 err = 0;
1127 break;
1128
1129 default:
1130 err = -EINVAL;
1131 }
1132
1133 if (!err) {
1134 r->com.from_state = r->com.state;
1135 r->com.to_state = state;
1136 r->com.state = RES_CQ_BUSY;
1137 if (cq)
1138 *cq = r;
1139 }
1140 }
1141
1142 spin_unlock_irq(mlx4_tlock(dev));
1143
1144 return err;
1145}
1146
1147static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1148 enum res_cq_states state, struct res_srq **srq)
1149{
1150 struct mlx4_priv *priv = mlx4_priv(dev);
1151 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1152 struct res_srq *r;
1153 int err = 0;
1154
1155 spin_lock_irq(mlx4_tlock(dev));
1156 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1157 if (!r)
1158 err = -ENOENT;
1159 else if (r->com.owner != slave)
1160 err = -EPERM;
1161 else {
1162 switch (state) {
1163 case RES_SRQ_BUSY:
1164 err = -EINVAL;
1165 break;
1166
1167 case RES_SRQ_ALLOCATED:
1168 if (r->com.state != RES_SRQ_HW)
1169 err = -EINVAL;
1170 else if (atomic_read(&r->ref_count))
1171 err = -EBUSY;
1172 break;
1173
1174 case RES_SRQ_HW:
1175 if (r->com.state != RES_SRQ_ALLOCATED)
1176 err = -EINVAL;
1177 break;
1178
1179 default:
1180 err = -EINVAL;
1181 }
1182
1183 if (!err) {
1184 r->com.from_state = r->com.state;
1185 r->com.to_state = state;
1186 r->com.state = RES_SRQ_BUSY;
1187 if (srq)
1188 *srq = r;
1189 }
1190 }
1191
1192 spin_unlock_irq(mlx4_tlock(dev));
1193
1194 return err;
1195}
1196
1197static void res_abort_move(struct mlx4_dev *dev, int slave,
1198 enum mlx4_resource type, int id)
1199{
1200 struct mlx4_priv *priv = mlx4_priv(dev);
1201 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1202 struct res_common *r;
1203
1204 spin_lock_irq(mlx4_tlock(dev));
1205 r = res_tracker_lookup(&tracker->res_tree[type], id);
1206 if (r && (r->owner == slave))
1207 r->state = r->from_state;
1208 spin_unlock_irq(mlx4_tlock(dev));
1209}
1210
1211static void res_end_move(struct mlx4_dev *dev, int slave,
1212 enum mlx4_resource type, int id)
1213{
1214 struct mlx4_priv *priv = mlx4_priv(dev);
1215 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1216 struct res_common *r;
1217
1218 spin_lock_irq(mlx4_tlock(dev));
1219 r = res_tracker_lookup(&tracker->res_tree[type], id);
1220 if (r && (r->owner == slave))
1221 r->state = r->to_state;
1222 spin_unlock_irq(mlx4_tlock(dev));
1223}
1224
1225static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1226{
1227 return mlx4_is_qp_reserved(dev, qpn) &&
1228 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1229}
1230
1231static int fw_reserved(struct mlx4_dev *dev, int qpn)
1232{
1233 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1234}
1235
1236static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1237 u64 in_param, u64 *out_param)
1238{
1239 int err;
1240 int count;
1241 int align;
1242 int base;
1243 int qpn;
1244
1245 switch (op) {
1246 case RES_OP_RESERVE:
1247 count = get_param_l(&in_param);
1248 align = get_param_h(&in_param);
1249 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1250 if (err)
1251 return err;
1252
1253 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1254 if (err) {
1255 __mlx4_qp_release_range(dev, base, count);
1256 return err;
1257 }
1258 set_param_l(out_param, base);
1259 break;
1260 case RES_OP_MAP_ICM:
1261 qpn = get_param_l(&in_param) & 0x7fffff;
1262 if (valid_reserved(dev, slave, qpn)) {
1263 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1264 if (err)
1265 return err;
1266 }
1267
1268 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1269 NULL, 1);
1270 if (err)
1271 return err;
1272
1273 if (!fw_reserved(dev, qpn)) {
1274 err = __mlx4_qp_alloc_icm(dev, qpn);
1275 if (err) {
1276 res_abort_move(dev, slave, RES_QP, qpn);
1277 return err;
1278 }
1279 }
1280
1281 res_end_move(dev, slave, RES_QP, qpn);
1282 break;
1283
1284 default:
1285 err = -EINVAL;
1286 break;
1287 }
1288 return err;
1289}
1290
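/* Summary of the state moves performed above: RES_OP_RESERVE goes through
 * __mlx4_qp_reserve_range() and leaves the new entries in RES_QP_RESERVED;
 * RES_OP_MAP_ICM maps ICM for a single QP via __mlx4_qp_alloc_icm() and
 * moves it to RES_QP_MAPPED; the final move to RES_QP_HW is done later by
 * mlx4_RST2INIT_QP_wrapper() when the QP context is handed to firmware.
 */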
1291static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1292 u64 in_param, u64 *out_param)
1293{
1294 int err = -EINVAL;
1295 int base;
1296 int order;
1297
1298 if (op != RES_OP_RESERVE_AND_MAP)
1299 return err;
1300
1301 order = get_param_l(&in_param);
1302 base = __mlx4_alloc_mtt_range(dev, order);
1303 if (base == -1)
1304 return -ENOMEM;
1305
1306 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1307 if (err)
1308 __mlx4_free_mtt_range(dev, base, order);
1309 else
1310 set_param_l(out_param, base);
1311
1312 return err;
1313}
1314
1315static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1316 u64 in_param, u64 *out_param)
1317{
1318 int err = -EINVAL;
1319 int index;
1320 int id;
1321 struct res_mpt *mpt;
1322
1323 switch (op) {
1324 case RES_OP_RESERVE:
1325 index = __mlx4_mpt_reserve(dev);
1326 if (index == -1)
1327 break;
1328 id = index & mpt_mask(dev);
1329
1330 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1331 if (err) {
1332 __mlx4_mpt_release(dev, index);
1333 break;
1334 }
1335 set_param_l(out_param, index);
1336 break;
1337 case RES_OP_MAP_ICM:
1338 index = get_param_l(&in_param);
1339 id = index & mpt_mask(dev);
1340 err = mr_res_start_move_to(dev, slave, id,
1341 RES_MPT_MAPPED, &mpt);
1342 if (err)
1343 return err;
1344
1345 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1346 if (err) {
1347 res_abort_move(dev, slave, RES_MPT, id);
1348 return err;
1349 }
1350
1351 res_end_move(dev, slave, RES_MPT, id);
1352 break;
1353 }
1354 return err;
1355}
1356
1357static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1358 u64 in_param, u64 *out_param)
1359{
1360 int cqn;
1361 int err;
1362
1363 switch (op) {
1364 case RES_OP_RESERVE_AND_MAP:
1365 err = __mlx4_cq_alloc_icm(dev, &cqn);
1366 if (err)
1367 break;
1368
1369 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1370 if (err) {
1371 __mlx4_cq_free_icm(dev, cqn);
1372 break;
1373 }
1374
1375 set_param_l(out_param, cqn);
1376 break;
1377
1378 default:
1379 err = -EINVAL;
1380 }
1381
1382 return err;
1383}
1384
1385static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1386 u64 in_param, u64 *out_param)
1387{
1388 int srqn;
1389 int err;
1390
1391 switch (op) {
1392 case RES_OP_RESERVE_AND_MAP:
1393 err = __mlx4_srq_alloc_icm(dev, &srqn);
1394 if (err)
1395 break;
1396
1397 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1398 if (err) {
1399 __mlx4_srq_free_icm(dev, srqn);
1400 break;
1401 }
1402
1403 set_param_l(out_param, srqn);
1404 break;
1405
1406 default:
1407 err = -EINVAL;
1408 }
1409
1410 return err;
1411}
1412
1413static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1414{
1415 struct mlx4_priv *priv = mlx4_priv(dev);
1416 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1417 struct mac_res *res;
1418
1419 res = kzalloc(sizeof *res, GFP_KERNEL);
1420 if (!res)
1421 return -ENOMEM;
1422 res->mac = mac;
1423 res->port = (u8) port;
1424 list_add_tail(&res->list,
1425 &tracker->slave_list[slave].res_list[RES_MAC]);
1426 return 0;
1427}
1428
1429static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1430 int port)
1431{
1432 struct mlx4_priv *priv = mlx4_priv(dev);
1433 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1434 struct list_head *mac_list =
1435 &tracker->slave_list[slave].res_list[RES_MAC];
1436 struct mac_res *res, *tmp;
1437
1438 list_for_each_entry_safe(res, tmp, mac_list, list) {
1439 if (res->mac == mac && res->port == (u8) port) {
1440 list_del(&res->list);
1441 kfree(res);
1442 break;
1443 }
1444 }
1445}
1446
1447static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1448{
1449 struct mlx4_priv *priv = mlx4_priv(dev);
1450 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1451 struct list_head *mac_list =
1452 &tracker->slave_list[slave].res_list[RES_MAC];
1453 struct mac_res *res, *tmp;
1454
1455 list_for_each_entry_safe(res, tmp, mac_list, list) {
1456 list_del(&res->list);
1457 __mlx4_unregister_mac(dev, res->port, res->mac);
1458 kfree(res);
1459 }
1460}
1461
1462static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1463 u64 in_param, u64 *out_param, int in_port)
1464{
1465 int err = -EINVAL;
1466 int port;
1467 u64 mac;
1468
1469 if (op != RES_OP_RESERVE_AND_MAP)
1470 return err;
1471
1472 port = !in_port ? get_param_l(out_param) : in_port;
1473 mac = in_param;
1474
1475 err = __mlx4_register_mac(dev, port, mac);
1476 if (err >= 0) {
1477 set_param_l(out_param, err);
1478 err = 0;
1479 }
1480
1481 if (!err) {
1482 err = mac_add_to_slave(dev, slave, mac, port);
1483 if (err)
1484 __mlx4_unregister_mac(dev, port, mac);
1485 }
1486 return err;
1487}
1488
1489static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1490 int port, int vlan_index)
1491 {
1492 struct mlx4_priv *priv = mlx4_priv(dev);
1493 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1494 struct list_head *vlan_list =
1495 &tracker->slave_list[slave].res_list[RES_VLAN];
1496 struct vlan_res *res, *tmp;
1497
1498 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1499 if (res->vlan == vlan && res->port == (u8) port) {
1500 /* vlan found. update ref count */
1501 ++res->ref_count;
1502 return 0;
1503 }
1504 }
1505
1506 res = kzalloc(sizeof(*res), GFP_KERNEL);
1507 if (!res)
1508 return -ENOMEM;
1509 res->vlan = vlan;
1510 res->port = (u8) port;
1511 res->vlan_index = vlan_index;
1512 res->ref_count = 1;
1513 list_add_tail(&res->list,
1514 &tracker->slave_list[slave].res_list[RES_VLAN]);
1515 return 0;
1516}
1517
1518
1519static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1520 int port)
1521{
1522 struct mlx4_priv *priv = mlx4_priv(dev);
1523 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1524 struct list_head *vlan_list =
1525 &tracker->slave_list[slave].res_list[RES_VLAN];
1526 struct vlan_res *res, *tmp;
1527
1528 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1529 if (res->vlan == vlan && res->port == (u8) port) {
1530 if (!--res->ref_count) {
1531 list_del(&res->list);
1532 kfree(res);
1533 }
1534 break;
1535 }
1536 }
1537}
1538
1539static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1540{
1541 struct mlx4_priv *priv = mlx4_priv(dev);
1542 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1543 struct list_head *vlan_list =
1544 &tracker->slave_list[slave].res_list[RES_VLAN];
1545 struct vlan_res *res, *tmp;
1546 int i;
1547
1548 list_for_each_entry_safe(res, tmp, vlan_list, list) {
1549 list_del(&res->list);
1550 /* dereference the vlan the num times the slave referenced it */
1551 for (i = 0; i < res->ref_count; i++)
1552 __mlx4_unregister_vlan(dev, res->port, res->vlan);
1553 kfree(res);
1554 }
1555}
1556
1557static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1558 u64 in_param, u64 *out_param, int port)
1559{
1560 int err;
1561 u16 vlan;
1562 int vlan_index;
1563
1564 if (!port || op != RES_OP_RESERVE_AND_MAP)
1565 return -EINVAL;
1566
1567 vlan = (u16) in_param;
1568
1569 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1570 if (!err) {
1571 set_param_l(out_param, (u32) vlan_index);
1572 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1573 if (err)
1574 __mlx4_unregister_vlan(dev, port, vlan);
1575 }
1576 return err;
1577}
1578
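/* Rough flow for the VLAN registration tracked above (a sketch of the call
 * chain, not new behaviour): a slave issues an ALLOC_RES command with
 * RES_VLAN in the low byte of in_modifier and the port in bits 8-15; the
 * master lands in mlx4_ALLOC_RES_wrapper() -> vlan_alloc_res(), which
 * programs the VLAN via __mlx4_register_vlan() and records it per slave
 * with vlan_add_to_slave() (ref-counted, so re-registering the same VLAN on
 * the same port only bumps ref_count). FREE_RES takes the reverse path
 * through vlan_free_res(), and rem_slave_vlans() releases whatever a slave
 * still holds when it is cleaned up.
 */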
1579static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1580 u64 in_param, u64 *out_param)
1581{
1582 u32 index;
1583 int err;
1584
1585 if (op != RES_OP_RESERVE)
1586 return -EINVAL;
1587
1588 err = __mlx4_counter_alloc(dev, &index);
1589 if (err)
1590 return err;
1591
1592 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1593 if (err)
1594 __mlx4_counter_free(dev, index);
1595 else
1596 set_param_l(out_param, index);
1597
1598 return err;
1599}
1600
1601static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1602 u64 in_param, u64 *out_param)
1603{
1604 u32 xrcdn;
1605 int err;
1606
1607 if (op != RES_OP_RESERVE)
1608 return -EINVAL;
1609
1610 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1611 if (err)
1612 return err;
1613
1614 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1615 if (err)
1616 __mlx4_xrcd_free(dev, xrcdn);
1617 else
1618 set_param_l(out_param, xrcdn);
1619
1620 return err;
1621}
1622
1623int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1624 struct mlx4_vhcr *vhcr,
1625 struct mlx4_cmd_mailbox *inbox,
1626 struct mlx4_cmd_mailbox *outbox,
1627 struct mlx4_cmd_info *cmd)
1628{
1629 int err;
1630 int alop = vhcr->op_modifier;
1631
1632 switch (vhcr->in_modifier & 0xFF) {
1633 case RES_QP:
1634 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1635 vhcr->in_param, &vhcr->out_param);
1636 break;
1637
1638 case RES_MTT:
1639 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1640 vhcr->in_param, &vhcr->out_param);
1641 break;
1642
1643 case RES_MPT:
1644 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1645 vhcr->in_param, &vhcr->out_param);
1646 break;
1647
1648 case RES_CQ:
1649 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1650 vhcr->in_param, &vhcr->out_param);
1651 break;
1652
1653 case RES_SRQ:
1654 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1655 vhcr->in_param, &vhcr->out_param);
1656 break;
1657
1658 case RES_MAC:
1659 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1660 vhcr->in_param, &vhcr->out_param,
1661 (vhcr->in_modifier >> 8) & 0xFF);
1662 break;
1663
1664 case RES_VLAN:
1665 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1666 vhcr->in_param, &vhcr->out_param,
1667 (vhcr->in_modifier >> 8) & 0xFF);
1668 break;
1669
1670 case RES_COUNTER:
1671 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1672 vhcr->in_param, &vhcr->out_param);
1673 break;
1674
1675 case RES_XRCD:
1676 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1677 vhcr->in_param, &vhcr->out_param);
1678 break;
1679
1680 default:
1681 err = -EINVAL;
1682 break;
1683 }
1684
1685 return err;
1686}
1687
1688static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1689 u64 in_param)
1690{
1691 int err;
1692 int count;
1693 int base;
1694 int qpn;
1695
1696 switch (op) {
1697 case RES_OP_RESERVE:
1698 base = get_param_l(&in_param) & 0x7fffff;
1699 count = get_param_h(&in_param);
1700 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1701 if (err)
1702 break;
1703 __mlx4_qp_release_range(dev, base, count);
1704 break;
1705 case RES_OP_MAP_ICM:
1706 qpn = get_param_l(&in_param) & 0x7fffff;
1707 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1708 NULL, 0);
1709 if (err)
1710 return err;
1711
1712 if (!fw_reserved(dev, qpn))
1713 __mlx4_qp_free_icm(dev, qpn);
1714
1715 res_end_move(dev, slave, RES_QP, qpn);
1716
1717 if (valid_reserved(dev, slave, qpn))
1718 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1719 break;
1720 default:
1721 err = -EINVAL;
1722 break;
1723 }
1724 return err;
1725}
1726
1727static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1728 u64 in_param, u64 *out_param)
1729{
1730 int err = -EINVAL;
1731 int base;
1732 int order;
1733
1734 if (op != RES_OP_RESERVE_AND_MAP)
1735 return err;
1736
1737 base = get_param_l(&in_param);
1738 order = get_param_h(&in_param);
1739 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1740 if (!err)
1741 __mlx4_free_mtt_range(dev, base, order);
1742 return err;
1743}
1744
1745static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1746 u64 in_param)
1747{
1748 int err = -EINVAL;
1749 int index;
1750 int id;
1751 struct res_mpt *mpt;
1752
1753 switch (op) {
1754 case RES_OP_RESERVE:
1755 index = get_param_l(&in_param);
1756 id = index & mpt_mask(dev);
1757 err = get_res(dev, slave, id, RES_MPT, &mpt);
1758 if (err)
1759 break;
1760 index = mpt->key;
1761 put_res(dev, slave, id, RES_MPT);
1762
1763 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1764 if (err)
1765 break;
1766 __mlx4_mpt_release(dev, index);
1767 break;
1768 case RES_OP_MAP_ICM:
1769 index = get_param_l(&in_param);
1770 id = index & mpt_mask(dev);
1771 err = mr_res_start_move_to(dev, slave, id,
1772 RES_MPT_RESERVED, &mpt);
1773 if (err)
1774 return err;
1775
1776 __mlx4_mpt_free_icm(dev, mpt->key);
1777 res_end_move(dev, slave, RES_MPT, id);
1778 return err;
1779 break;
1780 default:
1781 err = -EINVAL;
1782 break;
1783 }
1784 return err;
1785}
1786
1787static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1788 u64 in_param, u64 *out_param)
1789{
1790 int cqn;
1791 int err;
1792
1793 switch (op) {
1794 case RES_OP_RESERVE_AND_MAP:
1795 cqn = get_param_l(&in_param);
1796 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1797 if (err)
1798 break;
1799
1800 __mlx4_cq_free_icm(dev, cqn);
1801 break;
1802
1803 default:
1804 err = -EINVAL;
1805 break;
1806 }
1807
1808 return err;
1809}
1810
1811static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1812 u64 in_param, u64 *out_param)
1813{
1814 int srqn;
1815 int err;
1816
1817 switch (op) {
1818 case RES_OP_RESERVE_AND_MAP:
1819 srqn = get_param_l(&in_param);
1820 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1821 if (err)
1822 break;
1823
1824 __mlx4_srq_free_icm(dev, srqn);
1825 break;
1826
1827 default:
1828 err = -EINVAL;
1829 break;
1830 }
1831
1832 return err;
1833}
1834
1835static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1836 u64 in_param, u64 *out_param, int in_port)
1837{
1838 int port;
1839 int err = 0;
1840
1841 switch (op) {
1842 case RES_OP_RESERVE_AND_MAP:
1843 port = !in_port ? get_param_l(out_param) : in_port;
1844 mac_del_from_slave(dev, slave, in_param, port);
1845 __mlx4_unregister_mac(dev, port, in_param);
1846 break;
1847 default:
1848 err = -EINVAL;
1849 break;
1850 }
1851
1852 return err;
1853
1854}
1855
1856static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1857 u64 in_param, u64 *out_param, int port)
1858{
1859 int err = 0;
1860
1861 switch (op) {
1862 case RES_OP_RESERVE_AND_MAP:
1863 if (!port)
1864 return -EINVAL;
1865 vlan_del_from_slave(dev, slave, in_param, port);
1866 __mlx4_unregister_vlan(dev, port, in_param);
1867 break;
1868 default:
1869 err = -EINVAL;
1870 break;
1871 }
1872
1873 return err;
1874}
1875
1876static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1877 u64 in_param, u64 *out_param)
1878{
1879 int index;
1880 int err;
1881
1882 if (op != RES_OP_RESERVE)
1883 return -EINVAL;
1884
1885 index = get_param_l(&in_param);
1886 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1887 if (err)
1888 return err;
1889
1890 __mlx4_counter_free(dev, index);
1891
1892 return err;
1893}
1894
1895static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1896 u64 in_param, u64 *out_param)
1897{
1898 int xrcdn;
1899 int err;
1900
1901 if (op != RES_OP_RESERVE)
1902 return -EINVAL;
1903
1904 xrcdn = get_param_l(&in_param);
1905 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1906 if (err)
1907 return err;
1908
1909 __mlx4_xrcd_free(dev, xrcdn);
1910
1911 return err;
1912}
1913
1914int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1915 struct mlx4_vhcr *vhcr,
1916 struct mlx4_cmd_mailbox *inbox,
1917 struct mlx4_cmd_mailbox *outbox,
1918 struct mlx4_cmd_info *cmd)
1919{
1920 int err = -EINVAL;
1921 int alop = vhcr->op_modifier;
1922
1923 switch (vhcr->in_modifier & 0xFF) {
1924 case RES_QP:
1925 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1926 vhcr->in_param);
1927 break;
1928
1929 case RES_MTT:
1930 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1931 vhcr->in_param, &vhcr->out_param);
1932 break;
1933
1934 case RES_MPT:
1935 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1936 vhcr->in_param);
1937 break;
1938
1939 case RES_CQ:
1940 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1941 vhcr->in_param, &vhcr->out_param);
1942 break;
1943
1944 case RES_SRQ:
1945 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1946 vhcr->in_param, &vhcr->out_param);
1947 break;
1948
1949 case RES_MAC:
1950 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1951 vhcr->in_param, &vhcr->out_param,
1952 (vhcr->in_modifier >> 8) & 0xFF);
1953 break;
1954
1955 case RES_VLAN:
1956 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1957 vhcr->in_param, &vhcr->out_param,
1958 (vhcr->in_modifier >> 8) & 0xFF);
1959 break;
1960
1961 case RES_COUNTER:
1962 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1963 vhcr->in_param, &vhcr->out_param);
1964 break;
1965
1966 case RES_XRCD:
1967 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1968 vhcr->in_param, &vhcr->out_param);
1969
1970 default:
1971 break;
1972 }
1973 return err;
1974}
1975
1976/* ugly but other choices are uglier */
1977static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1978{
1979 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1980}
1981
1982static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1983{
1984 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1985}
1986
1987static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1988{
1989 return be32_to_cpu(mpt->mtt_sz);
1990}
1991
1992static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1993{
1994 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1995}
1996
1997static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1998{
1999 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2000}
2001
2002static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2003{
2004 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2005}
2006
2007static int mr_is_region(struct mlx4_mpt_entry *mpt)
2008{
2009 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2010}
2011
2012static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2013{
2014 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2015}
2016
2017static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2018{
2019 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2020}
2021
2022static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2023{
2024 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2025 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2026 int log_sq_sride = qpc->sq_size_stride & 7;
2027 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2028 int log_rq_stride = qpc->rq_size_stride & 7;
2029 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2030 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2031 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2032 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2033 int sq_size;
2034 int rq_size;
2035 int total_pages;
2036 int total_mem;
2037 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2038
2039 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
2040 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2041 total_mem = sq_size + rq_size;
2042 total_pages =
2043 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2044 page_shift);
2045
2046 return total_pages;
2047}
2048
2049static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2050 int size, struct res_mtt *mtt)
2051{
2052 int res_start = mtt->com.res_id;
2053 int res_size = (1 << mtt->order);
2054
2055 if (start < res_start || start + size > res_start + res_size)
2056 return -EPERM;
2057 return 0;
2058}
2059
2060int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2061 struct mlx4_vhcr *vhcr,
2062 struct mlx4_cmd_mailbox *inbox,
2063 struct mlx4_cmd_mailbox *outbox,
2064 struct mlx4_cmd_info *cmd)
2065{
2066 int err;
2067 int index = vhcr->in_modifier;
2068 struct res_mtt *mtt;
2069 struct res_mpt *mpt;
2070 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2071 int phys;
2072 int id;
2073 u32 pd;
2074 int pd_slave;
2075
2076 id = index & mpt_mask(dev);
2077 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2078 if (err)
2079 return err;
2080
2081 /* Disable memory windows for VFs. */
2082 if (!mr_is_region(inbox->buf)) {
2083 err = -EPERM;
2084 goto ex_abort;
2085 }
2086
2087 /* Make sure that the PD bits related to the slave id are zeros. */
2088 pd = mr_get_pd(inbox->buf);
2089 pd_slave = (pd >> 17) & 0x7f;
2090 if (pd_slave != 0 && pd_slave != slave) {
2091 err = -EPERM;
2092 goto ex_abort;
2093 }
2094
2095 if (mr_is_fmr(inbox->buf)) {
2096 /* FMR and Bind Enable are forbidden in slave devices. */
2097 if (mr_is_bind_enabled(inbox->buf)) {
2098 err = -EPERM;
2099 goto ex_abort;
2100 }
2101 /* FMR and Memory Windows are also forbidden. */
2102 if (!mr_is_region(inbox->buf)) {
2103 err = -EPERM;
2104 goto ex_abort;
2105 }
2106 }
2107
2108 phys = mr_phys_mpt(inbox->buf);
2109 if (!phys) {
2110 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2111 if (err)
2112 goto ex_abort;
2113
2114 err = check_mtt_range(dev, slave, mtt_base,
2115 mr_get_mtt_size(inbox->buf), mtt);
2116 if (err)
2117 goto ex_put;
2118
2119 mpt->mtt = mtt;
2120 }
2121
2121
2122 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2123 if (err)
2124 goto ex_put;
2125
2126 if (!phys) {
2127 atomic_inc(&mtt->ref_count);
2128 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2129 }
2130
2131 res_end_move(dev, slave, RES_MPT, id);
2132 return 0;
2133
2134ex_put:
2135 if (!phys)
2136 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2137ex_abort:
2138 res_abort_move(dev, slave, RES_MPT, id);
2139
2140 return err;
2141}
2142
2143int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2144 struct mlx4_vhcr *vhcr,
2145 struct mlx4_cmd_mailbox *inbox,
2146 struct mlx4_cmd_mailbox *outbox,
2147 struct mlx4_cmd_info *cmd)
2148{
2149 int err;
2150 int index = vhcr->in_modifier;
2151 struct res_mpt *mpt;
2152 int id;
2153
2154 id = index & mpt_mask(dev);
2155 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2156 if (err)
2157 return err;
2158
2159 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2160 if (err)
2161 goto ex_abort;
2162
2163 if (mpt->mtt)
2164 atomic_dec(&mpt->mtt->ref_count);
2165
2166 res_end_move(dev, slave, RES_MPT, id);
2167 return 0;
2168
2169ex_abort:
2170 res_abort_move(dev, slave, RES_MPT, id);
2171
2172 return err;
2173}
2174
2175int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2176 struct mlx4_vhcr *vhcr,
2177 struct mlx4_cmd_mailbox *inbox,
2178 struct mlx4_cmd_mailbox *outbox,
2179 struct mlx4_cmd_info *cmd)
2180{
2181 int err;
2182 int index = vhcr->in_modifier;
2183 struct res_mpt *mpt;
2184 int id;
2185
2186 id = index & mpt_mask(dev);
2187 err = get_res(dev, slave, id, RES_MPT, &mpt);
2188 if (err)
2189 return err;
2190
2191 if (mpt->com.from_state != RES_MPT_HW) {
2192 err = -EBUSY;
2193 goto out;
2194 }
2195
2196 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2197
2198out:
2199 put_res(dev, slave, id, RES_MPT);
2200 return err;
2201}
2202
2203static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2204{
2205 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2206}
2207
2208static int qp_get_scqn(struct mlx4_qp_context *qpc)
2209{
2210 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2211}
2212
2213static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2214{
2215 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2216}
2217
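/* Proxy and tunnel special QPs use a paravirtualized qkey; when the qpn is
 * one of them, mlx4_get_parav_qkey() succeeds and the qkey in the QP context
 * is overwritten with the paravirtual value.
 */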
2218static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2219 struct mlx4_qp_context *context)
2220{
2221 u32 qpn = vhcr->in_modifier & 0xffffff;
2222 u32 qkey = 0;
2223
2224 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2225 return;
2226
2227 /* adjust qkey in qp context */
2228 context->qkey = cpu_to_be32(qkey);
2229}
2230
2231int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2232 struct mlx4_vhcr *vhcr,
2233 struct mlx4_cmd_mailbox *inbox,
2234 struct mlx4_cmd_mailbox *outbox,
2235 struct mlx4_cmd_info *cmd)
2236{
2237 int err;
2238 int qpn = vhcr->in_modifier & 0x7fffff;
2239 struct res_mtt *mtt;
2240 struct res_qp *qp;
2241 struct mlx4_qp_context *qpc = inbox->buf + 8;
 2242 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2243 int mtt_size = qp_get_mtt_size(qpc);
2244 struct res_cq *rcq;
2245 struct res_cq *scq;
2246 int rcqn = qp_get_rcqn(qpc);
2247 int scqn = qp_get_scqn(qpc);
2248 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2249 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2250 struct res_srq *srq;
2251 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2252
2253 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2254 if (err)
2255 return err;
2256 qp->local_qpn = local_qpn;
2257 qp->sched_queue = 0;
2258 qp->qpc_flags = be32_to_cpu(qpc->flags);
 2259
 2260 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2261 if (err)
2262 goto ex_abort;
2263
2264 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2265 if (err)
2266 goto ex_put_mtt;
2267
2268 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2269 if (err)
2270 goto ex_put_mtt;
2271
2272 if (scqn != rcqn) {
2273 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2274 if (err)
2275 goto ex_put_rcq;
2276 } else
2277 scq = rcq;
2278
2279 if (use_srq) {
2280 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2281 if (err)
2282 goto ex_put_scq;
2283 }
2284
2285 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2286 update_pkey_index(dev, slave, inbox);
2287 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2288 if (err)
2289 goto ex_put_srq;
2290 atomic_inc(&mtt->ref_count);
2291 qp->mtt = mtt;
2292 atomic_inc(&rcq->ref_count);
2293 qp->rcq = rcq;
2294 atomic_inc(&scq->ref_count);
2295 qp->scq = scq;
2296
2297 if (scqn != rcqn)
2298 put_res(dev, slave, scqn, RES_CQ);
2299
2300 if (use_srq) {
2301 atomic_inc(&srq->ref_count);
2302 put_res(dev, slave, srqn, RES_SRQ);
2303 qp->srq = srq;
2304 }
2305 put_res(dev, slave, rcqn, RES_CQ);
 2306 	put_res(dev, slave, mtt_base, RES_MTT);
2307 res_end_move(dev, slave, RES_QP, qpn);
2308
2309 return 0;
2310
2311ex_put_srq:
2312 if (use_srq)
2313 put_res(dev, slave, srqn, RES_SRQ);
2314ex_put_scq:
2315 if (scqn != rcqn)
2316 put_res(dev, slave, scqn, RES_CQ);
2317ex_put_rcq:
2318 put_res(dev, slave, rcqn, RES_CQ);
2319ex_put_mtt:
 2320 	put_res(dev, slave, mtt_base, RES_MTT);
2321ex_abort:
2322 res_abort_move(dev, slave, RES_QP, qpn);
2323
2324 return err;
2325}
2326
 2327static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2328{
2329 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2330}
2331
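/* Number of MTT entries needed to map the EQ: each EQE is 32 bytes (1 << 5),
 * so the queue occupies 1 << (log_eq_size + 5) bytes, rounded up to pages.
 */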
2332static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2333{
2334 int log_eq_size = eqc->log_eq_size & 0x1f;
2335 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2336
2337 if (log_eq_size + 5 < page_shift)
2338 return 1;
2339
2340 return 1 << (log_eq_size + 5 - page_shift);
2341}
2342
 2343static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2344{
2345 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2346}
2347
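/* Same computation for CQs: 32-byte CQEs, 1 << (log_cq_size + 5) bytes in
 * total, rounded up to whole pages.
 */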
2348static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2349{
2350 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2351 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2352
2353 if (log_cq_size + 5 < page_shift)
2354 return 1;
2355
2356 return 1 << (log_cq_size + 5 - page_shift);
2357}
2358
2359int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2360 struct mlx4_vhcr *vhcr,
2361 struct mlx4_cmd_mailbox *inbox,
2362 struct mlx4_cmd_mailbox *outbox,
2363 struct mlx4_cmd_info *cmd)
2364{
2365 int err;
2366 int eqn = vhcr->in_modifier;
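	/* EQ numbers are only unique per slave, so the tracker id combines
	 * the slave number with the EQN.
	 */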
2367 int res_id = (slave << 8) | eqn;
2368 struct mlx4_eq_context *eqc = inbox->buf;
 2369 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2370 int mtt_size = eq_get_mtt_size(eqc);
2371 struct res_eq *eq;
2372 struct res_mtt *mtt;
2373
2374 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2375 if (err)
2376 return err;
2377 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2378 if (err)
2379 goto out_add;
2380
 2381 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2382 if (err)
2383 goto out_move;
2384
2385 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2386 if (err)
2387 goto out_put;
2388
2389 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2390 if (err)
2391 goto out_put;
2392
2393 atomic_inc(&mtt->ref_count);
2394 eq->mtt = mtt;
2395 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2396 res_end_move(dev, slave, RES_EQ, res_id);
2397 return 0;
2398
2399out_put:
2400 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2401out_move:
2402 res_abort_move(dev, slave, RES_EQ, res_id);
2403out_add:
2404 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2405 return err;
2406}
2407
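/* Find the slave's MTT reservation that covers [start, start + len) and mark
 * it busy; the caller releases it again with put_res().
 */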
2408static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2409 int len, struct res_mtt **res)
2410{
2411 struct mlx4_priv *priv = mlx4_priv(dev);
2412 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2413 struct res_mtt *mtt;
2414 int err = -EINVAL;
2415
2416 spin_lock_irq(mlx4_tlock(dev));
2417 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2418 com.list) {
2419 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2420 *res = mtt;
2421 mtt->com.from_state = mtt->com.state;
2422 mtt->com.state = RES_MTT_BUSY;
2423 err = 0;
2424 break;
2425 }
2426 }
2427 spin_unlock_irq(mlx4_tlock(dev));
2428
2429 return err;
2430}
2431
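/* Sanity-check the QP context a slave passes on state transitions: for RC/UC
 * QPs, a VF may only use GID index 0 in its primary and alternate address
 * paths.
 */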
2432static int verify_qp_parameters(struct mlx4_dev *dev,
2433 struct mlx4_cmd_mailbox *inbox,
2434 enum qp_transition transition, u8 slave)
2435{
2436 u32 qp_type;
2437 struct mlx4_qp_context *qp_ctx;
2438 enum mlx4_qp_optpar optpar;
2439
2440 qp_ctx = inbox->buf + 8;
2441 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2442 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2443
2444 switch (qp_type) {
2445 case MLX4_QP_ST_RC:
2446 case MLX4_QP_ST_UC:
2447 switch (transition) {
2448 case QP_TRANS_INIT2RTR:
2449 case QP_TRANS_RTR2RTS:
2450 case QP_TRANS_RTS2RTS:
2451 case QP_TRANS_SQD2SQD:
2452 case QP_TRANS_SQD2RTS:
2453 if (slave != mlx4_master_func_num(dev))
2454 /* slaves have only gid index 0 */
2455 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2456 if (qp_ctx->pri_path.mgid_index)
2457 return -EINVAL;
2458 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2459 if (qp_ctx->alt_path.mgid_index)
2460 return -EINVAL;
2461 break;
2462 default:
2463 break;
2464 }
2465
2466 break;
2467 default:
2468 break;
2469 }
2470
2471 return 0;
2472}
2473
2474int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2475 struct mlx4_vhcr *vhcr,
2476 struct mlx4_cmd_mailbox *inbox,
2477 struct mlx4_cmd_mailbox *outbox,
2478 struct mlx4_cmd_info *cmd)
2479{
2480 struct mlx4_mtt mtt;
2481 __be64 *page_list = inbox->buf;
2482 u64 *pg_list = (u64 *)page_list;
2483 int i;
2484 struct res_mtt *rmtt = NULL;
2485 int start = be64_to_cpu(page_list[0]);
2486 int npages = vhcr->in_modifier;
2487 int err;
2488
2489 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2490 if (err)
2491 return err;
2492
2493 /* Call the SW implementation of write_mtt:
2494 * - Prepare a dummy mtt struct
 2495 	 * - Translate inbox contents to simple addresses in host endianness */
2496 mtt.offset = 0; /* TBD this is broken but I don't handle it since
2497 we don't really use it */
2498 mtt.order = 0;
2499 mtt.page_shift = 0;
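	/* Clear the hardware 'present' bit (bit 0) from each page address;
	 * __mlx4_write_mtt() sets it again when it writes the real MTT
	 * entries.
	 */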
2500 for (i = 0; i < npages; ++i)
2501 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2502
2503 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2504 ((u64 *)page_list + 2));
2505
2506 if (rmtt)
2507 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2508
2509 return err;
2510}
2511
2512int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2513 struct mlx4_vhcr *vhcr,
2514 struct mlx4_cmd_mailbox *inbox,
2515 struct mlx4_cmd_mailbox *outbox,
2516 struct mlx4_cmd_info *cmd)
2517{
2518 int eqn = vhcr->in_modifier;
2519 int res_id = eqn | (slave << 8);
2520 struct res_eq *eq;
2521 int err;
2522
2523 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2524 if (err)
2525 return err;
2526
2527 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2528 if (err)
2529 goto ex_abort;
2530
2531 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2532 if (err)
2533 goto ex_put;
2534
2535 atomic_dec(&eq->mtt->ref_count);
2536 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2537 res_end_move(dev, slave, RES_EQ, res_id);
2538 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2539
2540 return 0;
2541
2542ex_put:
2543 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2544ex_abort:
2545 res_abort_move(dev, slave, RES_EQ, res_id);
2546
2547 return err;
2548}
2549
2550int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2551{
2552 struct mlx4_priv *priv = mlx4_priv(dev);
2553 struct mlx4_slave_event_eq_info *event_eq;
2554 struct mlx4_cmd_mailbox *mailbox;
2555 u32 in_modifier = 0;
2556 int err;
2557 int res_id;
2558 struct res_eq *req;
2559
2560 if (!priv->mfunc.master.slave_state)
2561 return -EINVAL;
2562
 2563 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2564
2565 /* Create the event only if the slave is registered */
 2566 	if (event_eq->eqn < 0)
2567 return 0;
2568
2569 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2570 res_id = (slave << 8) | event_eq->eqn;
2571 err = get_res(dev, slave, res_id, RES_EQ, &req);
2572 if (err)
2573 goto unlock;
2574
2575 if (req->com.from_state != RES_EQ_HW) {
2576 err = -EINVAL;
2577 goto put;
2578 }
2579
2580 mailbox = mlx4_alloc_cmd_mailbox(dev);
2581 if (IS_ERR(mailbox)) {
2582 err = PTR_ERR(mailbox);
2583 goto put;
2584 }
2585
2586 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2587 ++event_eq->token;
2588 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2589 }
2590
2591 memcpy(mailbox->buf, (u8 *) eqe, 28);
2592
2593 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2594
2595 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2596 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2597 MLX4_CMD_NATIVE);
2598
2599 put_res(dev, slave, res_id, RES_EQ);
2600 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2601 mlx4_free_cmd_mailbox(dev, mailbox);
2602 return err;
2603
2604put:
2605 put_res(dev, slave, res_id, RES_EQ);
2606
2607unlock:
2608 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2609 return err;
2610}
2611
2612int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2613 struct mlx4_vhcr *vhcr,
2614 struct mlx4_cmd_mailbox *inbox,
2615 struct mlx4_cmd_mailbox *outbox,
2616 struct mlx4_cmd_info *cmd)
2617{
2618 int eqn = vhcr->in_modifier;
2619 int res_id = eqn | (slave << 8);
2620 struct res_eq *eq;
2621 int err;
2622
2623 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2624 if (err)
2625 return err;
2626
2627 if (eq->com.from_state != RES_EQ_HW) {
2628 err = -EINVAL;
2629 goto ex_put;
2630 }
2631
2632 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2633
2634ex_put:
2635 put_res(dev, slave, res_id, RES_EQ);
2636 return err;
2637}
2638
2639int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2640 struct mlx4_vhcr *vhcr,
2641 struct mlx4_cmd_mailbox *inbox,
2642 struct mlx4_cmd_mailbox *outbox,
2643 struct mlx4_cmd_info *cmd)
2644{
2645 int err;
2646 int cqn = vhcr->in_modifier;
2647 struct mlx4_cq_context *cqc = inbox->buf;
 2648 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2649 struct res_cq *cq;
2650 struct res_mtt *mtt;
2651
2652 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2653 if (err)
2654 return err;
 2655 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2656 if (err)
2657 goto out_move;
2658 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2659 if (err)
2660 goto out_put;
2661 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2662 if (err)
2663 goto out_put;
2664 atomic_inc(&mtt->ref_count);
2665 cq->mtt = mtt;
2666 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2667 res_end_move(dev, slave, RES_CQ, cqn);
2668 return 0;
2669
2670out_put:
2671 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2672out_move:
2673 res_abort_move(dev, slave, RES_CQ, cqn);
2674 return err;
2675}
2676
2677int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2678 struct mlx4_vhcr *vhcr,
2679 struct mlx4_cmd_mailbox *inbox,
2680 struct mlx4_cmd_mailbox *outbox,
2681 struct mlx4_cmd_info *cmd)
2682{
2683 int err;
2684 int cqn = vhcr->in_modifier;
2685 struct res_cq *cq;
2686
2687 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2688 if (err)
2689 return err;
2690 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2691 if (err)
2692 goto out_move;
2693 atomic_dec(&cq->mtt->ref_count);
2694 res_end_move(dev, slave, RES_CQ, cqn);
2695 return 0;
2696
2697out_move:
2698 res_abort_move(dev, slave, RES_CQ, cqn);
2699 return err;
2700}
2701
2702int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2703 struct mlx4_vhcr *vhcr,
2704 struct mlx4_cmd_mailbox *inbox,
2705 struct mlx4_cmd_mailbox *outbox,
2706 struct mlx4_cmd_info *cmd)
2707{
2708 int cqn = vhcr->in_modifier;
2709 struct res_cq *cq;
2710 int err;
2711
2712 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2713 if (err)
2714 return err;
2715
2716 if (cq->com.from_state != RES_CQ_HW)
2717 goto ex_put;
2718
2719 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2720ex_put:
2721 put_res(dev, slave, cqn, RES_CQ);
2722
2723 return err;
2724}
2725
2726static int handle_resize(struct mlx4_dev *dev, int slave,
2727 struct mlx4_vhcr *vhcr,
2728 struct mlx4_cmd_mailbox *inbox,
2729 struct mlx4_cmd_mailbox *outbox,
2730 struct mlx4_cmd_info *cmd,
2731 struct res_cq *cq)
2732{
2733 int err;
2734 struct res_mtt *orig_mtt;
2735 struct res_mtt *mtt;
2736 struct mlx4_cq_context *cqc = inbox->buf;
 2737 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2738
2739 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2740 if (err)
2741 return err;
2742
2743 if (orig_mtt != cq->mtt) {
2744 err = -EINVAL;
2745 goto ex_put;
2746 }
2747
 2748 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2749 if (err)
2750 goto ex_put;
2751
2752 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2753 if (err)
2754 goto ex_put1;
2755 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2756 if (err)
2757 goto ex_put1;
2758 atomic_dec(&orig_mtt->ref_count);
2759 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2760 atomic_inc(&mtt->ref_count);
2761 cq->mtt = mtt;
2762 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2763 return 0;
2764
2765ex_put1:
2766 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2767ex_put:
2768 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2769
2770 return err;
2771
2772}
2773
2774int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2775 struct mlx4_vhcr *vhcr,
2776 struct mlx4_cmd_mailbox *inbox,
2777 struct mlx4_cmd_mailbox *outbox,
2778 struct mlx4_cmd_info *cmd)
2779{
2780 int cqn = vhcr->in_modifier;
2781 struct res_cq *cq;
2782 int err;
2783
2784 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2785 if (err)
2786 return err;
2787
2788 if (cq->com.from_state != RES_CQ_HW)
2789 goto ex_put;
2790
2791 if (vhcr->op_modifier == 0) {
2792 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
 2793 		goto ex_put;
2794 }
2795
2796 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2797ex_put:
2798 put_res(dev, slave, cqn, RES_CQ);
2799
2800 return err;
2801}
2802
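/* Number of MTT entries needed to map the SRQ buffer, derived from the WQE
 * stride and the number of WQEs in the SRQ context.
 */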
2803static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2804{
2805 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2806 int log_rq_stride = srqc->logstride & 7;
2807 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2808
2809 if (log_srq_size + log_rq_stride + 4 < page_shift)
2810 return 1;
2811
2812 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2813}
2814
2815int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2816 struct mlx4_vhcr *vhcr,
2817 struct mlx4_cmd_mailbox *inbox,
2818 struct mlx4_cmd_mailbox *outbox,
2819 struct mlx4_cmd_info *cmd)
2820{
2821 int err;
2822 int srqn = vhcr->in_modifier;
2823 struct res_mtt *mtt;
2824 struct res_srq *srq;
2825 struct mlx4_srq_context *srqc = inbox->buf;
 2826 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2827
2828 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2829 return -EINVAL;
2830
2831 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2832 if (err)
2833 return err;
 2834 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2835 if (err)
2836 goto ex_abort;
2837 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2838 mtt);
2839 if (err)
2840 goto ex_put_mtt;
2841
2842 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2843 if (err)
2844 goto ex_put_mtt;
2845
2846 atomic_inc(&mtt->ref_count);
2847 srq->mtt = mtt;
2848 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2849 res_end_move(dev, slave, RES_SRQ, srqn);
2850 return 0;
2851
2852ex_put_mtt:
2853 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2854ex_abort:
2855 res_abort_move(dev, slave, RES_SRQ, srqn);
2856
2857 return err;
2858}
2859
2860int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2861 struct mlx4_vhcr *vhcr,
2862 struct mlx4_cmd_mailbox *inbox,
2863 struct mlx4_cmd_mailbox *outbox,
2864 struct mlx4_cmd_info *cmd)
2865{
2866 int err;
2867 int srqn = vhcr->in_modifier;
2868 struct res_srq *srq;
2869
2870 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2871 if (err)
2872 return err;
2873 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2874 if (err)
2875 goto ex_abort;
2876 atomic_dec(&srq->mtt->ref_count);
2877 if (srq->cq)
2878 atomic_dec(&srq->cq->ref_count);
2879 res_end_move(dev, slave, RES_SRQ, srqn);
2880
2881 return 0;
2882
2883ex_abort:
2884 res_abort_move(dev, slave, RES_SRQ, srqn);
2885
2886 return err;
2887}
2888
2889int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2890 struct mlx4_vhcr *vhcr,
2891 struct mlx4_cmd_mailbox *inbox,
2892 struct mlx4_cmd_mailbox *outbox,
2893 struct mlx4_cmd_info *cmd)
2894{
2895 int err;
2896 int srqn = vhcr->in_modifier;
2897 struct res_srq *srq;
2898
2899 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2900 if (err)
2901 return err;
2902 if (srq->com.from_state != RES_SRQ_HW) {
2903 err = -EBUSY;
2904 goto out;
2905 }
2906 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2907out:
2908 put_res(dev, slave, srqn, RES_SRQ);
2909 return err;
2910}
2911
2912int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2913 struct mlx4_vhcr *vhcr,
2914 struct mlx4_cmd_mailbox *inbox,
2915 struct mlx4_cmd_mailbox *outbox,
2916 struct mlx4_cmd_info *cmd)
2917{
2918 int err;
2919 int srqn = vhcr->in_modifier;
2920 struct res_srq *srq;
2921
2922 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2923 if (err)
2924 return err;
2925
2926 if (srq->com.from_state != RES_SRQ_HW) {
2927 err = -EBUSY;
2928 goto out;
2929 }
2930
2931 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2932out:
2933 put_res(dev, slave, srqn, RES_SRQ);
2934 return err;
2935}
2936
2937int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2938 struct mlx4_vhcr *vhcr,
2939 struct mlx4_cmd_mailbox *inbox,
2940 struct mlx4_cmd_mailbox *outbox,
2941 struct mlx4_cmd_info *cmd)
2942{
2943 int err;
2944 int qpn = vhcr->in_modifier & 0x7fffff;
2945 struct res_qp *qp;
2946
2947 err = get_res(dev, slave, qpn, RES_QP, &qp);
2948 if (err)
2949 return err;
2950 if (qp->com.from_state != RES_QP_HW) {
2951 err = -EBUSY;
2952 goto out;
2953 }
2954
2955 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2956out:
2957 put_res(dev, slave, qpn, RES_QP);
2958 return err;
2959}
2960
2961int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2962 struct mlx4_vhcr *vhcr,
2963 struct mlx4_cmd_mailbox *inbox,
2964 struct mlx4_cmd_mailbox *outbox,
2965 struct mlx4_cmd_info *cmd)
2966{
2967 struct mlx4_qp_context *context = inbox->buf + 8;
2968 adjust_proxy_tun_qkey(dev, vhcr, context);
2969 update_pkey_index(dev, slave, inbox);
2970 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2971}
2972
2973int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2974 struct mlx4_vhcr *vhcr,
2975 struct mlx4_cmd_mailbox *inbox,
2976 struct mlx4_cmd_mailbox *outbox,
2977 struct mlx4_cmd_info *cmd)
2978{
 2979 	int err;
 2980 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2981 int qpn = vhcr->in_modifier & 0x7fffff;
2982 struct res_qp *qp;
2983 u8 orig_sched_queue;
 2984
2985 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2986 if (err)
2987 return err;
2988
2989 update_pkey_index(dev, slave, inbox);
2990 update_gid(dev, inbox, (u8)slave);
2991 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2992 orig_sched_queue = qpc->pri_path.sched_queue;
2993 err = update_vport_qp_param(dev, inbox, slave, qpn);
2994 if (err)
2995 return err;
 2996
2997 err = get_res(dev, slave, qpn, RES_QP, &qp);
2998 if (err)
2999 return err;
3000 if (qp->com.from_state != RES_QP_HW) {
3001 err = -EBUSY;
3002 goto out;
3003 }
3004
3005 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3006out:
3007 /* if no error, save sched queue value passed in by VF. This is
3008 * essentially the QOS value provided by the VF. This will be useful
3009 * if we allow dynamic changes from VST back to VGT
3010 */
3011 if (!err)
3012 qp->sched_queue = orig_sched_queue;
3013
3014 put_res(dev, slave, qpn, RES_QP);
3015 return err;
3016}
3017
3018int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3019 struct mlx4_vhcr *vhcr,
3020 struct mlx4_cmd_mailbox *inbox,
3021 struct mlx4_cmd_mailbox *outbox,
3022 struct mlx4_cmd_info *cmd)
3023{
3024 int err;
3025 struct mlx4_qp_context *context = inbox->buf + 8;
3026
3027 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3028 if (err)
3029 return err;
3030
3031 update_pkey_index(dev, slave, inbox);
3032 update_gid(dev, inbox, (u8)slave);
3033 adjust_proxy_tun_qkey(dev, vhcr, context);
3034 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3035}
3036
3037int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3038 struct mlx4_vhcr *vhcr,
3039 struct mlx4_cmd_mailbox *inbox,
3040 struct mlx4_cmd_mailbox *outbox,
3041 struct mlx4_cmd_info *cmd)
3042{
3043 int err;
3044 struct mlx4_qp_context *context = inbox->buf + 8;
3045
3046 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3047 if (err)
3048 return err;
3049
3050 update_pkey_index(dev, slave, inbox);
3051 update_gid(dev, inbox, (u8)slave);
3052 adjust_proxy_tun_qkey(dev, vhcr, context);
3053 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3054}
3055
3056
3057int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3058 struct mlx4_vhcr *vhcr,
3059 struct mlx4_cmd_mailbox *inbox,
3060 struct mlx4_cmd_mailbox *outbox,
3061 struct mlx4_cmd_info *cmd)
3062{
3063 struct mlx4_qp_context *context = inbox->buf + 8;
3064 adjust_proxy_tun_qkey(dev, vhcr, context);
3065 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3066}
3067
3068int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3069 struct mlx4_vhcr *vhcr,
3070 struct mlx4_cmd_mailbox *inbox,
3071 struct mlx4_cmd_mailbox *outbox,
3072 struct mlx4_cmd_info *cmd)
3073{
3074 int err;
3075 struct mlx4_qp_context *context = inbox->buf + 8;
3076
3077 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3078 if (err)
3079 return err;
3080
3081 adjust_proxy_tun_qkey(dev, vhcr, context);
3082 update_gid(dev, inbox, (u8)slave);
3083 update_pkey_index(dev, slave, inbox);
3084 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3085}
3086
3087int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3088 struct mlx4_vhcr *vhcr,
3089 struct mlx4_cmd_mailbox *inbox,
3090 struct mlx4_cmd_mailbox *outbox,
3091 struct mlx4_cmd_info *cmd)
3092{
3093 int err;
3094 struct mlx4_qp_context *context = inbox->buf + 8;
3095
3096 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3097 if (err)
3098 return err;
 3099
3100 adjust_proxy_tun_qkey(dev, vhcr, context);
3101 update_gid(dev, inbox, (u8)slave);
3102 update_pkey_index(dev, slave, inbox);
3103 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3104}
3105
3106int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3107 struct mlx4_vhcr *vhcr,
3108 struct mlx4_cmd_mailbox *inbox,
3109 struct mlx4_cmd_mailbox *outbox,
3110 struct mlx4_cmd_info *cmd)
3111{
3112 int err;
3113 int qpn = vhcr->in_modifier & 0x7fffff;
3114 struct res_qp *qp;
3115
3116 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3117 if (err)
3118 return err;
3119 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3120 if (err)
3121 goto ex_abort;
3122
3123 atomic_dec(&qp->mtt->ref_count);
3124 atomic_dec(&qp->rcq->ref_count);
3125 atomic_dec(&qp->scq->ref_count);
3126 if (qp->srq)
3127 atomic_dec(&qp->srq->ref_count);
3128 res_end_move(dev, slave, RES_QP, qpn);
3129 return 0;
3130
3131ex_abort:
3132 res_abort_move(dev, slave, RES_QP, qpn);
3133
3134 return err;
3135}
3136
3137static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3138 struct res_qp *rqp, u8 *gid)
3139{
3140 struct res_gid *res;
3141
3142 list_for_each_entry(res, &rqp->mcg_list, list) {
3143 if (!memcmp(res->gid, gid, 16))
3144 return res;
3145 }
3146 return NULL;
3147}
3148
3149static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
 3150 		       u8 *gid, enum mlx4_protocol prot,
 3151 		       enum mlx4_steer_type steer, u64 reg_id)
3152{
3153 struct res_gid *res;
3154 int err;
3155
3156 res = kzalloc(sizeof *res, GFP_KERNEL);
3157 if (!res)
3158 return -ENOMEM;
3159
3160 spin_lock_irq(&rqp->mcg_spl);
3161 if (find_gid(dev, slave, rqp, gid)) {
3162 kfree(res);
3163 err = -EEXIST;
3164 } else {
3165 memcpy(res->gid, gid, 16);
3166 res->prot = prot;
 3167 		res->steer = steer;
 3168 		res->reg_id = reg_id;
3169 list_add_tail(&res->list, &rqp->mcg_list);
3170 err = 0;
3171 }
3172 spin_unlock_irq(&rqp->mcg_spl);
3173
3174 return err;
3175}
3176
3177static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
 3178 		       u8 *gid, enum mlx4_protocol prot,
 3179 		       enum mlx4_steer_type steer, u64 *reg_id)
3180{
3181 struct res_gid *res;
3182 int err;
3183
3184 spin_lock_irq(&rqp->mcg_spl);
3185 res = find_gid(dev, slave, rqp, gid);
 3186 	if (!res || res->prot != prot || res->steer != steer)
3187 err = -EINVAL;
3188 else {
 3189 		*reg_id = res->reg_id;
3190 list_del(&res->list);
3191 kfree(res);
3192 err = 0;
3193 }
3194 spin_unlock_irq(&rqp->mcg_spl);
3195
3196 return err;
3197}
3198
3199static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3200 int block_loopback, enum mlx4_protocol prot,
3201 enum mlx4_steer_type type, u64 *reg_id)
3202{
3203 switch (dev->caps.steering_mode) {
3204 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3205 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3206 block_loopback, prot,
3207 reg_id);
3208 case MLX4_STEERING_MODE_B0:
3209 return mlx4_qp_attach_common(dev, qp, gid,
3210 block_loopback, prot, type);
3211 default:
3212 return -EINVAL;
3213 }
3214}
3215
3216static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3217 enum mlx4_protocol prot, enum mlx4_steer_type type,
3218 u64 reg_id)
3219{
3220 switch (dev->caps.steering_mode) {
3221 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3222 return mlx4_flow_detach(dev, reg_id);
3223 case MLX4_STEERING_MODE_B0:
3224 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3225 default:
3226 return -EINVAL;
3227 }
3228}
3229
3230int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3231 struct mlx4_vhcr *vhcr,
3232 struct mlx4_cmd_mailbox *inbox,
3233 struct mlx4_cmd_mailbox *outbox,
3234 struct mlx4_cmd_info *cmd)
3235{
3236 struct mlx4_qp qp; /* dummy for calling attach/detach */
3237 u8 *gid = inbox->buf;
3238 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
 3239 	int err;
3240 int qpn;
3241 struct res_qp *rqp;
 3242 	u64 reg_id = 0;
3243 int attach = vhcr->op_modifier;
3244 int block_loopback = vhcr->in_modifier >> 31;
3245 u8 steer_type_mask = 2;
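	/* For B0 steering, bit 1 of gid[7] carries the steer type used for
	 * the attach/detach.
	 */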
 3246 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3247
3248 qpn = vhcr->in_modifier & 0xffffff;
3249 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3250 if (err)
3251 return err;
3252
3253 qp.qpn = qpn;
3254 if (attach) {
3255 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3256 type, &reg_id);
3257 if (err) {
3258 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
 3259 			goto ex_put;
3260 }
3261 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
 3262 		if (err)
 3263 			goto ex_detach;
 3264 	} else {
 3265 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3266 if (err)
3267 goto ex_put;
 3268
3269 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3270 if (err)
3271 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3272 qpn, reg_id);
3273 }
 3274 	put_res(dev, slave, qpn, RES_QP);
 3275 	return err;
 3276
3277ex_detach:
3278 qp_detach(dev, &qp, gid, prot, type, reg_id);
3279ex_put:
3280 put_res(dev, slave, qpn, RES_QP);
3281 return err;
3282}
3283
3284/*
3285 * MAC validation for Flow Steering rules.
3286 * VF can attach rules only with a mac address which is assigned to it.
3287 */
3288static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3289 struct list_head *rlist)
3290{
3291 struct mac_res *res, *tmp;
3292 __be64 be_mac;
3293
3294 /* make sure it isn't multicast or broadcast mac*/
3295 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3296 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3297 list_for_each_entry_safe(res, tmp, rlist, list) {
3298 be_mac = cpu_to_be64(res->mac << 16);
3299 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3300 return 0;
3301 }
3302 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3303 eth_header->eth.dst_mac, slave);
3304 return -EINVAL;
3305 }
3306 return 0;
3307}
3308
3309/*
3310 * In case of missing eth header, append eth header with a MAC address
3311 * assigned to the VF.
3312 */
3313static int add_eth_header(struct mlx4_dev *dev, int slave,
3314 struct mlx4_cmd_mailbox *inbox,
3315 struct list_head *rlist, int header_id)
3316{
3317 struct mac_res *res, *tmp;
3318 u8 port;
3319 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3320 struct mlx4_net_trans_rule_hw_eth *eth_header;
3321 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3322 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3323 __be64 be_mac = 0;
3324 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3325
3326 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
 3327 	port = ctrl->port;
3328 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3329
3330 /* Clear a space in the inbox for eth header */
3331 switch (header_id) {
3332 case MLX4_NET_TRANS_RULE_ID_IPV4:
3333 ip_header =
3334 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3335 memmove(ip_header, eth_header,
3336 sizeof(*ip_header) + sizeof(*l4_header));
3337 break;
3338 case MLX4_NET_TRANS_RULE_ID_TCP:
3339 case MLX4_NET_TRANS_RULE_ID_UDP:
3340 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3341 (eth_header + 1);
3342 memmove(l4_header, eth_header, sizeof(*l4_header));
3343 break;
3344 default:
3345 return -EINVAL;
3346 }
3347 list_for_each_entry_safe(res, tmp, rlist, list) {
3348 if (port == res->port) {
3349 be_mac = cpu_to_be64(res->mac << 16);
3350 break;
3351 }
3352 }
3353 if (!be_mac) {
3354 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3355 port);
3356 return -EINVAL;
3357 }
3358
3359 memset(eth_header, 0, sizeof(*eth_header));
3360 eth_header->size = sizeof(*eth_header) >> 2;
3361 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3362 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3363 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3364
3365 return 0;
3366
3367}
3368
3369int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3370 struct mlx4_vhcr *vhcr,
3371 struct mlx4_cmd_mailbox *inbox,
3372 struct mlx4_cmd_mailbox *outbox,
3373 struct mlx4_cmd_info *cmd)
3374{
3375
3376 struct mlx4_priv *priv = mlx4_priv(dev);
3377 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3378 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
 3379 	int err;
 3380 	int qpn;
 3381 	struct res_qp *rqp;
3382 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3383 struct _rule_hw *rule_header;
3384 int header_id;
 3385
3386 if (dev->caps.steering_mode !=
3387 MLX4_STEERING_MODE_DEVICE_MANAGED)
3388 return -EOPNOTSUPP;
 3389
 3390 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
 3391 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
 3392 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3393 if (err) {
3394 pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3395 return err;
3396 }
3397 rule_header = (struct _rule_hw *)(ctrl + 1);
3398 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3399
3400 switch (header_id) {
3401 case MLX4_NET_TRANS_RULE_ID_ETH:
3402 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3403 err = -EINVAL;
3404 goto err_put;
3405 }
 3406 		break;
3407 case MLX4_NET_TRANS_RULE_ID_IB:
3408 break;
3409 case MLX4_NET_TRANS_RULE_ID_IPV4:
3410 case MLX4_NET_TRANS_RULE_ID_TCP:
3411 case MLX4_NET_TRANS_RULE_ID_UDP:
3412 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3413 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3414 err = -EINVAL;
3415 goto err_put;
3416 }
3417 vhcr->in_modifier +=
3418 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3419 break;
3420 default:
3421 pr_err("Corrupted mailbox.\n");
3422 err = -EINVAL;
3423 goto err_put;
3424 }
3425
3426 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3427 vhcr->in_modifier, 0,
3428 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3429 MLX4_CMD_NATIVE);
3430 if (err)
 3431 		goto err_put;
 3432
 3433 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3434 if (err) {
3435 mlx4_err(dev, "Fail to add flow steering resources.\n ");
3436 /* detach rule*/
3437 mlx4_cmd(dev, vhcr->out_param, 0, 0,
 3438 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
 3439 			 MLX4_CMD_NATIVE);
 3440 		goto err_put;
 3441 	}
 3442 	atomic_inc(&rqp->ref_count);
3443err_put:
3444 put_res(dev, slave, qpn, RES_QP);
 3445 	return err;
3446}
3447
3448int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3449 struct mlx4_vhcr *vhcr,
3450 struct mlx4_cmd_mailbox *inbox,
3451 struct mlx4_cmd_mailbox *outbox,
3452 struct mlx4_cmd_info *cmd)
3453{
 3454 	int err;
3455 struct res_qp *rqp;
3456 struct res_fs_rule *rrule;
 3457
3458 if (dev->caps.steering_mode !=
3459 MLX4_STEERING_MODE_DEVICE_MANAGED)
3460 return -EOPNOTSUPP;
 3461
3462 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3463 if (err)
3464 return err;
 3465 	/* Release the rule from busy state before removal */
3466 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3467 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3468 if (err)
3469 return err;
3470
3471 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3472 if (err) {
3473 mlx4_err(dev, "Fail to remove flow steering resources.\n ");
 3474 		goto out;
3475 }
3476
3477 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3478 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3479 MLX4_CMD_NATIVE);
3480 if (!err)
3481 atomic_dec(&rqp->ref_count);
3482out:
3483 put_res(dev, slave, rrule->qpn, RES_QP);
 3484 	return err;
3485}
3486
3487enum {
3488 BUSY_MAX_RETRIES = 10
3489};
3490
3491int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3492 struct mlx4_vhcr *vhcr,
3493 struct mlx4_cmd_mailbox *inbox,
3494 struct mlx4_cmd_mailbox *outbox,
3495 struct mlx4_cmd_info *cmd)
3496{
3497 int err;
3498 int index = vhcr->in_modifier & 0xffff;
3499
3500 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3501 if (err)
3502 return err;
3503
3504 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3505 put_res(dev, slave, index, RES_COUNTER);
3506 return err;
3507}
3508
3509static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3510{
3511 struct res_gid *rgid;
3512 struct res_gid *tmp;
3513 struct mlx4_qp qp; /* dummy for calling attach/detach */
3514
3515 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3516 switch (dev->caps.steering_mode) {
3517 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3518 mlx4_flow_detach(dev, rgid->reg_id);
3519 break;
3520 case MLX4_STEERING_MODE_B0:
3521 qp.qpn = rqp->local_qpn;
3522 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3523 rgid->prot, rgid->steer);
3524 break;
3525 }
3526 list_del(&rgid->list);
3527 kfree(rgid);
3528 }
3529}
3530
3531static int _move_all_busy(struct mlx4_dev *dev, int slave,
3532 enum mlx4_resource type, int print)
3533{
3534 struct mlx4_priv *priv = mlx4_priv(dev);
3535 struct mlx4_resource_tracker *tracker =
3536 &priv->mfunc.master.res_tracker;
3537 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3538 struct res_common *r;
3539 struct res_common *tmp;
3540 int busy;
3541
3542 busy = 0;
3543 spin_lock_irq(mlx4_tlock(dev));
3544 list_for_each_entry_safe(r, tmp, rlist, list) {
3545 if (r->owner == slave) {
3546 if (!r->removing) {
3547 if (r->state == RES_ANY_BUSY) {
3548 if (print)
3549 mlx4_dbg(dev,
 3550 						 "%s id 0x%llx is busy\n",
3551 ResourceType(type),
3552 r->res_id);
3553 ++busy;
3554 } else {
3555 r->from_state = r->state;
3556 r->state = RES_ANY_BUSY;
3557 r->removing = 1;
3558 }
3559 }
3560 }
3561 }
3562 spin_unlock_irq(mlx4_tlock(dev));
3563
3564 return busy;
3565}
3566
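/* Try for up to 5 seconds to mark all of the slave's resources of the given
 * type as busy/removing, yielding between attempts while other users still
 * hold them busy.
 */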
3567static int move_all_busy(struct mlx4_dev *dev, int slave,
3568 enum mlx4_resource type)
3569{
3570 unsigned long begin;
3571 int busy;
3572
3573 begin = jiffies;
3574 do {
3575 busy = _move_all_busy(dev, slave, type, 0);
3576 if (time_after(jiffies, begin + 5 * HZ))
3577 break;
3578 if (busy)
3579 cond_resched();
3580 } while (busy);
3581
3582 if (busy)
3583 busy = _move_all_busy(dev, slave, type, 1);
3584
3585 return busy;
3586}
3587static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3588{
3589 struct mlx4_priv *priv = mlx4_priv(dev);
3590 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3591 struct list_head *qp_list =
3592 &tracker->slave_list[slave].res_list[RES_QP];
3593 struct res_qp *qp;
3594 struct res_qp *tmp;
3595 int state;
3596 u64 in_param;
3597 int qpn;
3598 int err;
3599
3600 err = move_all_busy(dev, slave, RES_QP);
3601 if (err)
3602 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
3603 "for slave %d\n", slave);
3604
3605 spin_lock_irq(mlx4_tlock(dev));
3606 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3607 spin_unlock_irq(mlx4_tlock(dev));
3608 if (qp->com.owner == slave) {
3609 qpn = qp->com.res_id;
3610 detach_qp(dev, slave, qp);
3611 state = qp->com.from_state;
3612 while (state != 0) {
3613 switch (state) {
3614 case RES_QP_RESERVED:
3615 spin_lock_irq(mlx4_tlock(dev));
3616 rb_erase(&qp->com.node,
3617 &tracker->res_tree[RES_QP]);
3618 list_del(&qp->com.list);
3619 spin_unlock_irq(mlx4_tlock(dev));
3620 kfree(qp);
3621 state = 0;
3622 break;
3623 case RES_QP_MAPPED:
3624 if (!valid_reserved(dev, slave, qpn))
3625 __mlx4_qp_free_icm(dev, qpn);
3626 state = RES_QP_RESERVED;
3627 break;
3628 case RES_QP_HW:
3629 in_param = slave;
3630 err = mlx4_cmd(dev, in_param,
3631 qp->local_qpn, 2,
3632 MLX4_CMD_2RST_QP,
3633 MLX4_CMD_TIME_CLASS_A,
3634 MLX4_CMD_NATIVE);
3635 if (err)
3636 mlx4_dbg(dev, "rem_slave_qps: failed"
3637 " to move slave %d qpn %d to"
3638 " reset\n", slave,
3639 qp->local_qpn);
3640 atomic_dec(&qp->rcq->ref_count);
3641 atomic_dec(&qp->scq->ref_count);
3642 atomic_dec(&qp->mtt->ref_count);
3643 if (qp->srq)
3644 atomic_dec(&qp->srq->ref_count);
3645 state = RES_QP_MAPPED;
3646 break;
3647 default:
3648 state = 0;
3649 }
3650 }
3651 }
3652 spin_lock_irq(mlx4_tlock(dev));
3653 }
3654 spin_unlock_irq(mlx4_tlock(dev));
3655}
3656
3657static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3658{
3659 struct mlx4_priv *priv = mlx4_priv(dev);
3660 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3661 struct list_head *srq_list =
3662 &tracker->slave_list[slave].res_list[RES_SRQ];
3663 struct res_srq *srq;
3664 struct res_srq *tmp;
3665 int state;
3666 u64 in_param;
3667 LIST_HEAD(tlist);
3668 int srqn;
3669 int err;
3670
3671 err = move_all_busy(dev, slave, RES_SRQ);
3672 if (err)
3673 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
3674 "busy for slave %d\n", slave);
3675
3676 spin_lock_irq(mlx4_tlock(dev));
3677 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3678 spin_unlock_irq(mlx4_tlock(dev));
3679 if (srq->com.owner == slave) {
3680 srqn = srq->com.res_id;
3681 state = srq->com.from_state;
3682 while (state != 0) {
3683 switch (state) {
3684 case RES_SRQ_ALLOCATED:
3685 __mlx4_srq_free_icm(dev, srqn);
3686 spin_lock_irq(mlx4_tlock(dev));
3687 rb_erase(&srq->com.node,
3688 &tracker->res_tree[RES_SRQ]);
3689 list_del(&srq->com.list);
3690 spin_unlock_irq(mlx4_tlock(dev));
3691 kfree(srq);
3692 state = 0;
3693 break;
3694
3695 case RES_SRQ_HW:
3696 in_param = slave;
3697 err = mlx4_cmd(dev, in_param, srqn, 1,
3698 MLX4_CMD_HW2SW_SRQ,
3699 MLX4_CMD_TIME_CLASS_A,
3700 MLX4_CMD_NATIVE);
3701 if (err)
3702 mlx4_dbg(dev, "rem_slave_srqs: failed"
3703 " to move slave %d srq %d to"
3704 " SW ownership\n",
3705 slave, srqn);
3706
3707 atomic_dec(&srq->mtt->ref_count);
3708 if (srq->cq)
3709 atomic_dec(&srq->cq->ref_count);
3710 state = RES_SRQ_ALLOCATED;
3711 break;
3712
3713 default:
3714 state = 0;
3715 }
3716 }
3717 }
3718 spin_lock_irq(mlx4_tlock(dev));
3719 }
3720 spin_unlock_irq(mlx4_tlock(dev));
3721}
3722
3723static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3724{
3725 struct mlx4_priv *priv = mlx4_priv(dev);
3726 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3727 struct list_head *cq_list =
3728 &tracker->slave_list[slave].res_list[RES_CQ];
3729 struct res_cq *cq;
3730 struct res_cq *tmp;
3731 int state;
3732 u64 in_param;
3733 LIST_HEAD(tlist);
3734 int cqn;
3735 int err;
3736
3737 err = move_all_busy(dev, slave, RES_CQ);
3738 if (err)
3739 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
3740 "busy for slave %d\n", slave);
3741
3742 spin_lock_irq(mlx4_tlock(dev));
3743 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3744 spin_unlock_irq(mlx4_tlock(dev));
3745 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3746 cqn = cq->com.res_id;
3747 state = cq->com.from_state;
3748 while (state != 0) {
3749 switch (state) {
3750 case RES_CQ_ALLOCATED:
3751 __mlx4_cq_free_icm(dev, cqn);
3752 spin_lock_irq(mlx4_tlock(dev));
3753 rb_erase(&cq->com.node,
3754 &tracker->res_tree[RES_CQ]);
3755 list_del(&cq->com.list);
3756 spin_unlock_irq(mlx4_tlock(dev));
3757 kfree(cq);
3758 state = 0;
3759 break;
3760
3761 case RES_CQ_HW:
3762 in_param = slave;
3763 err = mlx4_cmd(dev, in_param, cqn, 1,
3764 MLX4_CMD_HW2SW_CQ,
3765 MLX4_CMD_TIME_CLASS_A,
3766 MLX4_CMD_NATIVE);
3767 if (err)
3768 mlx4_dbg(dev, "rem_slave_cqs: failed"
3769 " to move slave %d cq %d to"
3770 " SW ownership\n",
3771 slave, cqn);
3772 atomic_dec(&cq->mtt->ref_count);
3773 state = RES_CQ_ALLOCATED;
3774 break;
3775
3776 default:
3777 state = 0;
3778 }
3779 }
3780 }
3781 spin_lock_irq(mlx4_tlock(dev));
3782 }
3783 spin_unlock_irq(mlx4_tlock(dev));
3784}
3785
3786static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3787{
3788 struct mlx4_priv *priv = mlx4_priv(dev);
3789 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3790 struct list_head *mpt_list =
3791 &tracker->slave_list[slave].res_list[RES_MPT];
3792 struct res_mpt *mpt;
3793 struct res_mpt *tmp;
3794 int state;
3795 u64 in_param;
3796 LIST_HEAD(tlist);
3797 int mptn;
3798 int err;
3799
3800 err = move_all_busy(dev, slave, RES_MPT);
3801 if (err)
3802 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3803 "busy for slave %d\n", slave);
3804
3805 spin_lock_irq(mlx4_tlock(dev));
3806 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3807 spin_unlock_irq(mlx4_tlock(dev));
3808 if (mpt->com.owner == slave) {
3809 mptn = mpt->com.res_id;
3810 state = mpt->com.from_state;
3811 while (state != 0) {
3812 switch (state) {
3813 case RES_MPT_RESERVED:
 3814 				__mlx4_mpt_release(dev, mpt->key);
 3815 				spin_lock_irq(mlx4_tlock(dev));
3816 rb_erase(&mpt->com.node,
3817 &tracker->res_tree[RES_MPT]);
3818 list_del(&mpt->com.list);
3819 spin_unlock_irq(mlx4_tlock(dev));
3820 kfree(mpt);
3821 state = 0;
3822 break;
3823
3824 case RES_MPT_MAPPED:
 3825 				__mlx4_mpt_free_icm(dev, mpt->key);
3826 state = RES_MPT_RESERVED;
3827 break;
3828
3829 case RES_MPT_HW:
3830 in_param = slave;
3831 err = mlx4_cmd(dev, in_param, mptn, 0,
3832 MLX4_CMD_HW2SW_MPT,
3833 MLX4_CMD_TIME_CLASS_A,
3834 MLX4_CMD_NATIVE);
3835 if (err)
3836 mlx4_dbg(dev, "rem_slave_mrs: failed"
3837 " to move slave %d mpt %d to"
3838 " SW ownership\n",
3839 slave, mptn);
3840 if (mpt->mtt)
3841 atomic_dec(&mpt->mtt->ref_count);
3842 state = RES_MPT_MAPPED;
3843 break;
3844 default:
3845 state = 0;
3846 }
3847 }
3848 }
3849 spin_lock_irq(mlx4_tlock(dev));
3850 }
3851 spin_unlock_irq(mlx4_tlock(dev));
3852}
3853
3854static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3855{
3856 struct mlx4_priv *priv = mlx4_priv(dev);
3857 struct mlx4_resource_tracker *tracker =
3858 &priv->mfunc.master.res_tracker;
3859 struct list_head *mtt_list =
3860 &tracker->slave_list[slave].res_list[RES_MTT];
3861 struct res_mtt *mtt;
3862 struct res_mtt *tmp;
3863 int state;
3864 LIST_HEAD(tlist);
3865 int base;
3866 int err;
3867
3868 err = move_all_busy(dev, slave, RES_MTT);
3869 if (err)
3870 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3871 "busy for slave %d\n", slave);
3872
3873 spin_lock_irq(mlx4_tlock(dev));
3874 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3875 spin_unlock_irq(mlx4_tlock(dev));
3876 if (mtt->com.owner == slave) {
3877 base = mtt->com.res_id;
3878 state = mtt->com.from_state;
3879 while (state != 0) {
3880 switch (state) {
3881 case RES_MTT_ALLOCATED:
3882 __mlx4_free_mtt_range(dev, base,
3883 mtt->order);
3884 spin_lock_irq(mlx4_tlock(dev));
3885 rb_erase(&mtt->com.node,
3886 &tracker->res_tree[RES_MTT]);
3887 list_del(&mtt->com.list);
3888 spin_unlock_irq(mlx4_tlock(dev));
3889 kfree(mtt);
3890 state = 0;
3891 break;
3892
3893 default:
3894 state = 0;
3895 }
3896 }
3897 }
3898 spin_lock_irq(mlx4_tlock(dev));
3899 }
3900 spin_unlock_irq(mlx4_tlock(dev));
3901}
3902
3903static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3904{
3905 struct mlx4_priv *priv = mlx4_priv(dev);
3906 struct mlx4_resource_tracker *tracker =
3907 &priv->mfunc.master.res_tracker;
3908 struct list_head *fs_rule_list =
3909 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3910 struct res_fs_rule *fs_rule;
3911 struct res_fs_rule *tmp;
3912 int state;
3913 u64 base;
3914 int err;
3915
3916 err = move_all_busy(dev, slave, RES_FS_RULE);
3917 if (err)
3918 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
3919 slave);
3920
3921 spin_lock_irq(mlx4_tlock(dev));
3922 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3923 spin_unlock_irq(mlx4_tlock(dev));
3924 if (fs_rule->com.owner == slave) {
3925 base = fs_rule->com.res_id;
3926 state = fs_rule->com.from_state;
3927 while (state != 0) {
3928 switch (state) {
3929 case RES_FS_RULE_ALLOCATED:
3930 /* detach rule */
3931 err = mlx4_cmd(dev, base, 0, 0,
3932 MLX4_QP_FLOW_STEERING_DETACH,
3933 MLX4_CMD_TIME_CLASS_A,
3934 MLX4_CMD_NATIVE);
3935
3936 spin_lock_irq(mlx4_tlock(dev));
3937 rb_erase(&fs_rule->com.node,
3938 &tracker->res_tree[RES_FS_RULE]);
3939 list_del(&fs_rule->com.list);
3940 spin_unlock_irq(mlx4_tlock(dev));
3941 kfree(fs_rule);
3942 state = 0;
3943 break;
3944
3945 default:
3946 state = 0;
3947 }
3948 }
3949 }
3950 spin_lock_irq(mlx4_tlock(dev));
3951 }
3952 spin_unlock_irq(mlx4_tlock(dev));
3953}
3954
3955static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3956{
3957 struct mlx4_priv *priv = mlx4_priv(dev);
3958 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3959 struct list_head *eq_list =
3960 &tracker->slave_list[slave].res_list[RES_EQ];
3961 struct res_eq *eq;
3962 struct res_eq *tmp;
3963 int err;
3964 int state;
3965 LIST_HEAD(tlist);
3966 int eqn;
3967 struct mlx4_cmd_mailbox *mailbox;
3968
3969 err = move_all_busy(dev, slave, RES_EQ);
3970 if (err)
3971 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3972 "busy for slave %d\n", slave);
3973
3974 spin_lock_irq(mlx4_tlock(dev));
3975 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3976 spin_unlock_irq(mlx4_tlock(dev));
3977 if (eq->com.owner == slave) {
3978 eqn = eq->com.res_id;
3979 state = eq->com.from_state;
3980 while (state != 0) {
3981 switch (state) {
3982 case RES_EQ_RESERVED:
3983 spin_lock_irq(mlx4_tlock(dev));
3984 rb_erase(&eq->com.node,
3985 &tracker->res_tree[RES_EQ]);
3986 list_del(&eq->com.list);
3987 spin_unlock_irq(mlx4_tlock(dev));
3988 kfree(eq);
3989 state = 0;
3990 break;
3991
3992 case RES_EQ_HW:
3993 mailbox = mlx4_alloc_cmd_mailbox(dev);
3994 if (IS_ERR(mailbox)) {
3995 cond_resched();
3996 continue;
3997 }
3998 err = mlx4_cmd_box(dev, slave, 0,
3999 eqn & 0xff, 0,
4000 MLX4_CMD_HW2SW_EQ,
4001 MLX4_CMD_TIME_CLASS_A,
4002 MLX4_CMD_NATIVE);
4003 if (err)
4004 mlx4_dbg(dev, "rem_slave_eqs: failed"
4005 " to move slave %d eqs %d to"
4006 " SW ownership\n", slave, eqn);
 4007 				mlx4_free_cmd_mailbox(dev, mailbox);
4008 atomic_dec(&eq->mtt->ref_count);
4009 state = RES_EQ_RESERVED;
4010 break;
4011
4012 default:
4013 state = 0;
4014 }
4015 }
4016 }
4017 spin_lock_irq(mlx4_tlock(dev));
4018 }
4019 spin_unlock_irq(mlx4_tlock(dev));
4020}
4021
4022static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4023{
4024 struct mlx4_priv *priv = mlx4_priv(dev);
4025 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4026 struct list_head *counter_list =
4027 &tracker->slave_list[slave].res_list[RES_COUNTER];
4028 struct res_counter *counter;
4029 struct res_counter *tmp;
4030 int err;
4031 int index;
4032
4033 err = move_all_busy(dev, slave, RES_COUNTER);
4034 if (err)
4035 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
4036 "busy for slave %d\n", slave);
4037
4038 spin_lock_irq(mlx4_tlock(dev));
4039 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4040 if (counter->com.owner == slave) {
4041 index = counter->com.res_id;
4042 rb_erase(&counter->com.node,
4043 &tracker->res_tree[RES_COUNTER]);
4044 list_del(&counter->com.list);
4045 kfree(counter);
4046 __mlx4_counter_free(dev, index);
4047 }
4048 }
4049 spin_unlock_irq(mlx4_tlock(dev));
4050}
4051
4052static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4053{
4054 struct mlx4_priv *priv = mlx4_priv(dev);
4055 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4056 struct list_head *xrcdn_list =
4057 &tracker->slave_list[slave].res_list[RES_XRCD];
4058 struct res_xrcdn *xrcd;
4059 struct res_xrcdn *tmp;
4060 int err;
4061 int xrcdn;
4062
4063 err = move_all_busy(dev, slave, RES_XRCD);
4064 if (err)
4065 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
4066 "busy for slave %d\n", slave);
4067
4068 spin_lock_irq(mlx4_tlock(dev));
4069 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4070 if (xrcd->com.owner == slave) {
4071 xrcdn = xrcd->com.res_id;
 4072 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4073 list_del(&xrcd->com.list);
4074 kfree(xrcd);
4075 __mlx4_xrcd_free(dev, xrcdn);
4076 }
4077 }
4078 spin_unlock_irq(mlx4_tlock(dev));
4079}
4080
4081void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4082{
4083 struct mlx4_priv *priv = mlx4_priv(dev);
4084
4085 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 4086 	rem_slave_vlans(dev, slave);
 4087 	rem_slave_macs(dev, slave);
 4088 	rem_slave_fs_rule(dev, slave);
4089 rem_slave_qps(dev, slave);
4090 rem_slave_srqs(dev, slave);
4091 rem_slave_cqs(dev, slave);
4092 rem_slave_mrs(dev, slave);
4093 rem_slave_eqs(dev, slave);
4094 rem_slave_mtts(dev, slave);
4095 rem_slave_counters(dev, slave);
4096 rem_slave_xrcdns(dev, slave);
4097 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4098}
4099
4100void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4101{
4102 struct mlx4_vf_immed_vlan_work *work =
4103 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4104 struct mlx4_cmd_mailbox *mailbox;
4105 struct mlx4_update_qp_context *upd_context;
4106 struct mlx4_dev *dev = &work->priv->dev;
4107 struct mlx4_resource_tracker *tracker =
4108 &work->priv->mfunc.master.res_tracker;
4109 struct list_head *qp_list =
4110 &tracker->slave_list[work->slave].res_list[RES_QP];
4111 struct res_qp *qp;
4112 struct res_qp *tmp;
4113 u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4114 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4115 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4116 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4117 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4118 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
4119 (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4120 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4121
4122 int err;
4123 int port, errors = 0;
4124 u8 vlan_control;
4125
4126 if (mlx4_is_slave(dev)) {
4127 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4128 work->slave);
4129 goto out;
4130 }
4131
4132 mailbox = mlx4_alloc_cmd_mailbox(dev);
4133 if (IS_ERR(mailbox))
4134 goto out;
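	/* Choose the vlan_control policy pushed into each of the slave's QPs:
	 * block everything when the link is administratively disabled, block
	 * tagged traffic when no VST vlan is set, otherwise block tagged
	 * transmit plus untagged and priority-tagged receive so only the
	 * enforced vlan is used.
	 */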
4135 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4136 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4137 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4138 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4139 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4140 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4141 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4142 else if (!work->vlan_id)
4143 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4144 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4145 else
4146 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4147 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4148 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4149
4150 upd_context = mailbox->buf;
4151 upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4152 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4153 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4154
4155 spin_lock_irq(mlx4_tlock(dev));
4156 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4157 spin_unlock_irq(mlx4_tlock(dev));
4158 if (qp->com.owner == work->slave) {
4159 if (qp->com.from_state != RES_QP_HW ||
4160 !qp->sched_queue || /* no INIT2RTR trans yet */
4161 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4162 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4163 spin_lock_irq(mlx4_tlock(dev));
4164 continue;
4165 }
4166 port = (qp->sched_queue >> 6 & 1) + 1;
4167 if (port != work->port) {
4168 spin_lock_irq(mlx4_tlock(dev));
4169 continue;
4170 }
4171 upd_context->qp_context.pri_path.sched_queue =
4172 qp->sched_queue & 0xC7;
4173 upd_context->qp_context.pri_path.sched_queue |=
4174 ((work->qos & 0x7) << 3);
4175
4176 err = mlx4_cmd(dev, mailbox->dma,
4177 qp->local_qpn & 0xffffff,
4178 0, MLX4_CMD_UPDATE_QP,
4179 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4180 if (err) {
4181 mlx4_info(dev, "UPDATE_QP failed for slave %d, "
4182 "port %d, qpn %d (%d)\n",
4183 work->slave, port, qp->local_qpn,
4184 err);
4185 errors++;
4186 }
4187 }
4188 spin_lock_irq(mlx4_tlock(dev));
4189 }
4190 spin_unlock_irq(mlx4_tlock(dev));
4191 mlx4_free_cmd_mailbox(dev, mailbox);
4192
4193 if (errors)
4194 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4195 errors, work->slave, work->port);
4196
4197 /* unregister previous vlan_id if needed and we had no errors
4198 * while updating the QPs
4199 */
4200 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4201 NO_INDX != work->orig_vlan_ix)
4202 __mlx4_unregister_vlan(&work->priv->dev, work->port,
 4203 				       work->orig_vlan_id);
4204out:
4205 kfree(work);
4206 return;
4207}