drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
 41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44
45#include "mlx4.h"
46#include "fw.h"
47
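/*
 * Resource tracker: the master function uses the structures below to
 * account for HCA resources (QPs, CQs, SRQs, MPTs, MTTs, EQs, counters,
 * XRC domains and MACs) that are allocated on behalf of slaves through
 * the command wrappers in this file.
 */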
48#define MLX4_MAC_VALID (1ull << 63)
49#define MLX4_MAC_MASK 0x7fffffffffffffffULL
50#define ETH_ALEN 6
51
52struct mac_res {
53 struct list_head list;
54 u64 mac;
55 u8 port;
56};
57
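/*
 * Common bookkeeping header embedded in every tracked resource: the
 * radix-tree id, the owning slave and the state-machine fields used by
 * the *_res_start_move_to()/res_end_move()/res_abort_move() helpers.
 */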
58struct res_common {
59 struct list_head list;
60 u32 res_id;
61 int owner;
62 int state;
63 int from_state;
64 int to_state;
65 int removing;
66};
67
68enum {
69 RES_ANY_BUSY = 1
70};
71
72struct res_gid {
73 struct list_head list;
74 u8 gid[16];
75 enum mlx4_protocol prot;
 76	enum mlx4_steer_type steer;
77};
78
79enum res_qp_states {
80 RES_QP_BUSY = RES_ANY_BUSY,
81
82 /* QP number was allocated */
83 RES_QP_RESERVED,
84
85 /* ICM memory for QP context was mapped */
86 RES_QP_MAPPED,
87
88 /* QP is in hw ownership */
89 RES_QP_HW
90};
91
92struct res_qp {
93 struct res_common com;
94 struct res_mtt *mtt;
95 struct res_cq *rcq;
96 struct res_cq *scq;
97 struct res_srq *srq;
98 struct list_head mcg_list;
99 spinlock_t mcg_spl;
100 int local_qpn;
101};
102
103enum res_mtt_states {
104 RES_MTT_BUSY = RES_ANY_BUSY,
105 RES_MTT_ALLOCATED,
106};
107
108static inline const char *mtt_states_str(enum res_mtt_states state)
109{
110 switch (state) {
111 case RES_MTT_BUSY: return "RES_MTT_BUSY";
112 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
113 default: return "Unknown";
114 }
115}
116
117struct res_mtt {
118 struct res_common com;
119 int order;
120 atomic_t ref_count;
121};
122
123enum res_mpt_states {
124 RES_MPT_BUSY = RES_ANY_BUSY,
125 RES_MPT_RESERVED,
126 RES_MPT_MAPPED,
127 RES_MPT_HW,
128};
129
130struct res_mpt {
131 struct res_common com;
132 struct res_mtt *mtt;
133 int key;
134};
135
136enum res_eq_states {
137 RES_EQ_BUSY = RES_ANY_BUSY,
138 RES_EQ_RESERVED,
139 RES_EQ_HW,
140};
141
142struct res_eq {
143 struct res_common com;
144 struct res_mtt *mtt;
145};
146
147enum res_cq_states {
148 RES_CQ_BUSY = RES_ANY_BUSY,
149 RES_CQ_ALLOCATED,
150 RES_CQ_HW,
151};
152
153struct res_cq {
154 struct res_common com;
155 struct res_mtt *mtt;
156 atomic_t ref_count;
157};
158
159enum res_srq_states {
160 RES_SRQ_BUSY = RES_ANY_BUSY,
161 RES_SRQ_ALLOCATED,
162 RES_SRQ_HW,
163};
164
165struct res_srq {
166 struct res_common com;
167 struct res_mtt *mtt;
168 struct res_cq *cq;
169 atomic_t ref_count;
170};
171
172enum res_counter_states {
173 RES_COUNTER_BUSY = RES_ANY_BUSY,
174 RES_COUNTER_ALLOCATED,
175};
176
177struct res_counter {
178 struct res_common com;
179 int port;
180};
181
182enum res_xrcdn_states {
183 RES_XRCD_BUSY = RES_ANY_BUSY,
184 RES_XRCD_ALLOCATED,
185};
186
187struct res_xrcdn {
188 struct res_common com;
189 int port;
190};
191
 192/* For debug purposes */
193static const char *ResourceType(enum mlx4_resource rt)
194{
195 switch (rt) {
196 case RES_QP: return "RES_QP";
197 case RES_CQ: return "RES_CQ";
198 case RES_SRQ: return "RES_SRQ";
199 case RES_MPT: return "RES_MPT";
200 case RES_MTT: return "RES_MTT";
201 case RES_MAC: return "RES_MAC";
202 case RES_EQ: return "RES_EQ";
203 case RES_COUNTER: return "RES_COUNTER";
 204	case RES_XRCD: return "RES_XRCD";
205 default: return "Unknown resource type !!!";
206 };
207}
208
209int mlx4_init_resource_tracker(struct mlx4_dev *dev)
210{
211 struct mlx4_priv *priv = mlx4_priv(dev);
212 int i;
213 int t;
214
215 priv->mfunc.master.res_tracker.slave_list =
216 kzalloc(dev->num_slaves * sizeof(struct slave_list),
217 GFP_KERNEL);
218 if (!priv->mfunc.master.res_tracker.slave_list)
219 return -ENOMEM;
220
221 for (i = 0 ; i < dev->num_slaves; i++) {
222 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
223 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
224 slave_list[i].res_list[t]);
225 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
226 }
227
228 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
229 dev->num_slaves);
230 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
231 INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
232 GFP_ATOMIC|__GFP_NOWARN);
233
234 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
 235	return 0;
236}
237
238void mlx4_free_resource_tracker(struct mlx4_dev *dev,
239 enum mlx4_res_tracker_free_type type)
240{
241 struct mlx4_priv *priv = mlx4_priv(dev);
242 int i;
243
244 if (priv->mfunc.master.res_tracker.slave_list) {
245 if (type != RES_TR_FREE_STRUCTS_ONLY)
246 for (i = 0 ; i < dev->num_slaves; i++)
247 if (type == RES_TR_FREE_ALL ||
248 dev->caps.function != i)
249 mlx4_delete_all_resources_for_slave(dev, i);
250
251 if (type != RES_TR_FREE_SLAVES_ONLY) {
252 kfree(priv->mfunc.master.res_tracker.slave_list);
253 priv->mfunc.master.res_tracker.slave_list = NULL;
254 }
255 }
256}
257
258static void update_ud_gid(struct mlx4_dev *dev,
259 struct mlx4_qp_context *qp_ctx, u8 slave)
260{
261 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
262
263 if (MLX4_QP_ST_UD == ts)
264 qp_ctx->pri_path.mgid_index = 0x80 | slave;
265
 266	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
267 slave, qp_ctx->pri_path.mgid_index);
268}
269
270static int mpt_mask(struct mlx4_dev *dev)
271{
272 return dev->caps.num_mpts - 1;
273}
274
275static void *find_res(struct mlx4_dev *dev, int res_id,
276 enum mlx4_resource type)
277{
278 struct mlx4_priv *priv = mlx4_priv(dev);
279
280 return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
281 res_id);
282}
283
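/*
 * Look up a resource, verify it is owned by @slave and mark it busy under
 * the tracker lock so it cannot be moved or removed concurrently; put_res()
 * below restores the state saved in from_state.
 */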
284static int get_res(struct mlx4_dev *dev, int slave, int res_id,
285 enum mlx4_resource type,
286 void *res)
287{
288 struct res_common *r;
289 int err = 0;
290
291 spin_lock_irq(mlx4_tlock(dev));
292 r = find_res(dev, res_id, type);
293 if (!r) {
 294		err = -ENOENT;
295 goto exit;
296 }
297
298 if (r->state == RES_ANY_BUSY) {
299 err = -EBUSY;
300 goto exit;
301 }
302
303 if (r->owner != slave) {
304 err = -EPERM;
305 goto exit;
306 }
307
308 r->from_state = r->state;
309 r->state = RES_ANY_BUSY;
310 mlx4_dbg(dev, "res %s id 0x%x to busy\n",
311 ResourceType(type), r->res_id);
312
313 if (res)
314 *((struct res_common **)res) = r;
315
316exit:
317 spin_unlock_irq(mlx4_tlock(dev));
318 return err;
319}
320
321int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
322 enum mlx4_resource type,
323 int res_id, int *slave)
324{
325
326 struct res_common *r;
327 int err = -ENOENT;
328 int id = res_id;
329
330 if (type == RES_QP)
331 id &= 0x7fffff;
 332	spin_lock(mlx4_tlock(dev));
333
334 r = find_res(dev, id, type);
335 if (r) {
336 *slave = r->owner;
337 err = 0;
338 }
 339	spin_unlock(mlx4_tlock(dev));
340
341 return err;
342}
343
344static void put_res(struct mlx4_dev *dev, int slave, int res_id,
345 enum mlx4_resource type)
346{
347 struct res_common *r;
348
349 spin_lock_irq(mlx4_tlock(dev));
350 r = find_res(dev, res_id, type);
351 if (r)
352 r->state = r->from_state;
353 spin_unlock_irq(mlx4_tlock(dev));
354}
355
356static struct res_common *alloc_qp_tr(int id)
357{
358 struct res_qp *ret;
359
360 ret = kzalloc(sizeof *ret, GFP_KERNEL);
361 if (!ret)
362 return NULL;
363
364 ret->com.res_id = id;
365 ret->com.state = RES_QP_RESERVED;
 366	ret->local_qpn = id;
367 INIT_LIST_HEAD(&ret->mcg_list);
368 spin_lock_init(&ret->mcg_spl);
369
370 return &ret->com;
371}
372
373static struct res_common *alloc_mtt_tr(int id, int order)
374{
375 struct res_mtt *ret;
376
377 ret = kzalloc(sizeof *ret, GFP_KERNEL);
378 if (!ret)
379 return NULL;
380
381 ret->com.res_id = id;
382 ret->order = order;
383 ret->com.state = RES_MTT_ALLOCATED;
384 atomic_set(&ret->ref_count, 0);
385
386 return &ret->com;
387}
388
389static struct res_common *alloc_mpt_tr(int id, int key)
390{
391 struct res_mpt *ret;
392
393 ret = kzalloc(sizeof *ret, GFP_KERNEL);
394 if (!ret)
395 return NULL;
396
397 ret->com.res_id = id;
398 ret->com.state = RES_MPT_RESERVED;
399 ret->key = key;
400
401 return &ret->com;
402}
403
404static struct res_common *alloc_eq_tr(int id)
405{
406 struct res_eq *ret;
407
408 ret = kzalloc(sizeof *ret, GFP_KERNEL);
409 if (!ret)
410 return NULL;
411
412 ret->com.res_id = id;
413 ret->com.state = RES_EQ_RESERVED;
414
415 return &ret->com;
416}
417
418static struct res_common *alloc_cq_tr(int id)
419{
420 struct res_cq *ret;
421
422 ret = kzalloc(sizeof *ret, GFP_KERNEL);
423 if (!ret)
424 return NULL;
425
426 ret->com.res_id = id;
427 ret->com.state = RES_CQ_ALLOCATED;
428 atomic_set(&ret->ref_count, 0);
429
430 return &ret->com;
431}
432
433static struct res_common *alloc_srq_tr(int id)
434{
435 struct res_srq *ret;
436
437 ret = kzalloc(sizeof *ret, GFP_KERNEL);
438 if (!ret)
439 return NULL;
440
441 ret->com.res_id = id;
442 ret->com.state = RES_SRQ_ALLOCATED;
443 atomic_set(&ret->ref_count, 0);
444
445 return &ret->com;
446}
447
448static struct res_common *alloc_counter_tr(int id)
449{
450 struct res_counter *ret;
451
452 ret = kzalloc(sizeof *ret, GFP_KERNEL);
453 if (!ret)
454 return NULL;
455
456 ret->com.res_id = id;
457 ret->com.state = RES_COUNTER_ALLOCATED;
458
459 return &ret->com;
460}
461
462static struct res_common *alloc_xrcdn_tr(int id)
463{
464 struct res_xrcdn *ret;
465
466 ret = kzalloc(sizeof *ret, GFP_KERNEL);
467 if (!ret)
468 return NULL;
469
470 ret->com.res_id = id;
471 ret->com.state = RES_XRCD_ALLOCATED;
472
473 return &ret->com;
474}
475
476static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
477 int extra)
478{
479 struct res_common *ret;
480
481 switch (type) {
482 case RES_QP:
483 ret = alloc_qp_tr(id);
484 break;
485 case RES_MPT:
486 ret = alloc_mpt_tr(id, extra);
487 break;
488 case RES_MTT:
489 ret = alloc_mtt_tr(id, extra);
490 break;
491 case RES_EQ:
492 ret = alloc_eq_tr(id);
493 break;
494 case RES_CQ:
495 ret = alloc_cq_tr(id);
496 break;
497 case RES_SRQ:
498 ret = alloc_srq_tr(id);
499 break;
500 case RES_MAC:
501 printk(KERN_ERR "implementation missing\n");
502 return NULL;
503 case RES_COUNTER:
504 ret = alloc_counter_tr(id);
505 break;
506 case RES_XRCD:
507 ret = alloc_xrcdn_tr(id);
508 break;
509 default:
510 return NULL;
511 }
512 if (ret)
513 ret->owner = slave;
514
515 return ret;
516}
517
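/*
 * Allocate tracking entries for @count consecutive resource ids starting at
 * @base, insert them into the radix tree and link them on the owning slave's
 * per-type list.
 */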
518static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
519 enum mlx4_resource type, int extra)
520{
521 int i;
522 int err;
523 struct mlx4_priv *priv = mlx4_priv(dev);
524 struct res_common **res_arr;
525 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
526 struct radix_tree_root *root = &tracker->res_tree[type];
527
528 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
529 if (!res_arr)
530 return -ENOMEM;
531
532 for (i = 0; i < count; ++i) {
533 res_arr[i] = alloc_tr(base + i, type, slave, extra);
534 if (!res_arr[i]) {
535 for (--i; i >= 0; --i)
536 kfree(res_arr[i]);
537
538 kfree(res_arr);
539 return -ENOMEM;
540 }
541 }
542
543 spin_lock_irq(mlx4_tlock(dev));
544 for (i = 0; i < count; ++i) {
545 if (find_res(dev, base + i, type)) {
546 err = -EEXIST;
547 goto undo;
548 }
549 err = radix_tree_insert(root, base + i, res_arr[i]);
550 if (err)
551 goto undo;
552 list_add_tail(&res_arr[i]->list,
553 &tracker->slave_list[slave].res_list[type]);
554 }
555 spin_unlock_irq(mlx4_tlock(dev));
556 kfree(res_arr);
557
558 return 0;
559
560undo:
 561	for (--i; i >= 0; --i)
 562		radix_tree_delete(&tracker->res_tree[type], base + i);
563
564 spin_unlock_irq(mlx4_tlock(dev));
565
566 for (i = 0; i < count; ++i)
567 kfree(res_arr[i]);
568
569 kfree(res_arr);
570
571 return err;
572}
573
574static int remove_qp_ok(struct res_qp *res)
575{
576 if (res->com.state == RES_QP_BUSY)
577 return -EBUSY;
578 else if (res->com.state != RES_QP_RESERVED)
579 return -EPERM;
580
581 return 0;
582}
583
584static int remove_mtt_ok(struct res_mtt *res, int order)
585{
586 if (res->com.state == RES_MTT_BUSY ||
587 atomic_read(&res->ref_count)) {
588 printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
589 __func__, __LINE__,
590 mtt_states_str(res->com.state),
591 atomic_read(&res->ref_count));
592 return -EBUSY;
593 } else if (res->com.state != RES_MTT_ALLOCATED)
594 return -EPERM;
595 else if (res->order != order)
596 return -EINVAL;
597
598 return 0;
599}
600
601static int remove_mpt_ok(struct res_mpt *res)
602{
603 if (res->com.state == RES_MPT_BUSY)
604 return -EBUSY;
605 else if (res->com.state != RES_MPT_RESERVED)
606 return -EPERM;
607
608 return 0;
609}
610
611static int remove_eq_ok(struct res_eq *res)
612{
 613	if (res->com.state == RES_EQ_BUSY)
 614		return -EBUSY;
 615	else if (res->com.state != RES_EQ_RESERVED)
616 return -EPERM;
617
618 return 0;
619}
620
621static int remove_counter_ok(struct res_counter *res)
622{
623 if (res->com.state == RES_COUNTER_BUSY)
624 return -EBUSY;
625 else if (res->com.state != RES_COUNTER_ALLOCATED)
626 return -EPERM;
627
628 return 0;
629}
630
631static int remove_xrcdn_ok(struct res_xrcdn *res)
632{
633 if (res->com.state == RES_XRCD_BUSY)
634 return -EBUSY;
635 else if (res->com.state != RES_XRCD_ALLOCATED)
636 return -EPERM;
637
638 return 0;
639}
640
641static int remove_cq_ok(struct res_cq *res)
642{
643 if (res->com.state == RES_CQ_BUSY)
644 return -EBUSY;
645 else if (res->com.state != RES_CQ_ALLOCATED)
646 return -EPERM;
647
648 return 0;
649}
650
651static int remove_srq_ok(struct res_srq *res)
652{
653 if (res->com.state == RES_SRQ_BUSY)
654 return -EBUSY;
655 else if (res->com.state != RES_SRQ_ALLOCATED)
656 return -EPERM;
657
658 return 0;
659}
660
661static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
662{
663 switch (type) {
664 case RES_QP:
665 return remove_qp_ok((struct res_qp *)res);
666 case RES_CQ:
667 return remove_cq_ok((struct res_cq *)res);
668 case RES_SRQ:
669 return remove_srq_ok((struct res_srq *)res);
670 case RES_MPT:
671 return remove_mpt_ok((struct res_mpt *)res);
672 case RES_MTT:
673 return remove_mtt_ok((struct res_mtt *)res, extra);
674 case RES_MAC:
675 return -ENOSYS;
676 case RES_EQ:
677 return remove_eq_ok((struct res_eq *)res);
678 case RES_COUNTER:
679 return remove_counter_ok((struct res_counter *)res);
680 case RES_XRCD:
681 return remove_xrcdn_ok((struct res_xrcdn *)res);
682 default:
683 return -EINVAL;
684 }
685}
686
687static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
688 enum mlx4_resource type, int extra)
689{
690 int i;
691 int err;
692 struct mlx4_priv *priv = mlx4_priv(dev);
693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
694 struct res_common *r;
695
696 spin_lock_irq(mlx4_tlock(dev));
697 for (i = base; i < base + count; ++i) {
698 r = radix_tree_lookup(&tracker->res_tree[type], i);
699 if (!r) {
700 err = -ENOENT;
701 goto out;
702 }
703 if (r->owner != slave) {
704 err = -EPERM;
705 goto out;
706 }
707 err = remove_ok(r, type, extra);
708 if (err)
709 goto out;
710 }
711
712 for (i = base; i < base + count; ++i) {
713 r = radix_tree_lookup(&tracker->res_tree[type], i);
714 radix_tree_delete(&tracker->res_tree[type], i);
715 list_del(&r->list);
716 kfree(r);
717 }
718 err = 0;
719
720out:
721 spin_unlock_irq(mlx4_tlock(dev));
722
723 return err;
724}
725
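/*
 * State-transition helpers: each *_res_start_move_to() validates the
 * requested transition, records from_state/to_state and marks the resource
 * busy; res_end_move() commits the transition and res_abort_move() rolls it
 * back.
 */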
726static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
727 enum res_qp_states state, struct res_qp **qp,
728 int alloc)
729{
730 struct mlx4_priv *priv = mlx4_priv(dev);
731 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
732 struct res_qp *r;
733 int err = 0;
734
735 spin_lock_irq(mlx4_tlock(dev));
736 r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
737 if (!r)
738 err = -ENOENT;
739 else if (r->com.owner != slave)
740 err = -EPERM;
741 else {
742 switch (state) {
743 case RES_QP_BUSY:
744 mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
745 __func__, r->com.res_id);
746 err = -EBUSY;
747 break;
748
749 case RES_QP_RESERVED:
750 if (r->com.state == RES_QP_MAPPED && !alloc)
751 break;
752
753 mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
754 err = -EINVAL;
755 break;
756
757 case RES_QP_MAPPED:
758 if ((r->com.state == RES_QP_RESERVED && alloc) ||
759 r->com.state == RES_QP_HW)
760 break;
761 else {
762 mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
763 r->com.res_id);
764 err = -EINVAL;
765 }
766
767 break;
768
769 case RES_QP_HW:
770 if (r->com.state != RES_QP_MAPPED)
771 err = -EINVAL;
772 break;
773 default:
774 err = -EINVAL;
775 }
776
777 if (!err) {
778 r->com.from_state = r->com.state;
779 r->com.to_state = state;
780 r->com.state = RES_QP_BUSY;
781 if (qp)
782 *qp = (struct res_qp *)r;
783 }
784 }
785
786 spin_unlock_irq(mlx4_tlock(dev));
787
788 return err;
789}
790
791static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
792 enum res_mpt_states state, struct res_mpt **mpt)
793{
794 struct mlx4_priv *priv = mlx4_priv(dev);
795 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
796 struct res_mpt *r;
797 int err = 0;
798
799 spin_lock_irq(mlx4_tlock(dev));
800 r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
801 if (!r)
802 err = -ENOENT;
803 else if (r->com.owner != slave)
804 err = -EPERM;
805 else {
806 switch (state) {
807 case RES_MPT_BUSY:
808 err = -EINVAL;
809 break;
810
811 case RES_MPT_RESERVED:
812 if (r->com.state != RES_MPT_MAPPED)
813 err = -EINVAL;
814 break;
815
816 case RES_MPT_MAPPED:
817 if (r->com.state != RES_MPT_RESERVED &&
818 r->com.state != RES_MPT_HW)
819 err = -EINVAL;
820 break;
821
822 case RES_MPT_HW:
823 if (r->com.state != RES_MPT_MAPPED)
824 err = -EINVAL;
825 break;
826 default:
827 err = -EINVAL;
828 }
829
830 if (!err) {
831 r->com.from_state = r->com.state;
832 r->com.to_state = state;
833 r->com.state = RES_MPT_BUSY;
834 if (mpt)
835 *mpt = (struct res_mpt *)r;
836 }
837 }
838
839 spin_unlock_irq(mlx4_tlock(dev));
840
841 return err;
842}
843
844static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
845 enum res_eq_states state, struct res_eq **eq)
846{
847 struct mlx4_priv *priv = mlx4_priv(dev);
848 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
849 struct res_eq *r;
850 int err = 0;
851
852 spin_lock_irq(mlx4_tlock(dev));
853 r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
854 if (!r)
855 err = -ENOENT;
856 else if (r->com.owner != slave)
857 err = -EPERM;
858 else {
859 switch (state) {
860 case RES_EQ_BUSY:
861 err = -EINVAL;
862 break;
863
864 case RES_EQ_RESERVED:
865 if (r->com.state != RES_EQ_HW)
866 err = -EINVAL;
867 break;
868
869 case RES_EQ_HW:
870 if (r->com.state != RES_EQ_RESERVED)
871 err = -EINVAL;
872 break;
873
874 default:
875 err = -EINVAL;
876 }
877
878 if (!err) {
879 r->com.from_state = r->com.state;
880 r->com.to_state = state;
881 r->com.state = RES_EQ_BUSY;
882 if (eq)
883 *eq = r;
884 }
885 }
886
887 spin_unlock_irq(mlx4_tlock(dev));
888
889 return err;
890}
891
892static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
893 enum res_cq_states state, struct res_cq **cq)
894{
895 struct mlx4_priv *priv = mlx4_priv(dev);
896 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
897 struct res_cq *r;
898 int err;
899
900 spin_lock_irq(mlx4_tlock(dev));
901 r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
902 if (!r)
903 err = -ENOENT;
904 else if (r->com.owner != slave)
905 err = -EPERM;
906 else {
907 switch (state) {
908 case RES_CQ_BUSY:
909 err = -EBUSY;
910 break;
911
912 case RES_CQ_ALLOCATED:
913 if (r->com.state != RES_CQ_HW)
914 err = -EINVAL;
915 else if (atomic_read(&r->ref_count))
916 err = -EBUSY;
917 else
918 err = 0;
919 break;
920
921 case RES_CQ_HW:
922 if (r->com.state != RES_CQ_ALLOCATED)
923 err = -EINVAL;
924 else
925 err = 0;
926 break;
927
928 default:
929 err = -EINVAL;
930 }
931
932 if (!err) {
933 r->com.from_state = r->com.state;
934 r->com.to_state = state;
935 r->com.state = RES_CQ_BUSY;
936 if (cq)
937 *cq = r;
938 }
939 }
940
941 spin_unlock_irq(mlx4_tlock(dev));
942
943 return err;
944}
945
946static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
947 enum res_cq_states state, struct res_srq **srq)
948{
949 struct mlx4_priv *priv = mlx4_priv(dev);
950 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
951 struct res_srq *r;
952 int err = 0;
953
954 spin_lock_irq(mlx4_tlock(dev));
955 r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
956 if (!r)
957 err = -ENOENT;
958 else if (r->com.owner != slave)
959 err = -EPERM;
960 else {
961 switch (state) {
962 case RES_SRQ_BUSY:
963 err = -EINVAL;
964 break;
965
966 case RES_SRQ_ALLOCATED:
967 if (r->com.state != RES_SRQ_HW)
968 err = -EINVAL;
969 else if (atomic_read(&r->ref_count))
970 err = -EBUSY;
971 break;
972
973 case RES_SRQ_HW:
974 if (r->com.state != RES_SRQ_ALLOCATED)
975 err = -EINVAL;
976 break;
977
978 default:
979 err = -EINVAL;
980 }
981
982 if (!err) {
983 r->com.from_state = r->com.state;
984 r->com.to_state = state;
985 r->com.state = RES_SRQ_BUSY;
986 if (srq)
987 *srq = r;
988 }
989 }
990
991 spin_unlock_irq(mlx4_tlock(dev));
992
993 return err;
994}
995
996static void res_abort_move(struct mlx4_dev *dev, int slave,
997 enum mlx4_resource type, int id)
998{
999 struct mlx4_priv *priv = mlx4_priv(dev);
1000 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1001 struct res_common *r;
1002
1003 spin_lock_irq(mlx4_tlock(dev));
1004 r = radix_tree_lookup(&tracker->res_tree[type], id);
1005 if (r && (r->owner == slave))
1006 r->state = r->from_state;
1007 spin_unlock_irq(mlx4_tlock(dev));
1008}
1009
1010static void res_end_move(struct mlx4_dev *dev, int slave,
1011 enum mlx4_resource type, int id)
1012{
1013 struct mlx4_priv *priv = mlx4_priv(dev);
1014 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1015 struct res_common *r;
1016
1017 spin_lock_irq(mlx4_tlock(dev));
1018 r = radix_tree_lookup(&tracker->res_tree[type], id);
1019 if (r && (r->owner == slave))
1020 r->state = r->to_state;
1021 spin_unlock_irq(mlx4_tlock(dev));
1022}
1023
1024static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1025{
1026 return mlx4_is_qp_reserved(dev, qpn);
1027}
1028
1029static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1030 u64 in_param, u64 *out_param)
1031{
1032 int err;
1033 int count;
1034 int align;
1035 int base;
1036 int qpn;
1037
1038 switch (op) {
1039 case RES_OP_RESERVE:
1040 count = get_param_l(&in_param);
1041 align = get_param_h(&in_param);
1042 err = __mlx4_qp_reserve_range(dev, count, align, &base);
1043 if (err)
1044 return err;
1045
1046 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1047 if (err) {
1048 __mlx4_qp_release_range(dev, base, count);
1049 return err;
1050 }
1051 set_param_l(out_param, base);
1052 break;
1053 case RES_OP_MAP_ICM:
1054 qpn = get_param_l(&in_param) & 0x7fffff;
1055 if (valid_reserved(dev, slave, qpn)) {
1056 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1057 if (err)
1058 return err;
1059 }
1060
1061 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1062 NULL, 1);
1063 if (err)
1064 return err;
1065
1066 if (!valid_reserved(dev, slave, qpn)) {
1067 err = __mlx4_qp_alloc_icm(dev, qpn);
1068 if (err) {
1069 res_abort_move(dev, slave, RES_QP, qpn);
1070 return err;
1071 }
1072 }
1073
1074 res_end_move(dev, slave, RES_QP, qpn);
1075 break;
1076
1077 default:
1078 err = -EINVAL;
1079 break;
1080 }
1081 return err;
1082}
1083
1084static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1085 u64 in_param, u64 *out_param)
1086{
1087 int err = -EINVAL;
1088 int base;
1089 int order;
1090
1091 if (op != RES_OP_RESERVE_AND_MAP)
1092 return err;
1093
1094 order = get_param_l(&in_param);
1095 base = __mlx4_alloc_mtt_range(dev, order);
1096 if (base == -1)
1097 return -ENOMEM;
1098
1099 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1100 if (err)
1101 __mlx4_free_mtt_range(dev, base, order);
1102 else
1103 set_param_l(out_param, base);
1104
1105 return err;
1106}
1107
1108static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1109 u64 in_param, u64 *out_param)
1110{
1111 int err = -EINVAL;
1112 int index;
1113 int id;
1114 struct res_mpt *mpt;
1115
1116 switch (op) {
1117 case RES_OP_RESERVE:
1118 index = __mlx4_mr_reserve(dev);
1119 if (index == -1)
1120 break;
1121 id = index & mpt_mask(dev);
1122
1123 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1124 if (err) {
1125 __mlx4_mr_release(dev, index);
1126 break;
1127 }
1128 set_param_l(out_param, index);
1129 break;
1130 case RES_OP_MAP_ICM:
1131 index = get_param_l(&in_param);
1132 id = index & mpt_mask(dev);
1133 err = mr_res_start_move_to(dev, slave, id,
1134 RES_MPT_MAPPED, &mpt);
1135 if (err)
1136 return err;
1137
1138 err = __mlx4_mr_alloc_icm(dev, mpt->key);
1139 if (err) {
1140 res_abort_move(dev, slave, RES_MPT, id);
1141 return err;
1142 }
1143
1144 res_end_move(dev, slave, RES_MPT, id);
1145 break;
1146 }
1147 return err;
1148}
1149
1150static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1151 u64 in_param, u64 *out_param)
1152{
1153 int cqn;
1154 int err;
1155
1156 switch (op) {
1157 case RES_OP_RESERVE_AND_MAP:
1158 err = __mlx4_cq_alloc_icm(dev, &cqn);
1159 if (err)
1160 break;
1161
1162 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1163 if (err) {
1164 __mlx4_cq_free_icm(dev, cqn);
1165 break;
1166 }
1167
1168 set_param_l(out_param, cqn);
1169 break;
1170
1171 default:
1172 err = -EINVAL;
1173 }
1174
1175 return err;
1176}
1177
1178static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1179 u64 in_param, u64 *out_param)
1180{
1181 int srqn;
1182 int err;
1183
1184 switch (op) {
1185 case RES_OP_RESERVE_AND_MAP:
1186 err = __mlx4_srq_alloc_icm(dev, &srqn);
1187 if (err)
1188 break;
1189
1190 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1191 if (err) {
1192 __mlx4_srq_free_icm(dev, srqn);
1193 break;
1194 }
1195
1196 set_param_l(out_param, srqn);
1197 break;
1198
1199 default:
1200 err = -EINVAL;
1201 }
1202
1203 return err;
1204}
1205
1206static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1207{
1208 struct mlx4_priv *priv = mlx4_priv(dev);
1209 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1210 struct mac_res *res;
1211
1212 res = kzalloc(sizeof *res, GFP_KERNEL);
1213 if (!res)
1214 return -ENOMEM;
1215 res->mac = mac;
1216 res->port = (u8) port;
1217 list_add_tail(&res->list,
1218 &tracker->slave_list[slave].res_list[RES_MAC]);
1219 return 0;
1220}
1221
1222static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1223 int port)
1224{
1225 struct mlx4_priv *priv = mlx4_priv(dev);
1226 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1227 struct list_head *mac_list =
1228 &tracker->slave_list[slave].res_list[RES_MAC];
1229 struct mac_res *res, *tmp;
1230
1231 list_for_each_entry_safe(res, tmp, mac_list, list) {
1232 if (res->mac == mac && res->port == (u8) port) {
1233 list_del(&res->list);
1234 kfree(res);
1235 break;
1236 }
1237 }
1238}
1239
1240static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1241{
1242 struct mlx4_priv *priv = mlx4_priv(dev);
1243 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1244 struct list_head *mac_list =
1245 &tracker->slave_list[slave].res_list[RES_MAC];
1246 struct mac_res *res, *tmp;
1247
1248 list_for_each_entry_safe(res, tmp, mac_list, list) {
1249 list_del(&res->list);
1250 __mlx4_unregister_mac(dev, res->port, res->mac);
1251 kfree(res);
1252 }
1253}
1254
1255static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1256 u64 in_param, u64 *out_param)
1257{
1258 int err = -EINVAL;
1259 int port;
1260 u64 mac;
1261
1262 if (op != RES_OP_RESERVE_AND_MAP)
1263 return err;
1264
1265 port = get_param_l(out_param);
1266 mac = in_param;
1267
1268 err = __mlx4_register_mac(dev, port, mac);
1269 if (err >= 0) {
1270 set_param_l(out_param, err);
1271 err = 0;
1272 }
1273
1274 if (!err) {
1275 err = mac_add_to_slave(dev, slave, mac, port);
1276 if (err)
1277 __mlx4_unregister_mac(dev, port, mac);
1278 }
1279 return err;
1280}
1281
1282static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1283 u64 in_param, u64 *out_param)
1284{
1285 return 0;
1286}
1287
1288static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1289 u64 in_param, u64 *out_param)
1290{
1291 u32 index;
1292 int err;
1293
1294 if (op != RES_OP_RESERVE)
1295 return -EINVAL;
1296
1297 err = __mlx4_counter_alloc(dev, &index);
1298 if (err)
1299 return err;
1300
1301 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1302 if (err)
1303 __mlx4_counter_free(dev, index);
1304 else
1305 set_param_l(out_param, index);
1306
1307 return err;
1308}
1309
1310static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1311 u64 in_param, u64 *out_param)
1312{
1313 u32 xrcdn;
1314 int err;
1315
1316 if (op != RES_OP_RESERVE)
1317 return -EINVAL;
1318
1319 err = __mlx4_xrcd_alloc(dev, &xrcdn);
1320 if (err)
1321 return err;
1322
1323 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1324 if (err)
1325 __mlx4_xrcd_free(dev, xrcdn);
1326 else
1327 set_param_l(out_param, xrcdn);
1328
1329 return err;
1330}
1331
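/*
 * Dispatcher for the ALLOC_RES command issued by slaves: the resource type
 * is taken from vhcr->in_modifier and the allocation opcode from
 * vhcr->op_modifier.
 */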
1332int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1333 struct mlx4_vhcr *vhcr,
1334 struct mlx4_cmd_mailbox *inbox,
1335 struct mlx4_cmd_mailbox *outbox,
1336 struct mlx4_cmd_info *cmd)
1337{
1338 int err;
1339 int alop = vhcr->op_modifier;
1340
1341 switch (vhcr->in_modifier) {
1342 case RES_QP:
1343 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1344 vhcr->in_param, &vhcr->out_param);
1345 break;
1346
1347 case RES_MTT:
1348 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1349 vhcr->in_param, &vhcr->out_param);
1350 break;
1351
1352 case RES_MPT:
1353 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1354 vhcr->in_param, &vhcr->out_param);
1355 break;
1356
1357 case RES_CQ:
1358 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1359 vhcr->in_param, &vhcr->out_param);
1360 break;
1361
1362 case RES_SRQ:
1363 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1364 vhcr->in_param, &vhcr->out_param);
1365 break;
1366
1367 case RES_MAC:
1368 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1369 vhcr->in_param, &vhcr->out_param);
1370 break;
1371
1372 case RES_VLAN:
1373 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1374 vhcr->in_param, &vhcr->out_param);
1375 break;
1376
1377 case RES_COUNTER:
1378 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1379 vhcr->in_param, &vhcr->out_param);
1380 break;
1381
1382 case RES_XRCD:
1383 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1384 vhcr->in_param, &vhcr->out_param);
1385 break;
1386
1387 default:
1388 err = -EINVAL;
1389 break;
1390 }
1391
1392 return err;
1393}
1394
1395static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1396 u64 in_param)
1397{
1398 int err;
1399 int count;
1400 int base;
1401 int qpn;
1402
1403 switch (op) {
1404 case RES_OP_RESERVE:
1405 base = get_param_l(&in_param) & 0x7fffff;
1406 count = get_param_h(&in_param);
1407 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
1408 if (err)
1409 break;
1410 __mlx4_qp_release_range(dev, base, count);
1411 break;
1412 case RES_OP_MAP_ICM:
1413 qpn = get_param_l(&in_param) & 0x7fffff;
1414 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
1415 NULL, 0);
1416 if (err)
1417 return err;
1418
1419 if (!valid_reserved(dev, slave, qpn))
1420 __mlx4_qp_free_icm(dev, qpn);
1421
1422 res_end_move(dev, slave, RES_QP, qpn);
1423
1424 if (valid_reserved(dev, slave, qpn))
1425 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
1426 break;
1427 default:
1428 err = -EINVAL;
1429 break;
1430 }
1431 return err;
1432}
1433
1434static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1435 u64 in_param, u64 *out_param)
1436{
1437 int err = -EINVAL;
1438 int base;
1439 int order;
1440
1441 if (op != RES_OP_RESERVE_AND_MAP)
1442 return err;
1443
1444 base = get_param_l(&in_param);
1445 order = get_param_h(&in_param);
1446 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
1447 if (!err)
1448 __mlx4_free_mtt_range(dev, base, order);
1449 return err;
1450}
1451
1452static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1453 u64 in_param)
1454{
1455 int err = -EINVAL;
1456 int index;
1457 int id;
1458 struct res_mpt *mpt;
1459
1460 switch (op) {
1461 case RES_OP_RESERVE:
1462 index = get_param_l(&in_param);
1463 id = index & mpt_mask(dev);
1464 err = get_res(dev, slave, id, RES_MPT, &mpt);
1465 if (err)
1466 break;
1467 index = mpt->key;
1468 put_res(dev, slave, id, RES_MPT);
1469
1470 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
1471 if (err)
1472 break;
1473 __mlx4_mr_release(dev, index);
1474 break;
1475 case RES_OP_MAP_ICM:
1476 index = get_param_l(&in_param);
1477 id = index & mpt_mask(dev);
1478 err = mr_res_start_move_to(dev, slave, id,
1479 RES_MPT_RESERVED, &mpt);
1480 if (err)
1481 return err;
1482
1483 __mlx4_mr_free_icm(dev, mpt->key);
1484 res_end_move(dev, slave, RES_MPT, id);
1485 return err;
1486 break;
1487 default:
1488 err = -EINVAL;
1489 break;
1490 }
1491 return err;
1492}
1493
1494static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1495 u64 in_param, u64 *out_param)
1496{
1497 int cqn;
1498 int err;
1499
1500 switch (op) {
1501 case RES_OP_RESERVE_AND_MAP:
1502 cqn = get_param_l(&in_param);
1503 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1504 if (err)
1505 break;
1506
1507 __mlx4_cq_free_icm(dev, cqn);
1508 break;
1509
1510 default:
1511 err = -EINVAL;
1512 break;
1513 }
1514
1515 return err;
1516}
1517
1518static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1519 u64 in_param, u64 *out_param)
1520{
1521 int srqn;
1522 int err;
1523
1524 switch (op) {
1525 case RES_OP_RESERVE_AND_MAP:
1526 srqn = get_param_l(&in_param);
1527 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1528 if (err)
1529 break;
1530
1531 __mlx4_srq_free_icm(dev, srqn);
1532 break;
1533
1534 default:
1535 err = -EINVAL;
1536 break;
1537 }
1538
1539 return err;
1540}
1541
1542static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1543 u64 in_param, u64 *out_param)
1544{
1545 int port;
1546 int err = 0;
1547
1548 switch (op) {
1549 case RES_OP_RESERVE_AND_MAP:
1550 port = get_param_l(out_param);
1551 mac_del_from_slave(dev, slave, in_param, port);
1552 __mlx4_unregister_mac(dev, port, in_param);
1553 break;
1554 default:
1555 err = -EINVAL;
1556 break;
1557 }
1558
1559 return err;
1560
1561}
1562
1563static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1564 u64 in_param, u64 *out_param)
1565{
1566 return 0;
1567}
1568
1569static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1570 u64 in_param, u64 *out_param)
1571{
1572 int index;
1573 int err;
1574
1575 if (op != RES_OP_RESERVE)
1576 return -EINVAL;
1577
1578 index = get_param_l(&in_param);
1579 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1580 if (err)
1581 return err;
1582
1583 __mlx4_counter_free(dev, index);
1584
1585 return err;
1586}
1587
1588static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1589 u64 in_param, u64 *out_param)
1590{
1591 int xrcdn;
1592 int err;
1593
1594 if (op != RES_OP_RESERVE)
1595 return -EINVAL;
1596
1597 xrcdn = get_param_l(&in_param);
1598 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1599 if (err)
1600 return err;
1601
1602 __mlx4_xrcd_free(dev, xrcdn);
1603
1604 return err;
1605}
1606
1607int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1608 struct mlx4_vhcr *vhcr,
1609 struct mlx4_cmd_mailbox *inbox,
1610 struct mlx4_cmd_mailbox *outbox,
1611 struct mlx4_cmd_info *cmd)
1612{
1613 int err = -EINVAL;
1614 int alop = vhcr->op_modifier;
1615
1616 switch (vhcr->in_modifier) {
1617 case RES_QP:
1618 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1619 vhcr->in_param);
1620 break;
1621
1622 case RES_MTT:
1623 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1624 vhcr->in_param, &vhcr->out_param);
1625 break;
1626
1627 case RES_MPT:
1628 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1629 vhcr->in_param);
1630 break;
1631
1632 case RES_CQ:
1633 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1634 vhcr->in_param, &vhcr->out_param);
1635 break;
1636
1637 case RES_SRQ:
1638 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1639 vhcr->in_param, &vhcr->out_param);
1640 break;
1641
1642 case RES_MAC:
1643 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1644 vhcr->in_param, &vhcr->out_param);
1645 break;
1646
1647 case RES_VLAN:
1648 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1649 vhcr->in_param, &vhcr->out_param);
1650 break;
1651
1652 case RES_COUNTER:
1653 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1654 vhcr->in_param, &vhcr->out_param);
1655 break;
1656
1657 case RES_XRCD:
1658 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1659 vhcr->in_param, &vhcr->out_param);
1660
1661 default:
1662 break;
1663 }
1664 return err;
1665}
1666
1667/* ugly but other choices are uglier */
1668static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1669{
1670 return (be32_to_cpu(mpt->flags) >> 9) & 1;
1671}
1672
 1673static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
 1674{
 1675	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1676}
1677
1678static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1679{
1680 return be32_to_cpu(mpt->mtt_sz);
1681}
1682
 1683static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1684{
1685 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1686}
1687
 1688static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1689{
1690 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1691}
1692
1693static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1694{
1695 int page_shift = (qpc->log_page_size & 0x3f) + 12;
1696 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1697 int log_sq_sride = qpc->sq_size_stride & 7;
1698 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1699 int log_rq_stride = qpc->rq_size_stride & 7;
1700 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1701 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1702 int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1703 int sq_size;
1704 int rq_size;
1705 int total_pages;
1706 int total_mem;
1707 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1708
1709 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
1710 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1711 total_mem = sq_size + rq_size;
1712 total_pages =
1713 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1714 page_shift);
1715
1716 return total_pages;
1717}
1718
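/*
 * Verify that the MTT range [start, start + size) lies entirely within the
 * MTT segment described by @mtt.
 */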
1719static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1720 int size, struct res_mtt *mtt)
1721{
1722 int res_start = mtt->com.res_id;
1723 int res_size = (1 << mtt->order);
1724
1725 if (start < res_start || start + size > res_start + res_size)
1726 return -EPERM;
1727 return 0;
1728}
1729
1730int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1731 struct mlx4_vhcr *vhcr,
1732 struct mlx4_cmd_mailbox *inbox,
1733 struct mlx4_cmd_mailbox *outbox,
1734 struct mlx4_cmd_info *cmd)
1735{
1736 int err;
1737 int index = vhcr->in_modifier;
1738 struct res_mtt *mtt;
1739 struct res_mpt *mpt;
 1740	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1741 int phys;
1742 int id;
1743
1744 id = index & mpt_mask(dev);
1745 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1746 if (err)
1747 return err;
1748
1749 phys = mr_phys_mpt(inbox->buf);
1750 if (!phys) {
 1751		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1752 if (err)
1753 goto ex_abort;
1754
1755 err = check_mtt_range(dev, slave, mtt_base,
1756 mr_get_mtt_size(inbox->buf), mtt);
1757 if (err)
1758 goto ex_put;
1759
1760 mpt->mtt = mtt;
1761 }
1762
1763 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1764 if (err)
1765 goto ex_put;
1766
1767 if (!phys) {
1768 atomic_inc(&mtt->ref_count);
1769 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1770 }
1771
1772 res_end_move(dev, slave, RES_MPT, id);
1773 return 0;
1774
1775ex_put:
1776 if (!phys)
1777 put_res(dev, slave, mtt->com.res_id, RES_MTT);
1778ex_abort:
1779 res_abort_move(dev, slave, RES_MPT, id);
1780
1781 return err;
1782}
1783
1784int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1785 struct mlx4_vhcr *vhcr,
1786 struct mlx4_cmd_mailbox *inbox,
1787 struct mlx4_cmd_mailbox *outbox,
1788 struct mlx4_cmd_info *cmd)
1789{
1790 int err;
1791 int index = vhcr->in_modifier;
1792 struct res_mpt *mpt;
1793 int id;
1794
1795 id = index & mpt_mask(dev);
1796 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
1797 if (err)
1798 return err;
1799
1800 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1801 if (err)
1802 goto ex_abort;
1803
1804 if (mpt->mtt)
1805 atomic_dec(&mpt->mtt->ref_count);
1806
1807 res_end_move(dev, slave, RES_MPT, id);
1808 return 0;
1809
1810ex_abort:
1811 res_abort_move(dev, slave, RES_MPT, id);
1812
1813 return err;
1814}
1815
1816int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
1817 struct mlx4_vhcr *vhcr,
1818 struct mlx4_cmd_mailbox *inbox,
1819 struct mlx4_cmd_mailbox *outbox,
1820 struct mlx4_cmd_info *cmd)
1821{
1822 int err;
1823 int index = vhcr->in_modifier;
1824 struct res_mpt *mpt;
1825 int id;
1826
1827 id = index & mpt_mask(dev);
1828 err = get_res(dev, slave, id, RES_MPT, &mpt);
1829 if (err)
1830 return err;
1831
1832 if (mpt->com.from_state != RES_MPT_HW) {
1833 err = -EBUSY;
1834 goto out;
1835 }
1836
1837 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1838
1839out:
1840 put_res(dev, slave, id, RES_MPT);
1841 return err;
1842}
1843
1844static int qp_get_rcqn(struct mlx4_qp_context *qpc)
1845{
1846 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
1847}
1848
1849static int qp_get_scqn(struct mlx4_qp_context *qpc)
1850{
1851 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
1852}
1853
1854static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
1855{
1856 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
1857}
1858
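/*
 * RST2INIT: before passing the command to firmware, validate and take
 * references on every resource the QP context points at - the MTT range,
 * the receive and send CQs and, when used, the SRQ.
 */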
1859int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1860 struct mlx4_vhcr *vhcr,
1861 struct mlx4_cmd_mailbox *inbox,
1862 struct mlx4_cmd_mailbox *outbox,
1863 struct mlx4_cmd_info *cmd)
1864{
1865 int err;
1866 int qpn = vhcr->in_modifier & 0x7fffff;
1867 struct res_mtt *mtt;
1868 struct res_qp *qp;
1869 struct mlx4_qp_context *qpc = inbox->buf + 8;
 1870	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
1871 int mtt_size = qp_get_mtt_size(qpc);
1872 struct res_cq *rcq;
1873 struct res_cq *scq;
1874 int rcqn = qp_get_rcqn(qpc);
1875 int scqn = qp_get_scqn(qpc);
1876 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1877 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1878 struct res_srq *srq;
1879 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1880
1881 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
1882 if (err)
1883 return err;
1884 qp->local_qpn = local_qpn;
1885
 1886	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1887 if (err)
1888 goto ex_abort;
1889
1890 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
1891 if (err)
1892 goto ex_put_mtt;
1893
1894 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
1895 if (err)
1896 goto ex_put_mtt;
1897
1898 if (scqn != rcqn) {
1899 err = get_res(dev, slave, scqn, RES_CQ, &scq);
1900 if (err)
1901 goto ex_put_rcq;
1902 } else
1903 scq = rcq;
1904
1905 if (use_srq) {
1906 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
1907 if (err)
1908 goto ex_put_scq;
1909 }
1910
1911 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
1912 if (err)
1913 goto ex_put_srq;
1914 atomic_inc(&mtt->ref_count);
1915 qp->mtt = mtt;
1916 atomic_inc(&rcq->ref_count);
1917 qp->rcq = rcq;
1918 atomic_inc(&scq->ref_count);
1919 qp->scq = scq;
1920
1921 if (scqn != rcqn)
1922 put_res(dev, slave, scqn, RES_CQ);
1923
1924 if (use_srq) {
1925 atomic_inc(&srq->ref_count);
1926 put_res(dev, slave, srqn, RES_SRQ);
1927 qp->srq = srq;
1928 }
1929 put_res(dev, slave, rcqn, RES_CQ);
 1930	put_res(dev, slave, mtt_base, RES_MTT);
1931 res_end_move(dev, slave, RES_QP, qpn);
1932
1933 return 0;
1934
1935ex_put_srq:
1936 if (use_srq)
1937 put_res(dev, slave, srqn, RES_SRQ);
1938ex_put_scq:
1939 if (scqn != rcqn)
1940 put_res(dev, slave, scqn, RES_CQ);
1941ex_put_rcq:
1942 put_res(dev, slave, rcqn, RES_CQ);
1943ex_put_mtt:
 1944	put_res(dev, slave, mtt_base, RES_MTT);
1945ex_abort:
1946 res_abort_move(dev, slave, RES_QP, qpn);
1947
1948 return err;
1949}
1950
 1951static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
1952{
1953 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
1954}
1955
1956static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
1957{
1958 int log_eq_size = eqc->log_eq_size & 0x1f;
1959 int page_shift = (eqc->log_page_size & 0x3f) + 12;
1960
1961 if (log_eq_size + 5 < page_shift)
1962 return 1;
1963
1964 return 1 << (log_eq_size + 5 - page_shift);
1965}
1966
 1967static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
1968{
1969 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
1970}
1971
1972static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
1973{
1974 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
1975 int page_shift = (cqc->log_page_size & 0x3f) + 12;
1976
1977 if (log_cq_size + 5 < page_shift)
1978 return 1;
1979
1980 return 1 << (log_cq_size + 5 - page_shift);
1981}
1982
1983int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
1984 struct mlx4_vhcr *vhcr,
1985 struct mlx4_cmd_mailbox *inbox,
1986 struct mlx4_cmd_mailbox *outbox,
1987 struct mlx4_cmd_info *cmd)
1988{
1989 int err;
1990 int eqn = vhcr->in_modifier;
1991 int res_id = (slave << 8) | eqn;
1992 struct mlx4_eq_context *eqc = inbox->buf;
 1993	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
1994 int mtt_size = eq_get_mtt_size(eqc);
1995 struct res_eq *eq;
1996 struct res_mtt *mtt;
1997
1998 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
1999 if (err)
2000 return err;
2001 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2002 if (err)
2003 goto out_add;
2004
 2005	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2006 if (err)
2007 goto out_move;
2008
2009 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2010 if (err)
2011 goto out_put;
2012
2013 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2014 if (err)
2015 goto out_put;
2016
2017 atomic_inc(&mtt->ref_count);
2018 eq->mtt = mtt;
2019 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2020 res_end_move(dev, slave, RES_EQ, res_id);
2021 return 0;
2022
2023out_put:
2024 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2025out_move:
2026 res_abort_move(dev, slave, RES_EQ, res_id);
2027out_add:
2028 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2029 return err;
2030}
2031
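/*
 * Find the MTT allocation owned by @slave that fully contains the range
 * [start, start + len) and mark it busy; the caller releases it with
 * put_res().
 */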
2032static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2033 int len, struct res_mtt **res)
2034{
2035 struct mlx4_priv *priv = mlx4_priv(dev);
2036 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2037 struct res_mtt *mtt;
2038 int err = -EINVAL;
2039
2040 spin_lock_irq(mlx4_tlock(dev));
2041 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2042 com.list) {
2043 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2044 *res = mtt;
2045 mtt->com.from_state = mtt->com.state;
2046 mtt->com.state = RES_MTT_BUSY;
2047 err = 0;
2048 break;
2049 }
2050 }
2051 spin_unlock_irq(mlx4_tlock(dev));
2052
2053 return err;
2054}
2055
2056int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2057 struct mlx4_vhcr *vhcr,
2058 struct mlx4_cmd_mailbox *inbox,
2059 struct mlx4_cmd_mailbox *outbox,
2060 struct mlx4_cmd_info *cmd)
2061{
2062 struct mlx4_mtt mtt;
2063 __be64 *page_list = inbox->buf;
2064 u64 *pg_list = (u64 *)page_list;
2065 int i;
2066 struct res_mtt *rmtt = NULL;
2067 int start = be64_to_cpu(page_list[0]);
2068 int npages = vhcr->in_modifier;
2069 int err;
2070
2071 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2072 if (err)
2073 return err;
2074
2075 /* Call the SW implementation of write_mtt:
2076 * - Prepare a dummy mtt struct
 2077	 * - Translate inbox contents to simple addresses in host endianness */
2078 mtt.offset = 0; /* TBD this is broken but I don't handle it since
2079 we don't really use it */
2080 mtt.order = 0;
2081 mtt.page_shift = 0;
2082 for (i = 0; i < npages; ++i)
2083 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2084
2085 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2086 ((u64 *)page_list + 2));
2087
2088 if (rmtt)
2089 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2090
2091 return err;
2092}
2093
2094int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2095 struct mlx4_vhcr *vhcr,
2096 struct mlx4_cmd_mailbox *inbox,
2097 struct mlx4_cmd_mailbox *outbox,
2098 struct mlx4_cmd_info *cmd)
2099{
2100 int eqn = vhcr->in_modifier;
2101 int res_id = eqn | (slave << 8);
2102 struct res_eq *eq;
2103 int err;
2104
2105 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2106 if (err)
2107 return err;
2108
2109 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2110 if (err)
2111 goto ex_abort;
2112
2113 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2114 if (err)
2115 goto ex_put;
2116
2117 atomic_dec(&eq->mtt->ref_count);
2118 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2119 res_end_move(dev, slave, RES_EQ, res_id);
2120 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2121
2122 return 0;
2123
2124ex_put:
2125 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2126ex_abort:
2127 res_abort_move(dev, slave, RES_EQ, res_id);
2128
2129 return err;
2130}
2131
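/*
 * Generate an event on a slave's event queue: the EQE is copied into a
 * mailbox and delivered with the GEN_EQE command, but only if the slave has
 * registered an EQ for this event type.
 */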
2132int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2133{
2134 struct mlx4_priv *priv = mlx4_priv(dev);
2135 struct mlx4_slave_event_eq_info *event_eq;
2136 struct mlx4_cmd_mailbox *mailbox;
2137 u32 in_modifier = 0;
2138 int err;
2139 int res_id;
2140 struct res_eq *req;
2141
2142 if (!priv->mfunc.master.slave_state)
2143 return -EINVAL;
2144
 2145	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2146
2147 /* Create the event only if the slave is registered */
 2148	if (event_eq->eqn < 0)
2149 return 0;
2150
2151 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2152 res_id = (slave << 8) | event_eq->eqn;
2153 err = get_res(dev, slave, res_id, RES_EQ, &req);
2154 if (err)
2155 goto unlock;
2156
2157 if (req->com.from_state != RES_EQ_HW) {
2158 err = -EINVAL;
2159 goto put;
2160 }
2161
2162 mailbox = mlx4_alloc_cmd_mailbox(dev);
2163 if (IS_ERR(mailbox)) {
2164 err = PTR_ERR(mailbox);
2165 goto put;
2166 }
2167
2168 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2169 ++event_eq->token;
2170 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2171 }
2172
2173 memcpy(mailbox->buf, (u8 *) eqe, 28);
2174
2175 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2176
2177 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2178 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2179 MLX4_CMD_NATIVE);
2180
2181 put_res(dev, slave, res_id, RES_EQ);
2182 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2183 mlx4_free_cmd_mailbox(dev, mailbox);
2184 return err;
2185
2186put:
2187 put_res(dev, slave, res_id, RES_EQ);
2188
2189unlock:
2190 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2191 return err;
2192}
2193
2194int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2195 struct mlx4_vhcr *vhcr,
2196 struct mlx4_cmd_mailbox *inbox,
2197 struct mlx4_cmd_mailbox *outbox,
2198 struct mlx4_cmd_info *cmd)
2199{
2200 int eqn = vhcr->in_modifier;
2201 int res_id = eqn | (slave << 8);
2202 struct res_eq *eq;
2203 int err;
2204
2205 err = get_res(dev, slave, res_id, RES_EQ, &eq);
2206 if (err)
2207 return err;
2208
2209 if (eq->com.from_state != RES_EQ_HW) {
2210 err = -EINVAL;
2211 goto ex_put;
2212 }
2213
2214 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2215
2216ex_put:
2217 put_res(dev, slave, res_id, RES_EQ);
2218 return err;
2219}
2220
2221int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2222 struct mlx4_vhcr *vhcr,
2223 struct mlx4_cmd_mailbox *inbox,
2224 struct mlx4_cmd_mailbox *outbox,
2225 struct mlx4_cmd_info *cmd)
2226{
2227 int err;
2228 int cqn = vhcr->in_modifier;
2229 struct mlx4_cq_context *cqc = inbox->buf;
 2230	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2231 struct res_cq *cq;
2232 struct res_mtt *mtt;
2233
2234 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2235 if (err)
2236 return err;
 2237	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2238 if (err)
2239 goto out_move;
2240 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2241 if (err)
2242 goto out_put;
2243 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2244 if (err)
2245 goto out_put;
2246 atomic_inc(&mtt->ref_count);
2247 cq->mtt = mtt;
2248 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2249 res_end_move(dev, slave, RES_CQ, cqn);
2250 return 0;
2251
2252out_put:
2253 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2254out_move:
2255 res_abort_move(dev, slave, RES_CQ, cqn);
2256 return err;
2257}
2258
2259int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2260 struct mlx4_vhcr *vhcr,
2261 struct mlx4_cmd_mailbox *inbox,
2262 struct mlx4_cmd_mailbox *outbox,
2263 struct mlx4_cmd_info *cmd)
2264{
2265 int err;
2266 int cqn = vhcr->in_modifier;
2267 struct res_cq *cq;
2268
2269 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2270 if (err)
2271 return err;
2272 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2273 if (err)
2274 goto out_move;
2275 atomic_dec(&cq->mtt->ref_count);
2276 res_end_move(dev, slave, RES_CQ, cqn);
2277 return 0;
2278
2279out_move:
2280 res_abort_move(dev, slave, RES_CQ, cqn);
2281 return err;
2282}
2283
2284int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2285 struct mlx4_vhcr *vhcr,
2286 struct mlx4_cmd_mailbox *inbox,
2287 struct mlx4_cmd_mailbox *outbox,
2288 struct mlx4_cmd_info *cmd)
2289{
2290 int cqn = vhcr->in_modifier;
2291 struct res_cq *cq;
2292 int err;
2293
2294 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2295 if (err)
2296 return err;
2297
2298 if (cq->com.from_state != RES_CQ_HW)
2299 goto ex_put;
2300
2301 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2302ex_put:
2303 put_res(dev, slave, cqn, RES_CQ);
2304
2305 return err;
2306}
2307
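/*
 * MODIFY_CQ with op_modifier 0 is handled here as a CQ resize: check that
 * the MTT currently tracked for the CQ is the one being replaced, validate
 * the new MTT range taken from the modified CQ context, and only after the
 * firmware command succeeds move the reference from the old MTT to the new.
 */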
2308static int handle_resize(struct mlx4_dev *dev, int slave,
2309 struct mlx4_vhcr *vhcr,
2310 struct mlx4_cmd_mailbox *inbox,
2311 struct mlx4_cmd_mailbox *outbox,
2312 struct mlx4_cmd_info *cmd,
2313 struct res_cq *cq)
2314{
2315 int err;
2316 struct res_mtt *orig_mtt;
2317 struct res_mtt *mtt;
2318 struct mlx4_cq_context *cqc = inbox->buf;
2319 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2320
2321 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2322 if (err)
2323 return err;
2324
2325 if (orig_mtt != cq->mtt) {
2326 err = -EINVAL;
2327 goto ex_put;
2328 }
2329
2330 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2331 if (err)
2332 goto ex_put;
2333
2334 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2335 if (err)
2336 goto ex_put1;
2337 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2338 if (err)
2339 goto ex_put1;
2340 atomic_dec(&orig_mtt->ref_count);
2341 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2342 atomic_inc(&mtt->ref_count);
2343 cq->mtt = mtt;
2344 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2345 return 0;
2346
2347ex_put1:
2348 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2349ex_put:
2350 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2351
2352 return err;
2353
2354}
2355
2356int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2357 struct mlx4_vhcr *vhcr,
2358 struct mlx4_cmd_mailbox *inbox,
2359 struct mlx4_cmd_mailbox *outbox,
2360 struct mlx4_cmd_info *cmd)
2361{
2362 int cqn = vhcr->in_modifier;
2363 struct res_cq *cq;
2364 int err;
2365
2366 err = get_res(dev, slave, cqn, RES_CQ, &cq);
2367 if (err)
2368 return err;
2369
2370 if (cq->com.from_state != RES_CQ_HW)
2371 goto ex_put;
2372
2373 if (vhcr->op_modifier == 0) {
2374 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2375 goto ex_put;
2376 }
2377
2378 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2379ex_put:
2380 put_res(dev, slave, cqn, RES_CQ);
2381
2382 return err;
2383}
2384
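/*
 * Number of MTT pages backing the SRQ buffer: (1 << log_srq_size) WQEs of
 * (1 << (log_rq_stride + 4)) bytes each, divided by the page size encoded
 * in the SRQ context, with a minimum of one page.
 */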
2385static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2386{
2387 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2388 int log_rq_stride = srqc->logstride & 7;
2389 int page_shift = (srqc->log_page_size & 0x3f) + 12;
2390
2391 if (log_srq_size + log_rq_stride + 4 < page_shift)
2392 return 1;
2393
2394 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2395}
2396
2397int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2398 struct mlx4_vhcr *vhcr,
2399 struct mlx4_cmd_mailbox *inbox,
2400 struct mlx4_cmd_mailbox *outbox,
2401 struct mlx4_cmd_info *cmd)
2402{
2403 int err;
2404 int srqn = vhcr->in_modifier;
2405 struct res_mtt *mtt;
2406 struct res_srq *srq;
2407 struct mlx4_srq_context *srqc = inbox->buf;
2408 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2409
2410 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2411 return -EINVAL;
2412
2413 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2414 if (err)
2415 return err;
2416 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2417 if (err)
2418 goto ex_abort;
2419 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2420 mtt);
2421 if (err)
2422 goto ex_put_mtt;
2423
2424 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2425 if (err)
2426 goto ex_put_mtt;
2427
2428 atomic_inc(&mtt->ref_count);
2429 srq->mtt = mtt;
2430 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2431 res_end_move(dev, slave, RES_SRQ, srqn);
2432 return 0;
2433
2434ex_put_mtt:
2435 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2436ex_abort:
2437 res_abort_move(dev, slave, RES_SRQ, srqn);
2438
2439 return err;
2440}
2441
2442int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2443 struct mlx4_vhcr *vhcr,
2444 struct mlx4_cmd_mailbox *inbox,
2445 struct mlx4_cmd_mailbox *outbox,
2446 struct mlx4_cmd_info *cmd)
2447{
2448 int err;
2449 int srqn = vhcr->in_modifier;
2450 struct res_srq *srq;
2451
2452 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2453 if (err)
2454 return err;
2455 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2456 if (err)
2457 goto ex_abort;
2458 atomic_dec(&srq->mtt->ref_count);
2459 if (srq->cq)
2460 atomic_dec(&srq->cq->ref_count);
2461 res_end_move(dev, slave, RES_SRQ, srqn);
2462
2463 return 0;
2464
2465ex_abort:
2466 res_abort_move(dev, slave, RES_SRQ, srqn);
2467
2468 return err;
2469}
2470
2471int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2472 struct mlx4_vhcr *vhcr,
2473 struct mlx4_cmd_mailbox *inbox,
2474 struct mlx4_cmd_mailbox *outbox,
2475 struct mlx4_cmd_info *cmd)
2476{
2477 int err;
2478 int srqn = vhcr->in_modifier;
2479 struct res_srq *srq;
2480
2481 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2482 if (err)
2483 return err;
2484 if (srq->com.from_state != RES_SRQ_HW) {
2485 err = -EBUSY;
2486 goto out;
2487 }
2488 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2489out:
2490 put_res(dev, slave, srqn, RES_SRQ);
2491 return err;
2492}
2493
2494int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2495 struct mlx4_vhcr *vhcr,
2496 struct mlx4_cmd_mailbox *inbox,
2497 struct mlx4_cmd_mailbox *outbox,
2498 struct mlx4_cmd_info *cmd)
2499{
2500 int err;
2501 int srqn = vhcr->in_modifier;
2502 struct res_srq *srq;
2503
2504 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2505 if (err)
2506 return err;
2507
2508 if (srq->com.from_state != RES_SRQ_HW) {
2509 err = -EBUSY;
2510 goto out;
2511 }
2512
2513 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2514out:
2515 put_res(dev, slave, srqn, RES_SRQ);
2516 return err;
2517}
2518
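/*
 * Generic wrapper for QP commands that only require the QP to already be
 * in hardware ownership: look the QP up in the tracker, verify its state,
 * and pass the command through to the firmware unchanged.
 */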
2519int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2520 struct mlx4_vhcr *vhcr,
2521 struct mlx4_cmd_mailbox *inbox,
2522 struct mlx4_cmd_mailbox *outbox,
2523 struct mlx4_cmd_info *cmd)
2524{
2525 int err;
2526 int qpn = vhcr->in_modifier & 0x7fffff;
2527 struct res_qp *qp;
2528
2529 err = get_res(dev, slave, qpn, RES_QP, &qp);
2530 if (err)
2531 return err;
2532 if (qp->com.from_state != RES_QP_HW) {
2533 err = -EBUSY;
2534 goto out;
2535 }
2536
2537 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2538out:
2539 put_res(dev, slave, qpn, RES_QP);
2540 return err;
2541}
2542
2543int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2544 struct mlx4_vhcr *vhcr,
2545 struct mlx4_cmd_mailbox *inbox,
2546 struct mlx4_cmd_mailbox *outbox,
2547 struct mlx4_cmd_info *cmd)
2548{
2549 struct mlx4_qp_context *qpc = inbox->buf + 8;
2550
2551 update_ud_gid(dev, qpc, (u8)slave);
2552
2553 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2554}
2555
2556int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2557 struct mlx4_vhcr *vhcr,
2558 struct mlx4_cmd_mailbox *inbox,
2559 struct mlx4_cmd_mailbox *outbox,
2560 struct mlx4_cmd_info *cmd)
2561{
2562 int err;
2563 int qpn = vhcr->in_modifier & 0x7fffff;
2564 struct res_qp *qp;
2565
2566 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2567 if (err)
2568 return err;
2569 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2570 if (err)
2571 goto ex_abort;
2572
2573 atomic_dec(&qp->mtt->ref_count);
2574 atomic_dec(&qp->rcq->ref_count);
2575 atomic_dec(&qp->scq->ref_count);
2576 if (qp->srq)
2577 atomic_dec(&qp->srq->ref_count);
2578 res_end_move(dev, slave, RES_QP, qpn);
2579 return 0;
2580
2581ex_abort:
2582 res_abort_move(dev, slave, RES_QP, qpn);
2583
2584 return err;
2585}
2586
2587static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
2588 struct res_qp *rqp, u8 *gid)
2589{
2590 struct res_gid *res;
2591
2592 list_for_each_entry(res, &rqp->mcg_list, list) {
2593 if (!memcmp(res->gid, gid, 16))
2594 return res;
2595 }
2596 return NULL;
2597}
2598
2599static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2600 u8 *gid, enum mlx4_protocol prot,
2601 enum mlx4_steer_type steer)
2602{
2603 struct res_gid *res;
2604 int err;
2605
2606 res = kzalloc(sizeof *res, GFP_KERNEL);
2607 if (!res)
2608 return -ENOMEM;
2609
2610 spin_lock_irq(&rqp->mcg_spl);
2611 if (find_gid(dev, slave, rqp, gid)) {
2612 kfree(res);
2613 err = -EEXIST;
2614 } else {
2615 memcpy(res->gid, gid, 16);
2616 res->prot = prot;
2617 res->steer = steer;
2618 list_add_tail(&res->list, &rqp->mcg_list);
2619 err = 0;
2620 }
2621 spin_unlock_irq(&rqp->mcg_spl);
2622
2623 return err;
2624}
2625
2626static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
2627 u8 *gid, enum mlx4_protocol prot,
2628 enum mlx4_steer_type steer)
2629{
2630 struct res_gid *res;
2631 int err;
2632
2633 spin_lock_irq(&rqp->mcg_spl);
2634 res = find_gid(dev, slave, rqp, gid);
2635 if (!res || res->prot != prot || res->steer != steer)
2636 err = -EINVAL;
2637 else {
2638 list_del(&res->list);
2639 kfree(res);
2640 err = 0;
2641 }
2642 spin_unlock_irq(&rqp->mcg_spl);
2643
2644 return err;
2645}
2646
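/*
 * Multicast attach/detach on behalf of a slave: record (or remove) the GID
 * on the QP's mcg_list so the attachment can be undone if the slave dies,
 * then perform the real mlx4_qp_attach_common()/mlx4_qp_detach_common().
 */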
2647int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2648 struct mlx4_vhcr *vhcr,
2649 struct mlx4_cmd_mailbox *inbox,
2650 struct mlx4_cmd_mailbox *outbox,
2651 struct mlx4_cmd_info *cmd)
2652{
2653 struct mlx4_qp qp; /* dummy for calling attach/detach */
2654 u8 *gid = inbox->buf;
2655 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
2656 int err;
2657 int qpn;
2658 struct res_qp *rqp;
2659 int attach = vhcr->op_modifier;
2660 int block_loopback = vhcr->in_modifier >> 31;
2661 u8 steer_type_mask = 2;
2662 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2663
2664 qpn = vhcr->in_modifier & 0xffffff;
2665 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2666 if (err)
2667 return err;
2668
2669 qp.qpn = qpn;
2670 if (attach) {
2671 err = add_mcg_res(dev, slave, rqp, gid, prot, type);
2672 if (err)
2673 goto ex_put;
2674
2675 err = mlx4_qp_attach_common(dev, &qp, gid,
2676 block_loopback, prot, type);
2677 if (err)
2678 goto ex_rem;
2679 } else {
2680 err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
2681 if (err)
2682 goto ex_put;
2683 err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
2684 }
2685
2686 put_res(dev, slave, qpn, RES_QP);
2687 return 0;
2688
2689ex_rem:
2690 /* ignore error return below, already in error */
2691 (void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
2692ex_put:
2693 put_res(dev, slave, qpn, RES_QP);
2694
2695 return err;
2696}
2697
2698enum {
2699 BUSY_MAX_RETRIES = 10
2700};
2701
2702int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
2703 struct mlx4_vhcr *vhcr,
2704 struct mlx4_cmd_mailbox *inbox,
2705 struct mlx4_cmd_mailbox *outbox,
2706 struct mlx4_cmd_info *cmd)
2707{
2708 int err;
2709 int index = vhcr->in_modifier & 0xffff;
2710
2711 err = get_res(dev, slave, index, RES_COUNTER, NULL);
2712 if (err)
2713 return err;
2714
2715 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2716 put_res(dev, slave, index, RES_COUNTER);
2717 return err;
2718}
2719
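/* Detach the QP from every multicast group still recorded on its mcg_list. */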
2720static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
2721{
2722 struct res_gid *rgid;
2723 struct res_gid *tmp;
2724 struct mlx4_qp qp; /* dummy for calling attach/detach */
2725
2726 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
2727 qp.qpn = rqp->local_qpn;
2728 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
2729 rgid->steer);
2730 list_del(&rgid->list);
2731 kfree(rgid);
2732 }
2733}
2734
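/*
 * Flag every resource of @type owned by @slave for removal, moving it to
 * the busy state; returns the number of resources that could not be
 * claimed because another command still holds them busy.
 */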
2735static int _move_all_busy(struct mlx4_dev *dev, int slave,
2736 enum mlx4_resource type, int print)
2737{
2738 struct mlx4_priv *priv = mlx4_priv(dev);
2739 struct mlx4_resource_tracker *tracker =
2740 &priv->mfunc.master.res_tracker;
2741 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
2742 struct res_common *r;
2743 struct res_common *tmp;
2744 int busy;
2745
2746 busy = 0;
2747 spin_lock_irq(mlx4_tlock(dev));
2748 list_for_each_entry_safe(r, tmp, rlist, list) {
2749 if (r->owner == slave) {
2750 if (!r->removing) {
2751 if (r->state == RES_ANY_BUSY) {
2752 if (print)
2753 mlx4_dbg(dev,
2754 "%s id 0x%x is busy\n",
2755 ResourceType(type),
2756 r->res_id);
2757 ++busy;
2758 } else {
2759 r->from_state = r->state;
2760 r->state = RES_ANY_BUSY;
2761 r->removing = 1;
2762 }
2763 }
2764 }
2765 }
2766 spin_unlock_irq(mlx4_tlock(dev));
2767
2768 return busy;
2769}
2770
2771static int move_all_busy(struct mlx4_dev *dev, int slave,
2772 enum mlx4_resource type)
2773{
2774 unsigned long begin;
2775 int busy;
2776
2777 begin = jiffies;
2778 do {
2779 busy = _move_all_busy(dev, slave, type, 0);
2780 if (time_after(jiffies, begin + 5 * HZ))
2781 break;
2782 if (busy)
2783 cond_resched();
2784 } while (busy);
2785
2786 if (busy)
2787 busy = _move_all_busy(dev, slave, type, 1);
2788
2789 return busy;
2790}
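
/*
 * Force-release every QP still owned by the slave: detach its multicast
 * groups, move it back to reset with 2RST_QP if it is in hardware
 * ownership, free its ICM mapping, and drop the tracker entry.
 */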
2791static void rem_slave_qps(struct mlx4_dev *dev, int slave)
2792{
2793 struct mlx4_priv *priv = mlx4_priv(dev);
2794 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2795 struct list_head *qp_list =
2796 &tracker->slave_list[slave].res_list[RES_QP];
2797 struct res_qp *qp;
2798 struct res_qp *tmp;
2799 int state;
2800 u64 in_param;
2801 int qpn;
2802 int err;
2803
2804 err = move_all_busy(dev, slave, RES_QP);
2805 if (err)
2806 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
2807 "for slave %d\n", slave);
2808
2809 spin_lock_irq(mlx4_tlock(dev));
2810 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
2811 spin_unlock_irq(mlx4_tlock(dev));
2812 if (qp->com.owner == slave) {
2813 qpn = qp->com.res_id;
2814 detach_qp(dev, slave, qp);
2815 state = qp->com.from_state;
2816 while (state != 0) {
2817 switch (state) {
2818 case RES_QP_RESERVED:
2819 spin_lock_irq(mlx4_tlock(dev));
2820 radix_tree_delete(&tracker->res_tree[RES_QP],
2821 qp->com.res_id);
2822 list_del(&qp->com.list);
2823 spin_unlock_irq(mlx4_tlock(dev));
2824 kfree(qp);
2825 state = 0;
2826 break;
2827 case RES_QP_MAPPED:
2828 if (!valid_reserved(dev, slave, qpn))
2829 __mlx4_qp_free_icm(dev, qpn);
2830 state = RES_QP_RESERVED;
2831 break;
2832 case RES_QP_HW:
2833 in_param = slave;
2834 err = mlx4_cmd(dev, in_param,
2835 qp->local_qpn, 2,
2836 MLX4_CMD_2RST_QP,
2837 MLX4_CMD_TIME_CLASS_A,
2838 MLX4_CMD_NATIVE);
2839 if (err)
2840 mlx4_dbg(dev, "rem_slave_qps: failed"
2841 " to move slave %d qpn %d to"
2842 " reset\n", slave,
2843 qp->local_qpn);
2844 atomic_dec(&qp->rcq->ref_count);
2845 atomic_dec(&qp->scq->ref_count);
2846 atomic_dec(&qp->mtt->ref_count);
2847 if (qp->srq)
2848 atomic_dec(&qp->srq->ref_count);
2849 state = RES_QP_MAPPED;
2850 break;
2851 default:
2852 state = 0;
2853 }
2854 }
2855 }
2856 spin_lock_irq(mlx4_tlock(dev));
2857 }
2858 spin_unlock_irq(mlx4_tlock(dev));
2859}
2860
2861static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
2862{
2863 struct mlx4_priv *priv = mlx4_priv(dev);
2864 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2865 struct list_head *srq_list =
2866 &tracker->slave_list[slave].res_list[RES_SRQ];
2867 struct res_srq *srq;
2868 struct res_srq *tmp;
2869 int state;
2870 u64 in_param;
2871 LIST_HEAD(tlist);
2872 int srqn;
2873 int err;
2874
2875 err = move_all_busy(dev, slave, RES_SRQ);
2876 if (err)
2877 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
2878 "busy for slave %d\n", slave);
2879
2880 spin_lock_irq(mlx4_tlock(dev));
2881 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
2882 spin_unlock_irq(mlx4_tlock(dev));
2883 if (srq->com.owner == slave) {
2884 srqn = srq->com.res_id;
2885 state = srq->com.from_state;
2886 while (state != 0) {
2887 switch (state) {
2888 case RES_SRQ_ALLOCATED:
2889 __mlx4_srq_free_icm(dev, srqn);
2890 spin_lock_irq(mlx4_tlock(dev));
2891 radix_tree_delete(&tracker->res_tree[RES_SRQ],
2892 srqn);
2893 list_del(&srq->com.list);
2894 spin_unlock_irq(mlx4_tlock(dev));
2895 kfree(srq);
2896 state = 0;
2897 break;
2898
2899 case RES_SRQ_HW:
2900 in_param = slave;
2901 err = mlx4_cmd(dev, in_param, srqn, 1,
2902 MLX4_CMD_HW2SW_SRQ,
2903 MLX4_CMD_TIME_CLASS_A,
2904 MLX4_CMD_NATIVE);
2905 if (err)
2906 mlx4_dbg(dev, "rem_slave_srqs: failed"
2907 " to move slave %d srq %d to"
2908 " SW ownership\n",
2909 slave, srqn);
2910
2911 atomic_dec(&srq->mtt->ref_count);
2912 if (srq->cq)
2913 atomic_dec(&srq->cq->ref_count);
2914 state = RES_SRQ_ALLOCATED;
2915 break;
2916
2917 default:
2918 state = 0;
2919 }
2920 }
2921 }
2922 spin_lock_irq(mlx4_tlock(dev));
2923 }
2924 spin_unlock_irq(mlx4_tlock(dev));
2925}
2926
2927static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
2928{
2929 struct mlx4_priv *priv = mlx4_priv(dev);
2930 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2931 struct list_head *cq_list =
2932 &tracker->slave_list[slave].res_list[RES_CQ];
2933 struct res_cq *cq;
2934 struct res_cq *tmp;
2935 int state;
2936 u64 in_param;
2937 LIST_HEAD(tlist);
2938 int cqn;
2939 int err;
2940
2941 err = move_all_busy(dev, slave, RES_CQ);
2942 if (err)
2943 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
2944 "busy for slave %d\n", slave);
2945
2946 spin_lock_irq(mlx4_tlock(dev));
2947 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
2948 spin_unlock_irq(mlx4_tlock(dev));
2949 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
2950 cqn = cq->com.res_id;
2951 state = cq->com.from_state;
2952 while (state != 0) {
2953 switch (state) {
2954 case RES_CQ_ALLOCATED:
2955 __mlx4_cq_free_icm(dev, cqn);
2956 spin_lock_irq(mlx4_tlock(dev));
2957 radix_tree_delete(&tracker->res_tree[RES_CQ],
2958 cqn);
2959 list_del(&cq->com.list);
2960 spin_unlock_irq(mlx4_tlock(dev));
2961 kfree(cq);
2962 state = 0;
2963 break;
2964
2965 case RES_CQ_HW:
2966 in_param = slave;
2967 err = mlx4_cmd(dev, in_param, cqn, 1,
2968 MLX4_CMD_HW2SW_CQ,
2969 MLX4_CMD_TIME_CLASS_A,
2970 MLX4_CMD_NATIVE);
2971 if (err)
2972 mlx4_dbg(dev, "rem_slave_cqs: failed"
2973 " to move slave %d cq %d to"
2974 " SW ownership\n",
2975 slave, cqn);
2976 atomic_dec(&cq->mtt->ref_count);
2977 state = RES_CQ_ALLOCATED;
2978 break;
2979
2980 default:
2981 state = 0;
2982 }
2983 }
2984 }
2985 spin_lock_irq(mlx4_tlock(dev));
2986 }
2987 spin_unlock_irq(mlx4_tlock(dev));
2988}
2989
2990static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
2991{
2992 struct mlx4_priv *priv = mlx4_priv(dev);
2993 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2994 struct list_head *mpt_list =
2995 &tracker->slave_list[slave].res_list[RES_MPT];
2996 struct res_mpt *mpt;
2997 struct res_mpt *tmp;
2998 int state;
2999 u64 in_param;
3000 LIST_HEAD(tlist);
3001 int mptn;
3002 int err;
3003
3004 err = move_all_busy(dev, slave, RES_MPT);
3005 if (err)
3006 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
3007 "busy for slave %d\n", slave);
3008
3009 spin_lock_irq(mlx4_tlock(dev));
3010 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3011 spin_unlock_irq(mlx4_tlock(dev));
3012 if (mpt->com.owner == slave) {
3013 mptn = mpt->com.res_id;
3014 state = mpt->com.from_state;
3015 while (state != 0) {
3016 switch (state) {
3017 case RES_MPT_RESERVED:
3018 __mlx4_mr_release(dev, mpt->key);
3019 spin_lock_irq(mlx4_tlock(dev));
3020 radix_tree_delete(&tracker->res_tree[RES_MPT],
3021 mptn);
3022 list_del(&mpt->com.list);
3023 spin_unlock_irq(mlx4_tlock(dev));
3024 kfree(mpt);
3025 state = 0;
3026 break;
3027
3028 case RES_MPT_MAPPED:
3029 __mlx4_mr_free_icm(dev, mpt->key);
3030 state = RES_MPT_RESERVED;
3031 break;
3032
3033 case RES_MPT_HW:
3034 in_param = slave;
3035 err = mlx4_cmd(dev, in_param, mptn, 0,
3036 MLX4_CMD_HW2SW_MPT,
3037 MLX4_CMD_TIME_CLASS_A,
3038 MLX4_CMD_NATIVE);
3039 if (err)
3040 mlx4_dbg(dev, "rem_slave_mrs: failed"
3041 " to move slave %d mpt %d to"
3042 " SW ownership\n",
3043 slave, mptn);
3044 if (mpt->mtt)
3045 atomic_dec(&mpt->mtt->ref_count);
3046 state = RES_MPT_MAPPED;
3047 break;
3048 default:
3049 state = 0;
3050 }
3051 }
3052 }
3053 spin_lock_irq(mlx4_tlock(dev));
3054 }
3055 spin_unlock_irq(mlx4_tlock(dev));
3056}
3057
3058static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3059{
3060 struct mlx4_priv *priv = mlx4_priv(dev);
3061 struct mlx4_resource_tracker *tracker =
3062 &priv->mfunc.master.res_tracker;
3063 struct list_head *mtt_list =
3064 &tracker->slave_list[slave].res_list[RES_MTT];
3065 struct res_mtt *mtt;
3066 struct res_mtt *tmp;
3067 int state;
3068 LIST_HEAD(tlist);
3069 int base;
3070 int err;
3071
3072 err = move_all_busy(dev, slave, RES_MTT);
3073 if (err)
3074 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
3075 "busy for slave %d\n", slave);
3076
3077 spin_lock_irq(mlx4_tlock(dev));
3078 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3079 spin_unlock_irq(mlx4_tlock(dev));
3080 if (mtt->com.owner == slave) {
3081 base = mtt->com.res_id;
3082 state = mtt->com.from_state;
3083 while (state != 0) {
3084 switch (state) {
3085 case RES_MTT_ALLOCATED:
3086 __mlx4_free_mtt_range(dev, base,
3087 mtt->order);
3088 spin_lock_irq(mlx4_tlock(dev));
3089 radix_tree_delete(&tracker->res_tree[RES_MTT],
3090 base);
3091 list_del(&mtt->com.list);
3092 spin_unlock_irq(mlx4_tlock(dev));
3093 kfree(mtt);
3094 state = 0;
3095 break;
3096
3097 default:
3098 state = 0;
3099 }
3100 }
3101 }
3102 spin_lock_irq(mlx4_tlock(dev));
3103 }
3104 spin_unlock_irq(mlx4_tlock(dev));
3105}
3106
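/*
 * EQs still in hardware ownership are handed back to software with
 * HW2SW_EQ issued on the slave's behalf before their tracker entries
 * are freed.
 */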
3107static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3108{
3109 struct mlx4_priv *priv = mlx4_priv(dev);
3110 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3111 struct list_head *eq_list =
3112 &tracker->slave_list[slave].res_list[RES_EQ];
3113 struct res_eq *eq;
3114 struct res_eq *tmp;
3115 int err;
3116 int state;
3117 LIST_HEAD(tlist);
3118 int eqn;
3119 struct mlx4_cmd_mailbox *mailbox;
3120
3121 err = move_all_busy(dev, slave, RES_EQ);
3122 if (err)
3123 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
3124 "busy for slave %d\n", slave);
3125
3126 spin_lock_irq(mlx4_tlock(dev));
3127 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3128 spin_unlock_irq(mlx4_tlock(dev));
3129 if (eq->com.owner == slave) {
3130 eqn = eq->com.res_id;
3131 state = eq->com.from_state;
3132 while (state != 0) {
3133 switch (state) {
3134 case RES_EQ_RESERVED:
3135 spin_lock_irq(mlx4_tlock(dev));
3136 radix_tree_delete(&tracker->res_tree[RES_EQ],
3137 eqn);
3138 list_del(&eq->com.list);
3139 spin_unlock_irq(mlx4_tlock(dev));
3140 kfree(eq);
3141 state = 0;
3142 break;
3143
3144 case RES_EQ_HW:
3145 mailbox = mlx4_alloc_cmd_mailbox(dev);
3146 if (IS_ERR(mailbox)) {
3147 cond_resched();
3148 continue;
3149 }
3150 err = mlx4_cmd_box(dev, slave, 0,
3151 eqn & 0xff, 0,
3152 MLX4_CMD_HW2SW_EQ,
3153 MLX4_CMD_TIME_CLASS_A,
3154 MLX4_CMD_NATIVE);
3155 if (err)
3156 mlx4_dbg(dev, "rem_slave_eqs: failed"
3157 " to move slave %d eqs %d to"
3158 " SW ownership\n", slave, eqn);
3159 mlx4_free_cmd_mailbox(dev, mailbox);
3160 atomic_dec(&eq->mtt->ref_count);
3161 state = RES_EQ_RESERVED;
3162 break;
3163
3164 default:
3165 state = 0;
3166 }
3167 }
3168 }
3169 spin_lock_irq(mlx4_tlock(dev));
3170 }
3171 spin_unlock_irq(mlx4_tlock(dev));
3172}
3173
3174static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3175{
3176 struct mlx4_priv *priv = mlx4_priv(dev);
3177 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3178 struct list_head *counter_list =
3179 &tracker->slave_list[slave].res_list[RES_COUNTER];
3180 struct res_counter *counter;
3181 struct res_counter *tmp;
3182 int err;
3183 int index;
3184
3185 err = move_all_busy(dev, slave, RES_COUNTER);
3186 if (err)
3187 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
3188 "busy for slave %d\n", slave);
3189
3190 spin_lock_irq(mlx4_tlock(dev));
3191 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3192 if (counter->com.owner == slave) {
3193 index = counter->com.res_id;
3194 radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
3195 list_del(&counter->com.list);
3196 kfree(counter);
3197 __mlx4_counter_free(dev, index);
3198 }
3199 }
3200 spin_unlock_irq(mlx4_tlock(dev));
3201}
3202
3203static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3204{
3205 struct mlx4_priv *priv = mlx4_priv(dev);
3206 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3207 struct list_head *xrcdn_list =
3208 &tracker->slave_list[slave].res_list[RES_XRCD];
3209 struct res_xrcdn *xrcd;
3210 struct res_xrcdn *tmp;
3211 int err;
3212 int xrcdn;
3213
3214 err = move_all_busy(dev, slave, RES_XRCD);
3215 if (err)
3216 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
3217 "busy for slave %d\n", slave);
3218
3219 spin_lock_irq(mlx4_tlock(dev));
3220 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3221 if (xrcd->com.owner == slave) {
3222 xrcdn = xrcd->com.res_id;
3223 radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
3224 list_del(&xrcd->com.list);
3225 kfree(xrcd);
3226 __mlx4_xrcd_free(dev, xrcdn);
3227 }
3228 }
3229 spin_unlock_irq(mlx4_tlock(dev));
3230}
3231
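/*
 * Release, under the per-slave mutex, every resource the slave still owns,
 * ordered so that referencing objects go first: QPs before the CQs, SRQs
 * and MTTs they point at, and MTTs only after the MRs, CQs, SRQs and EQs
 * that reference them.
 */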
3232void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3233{
3234 struct mlx4_priv *priv = mlx4_priv(dev);
3235
3236 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3237 /*VLAN*/
3238 rem_slave_macs(dev, slave);
3239 rem_slave_qps(dev, slave);
3240 rem_slave_srqs(dev, slave);
3241 rem_slave_cqs(dev, slave);
3242 rem_slave_mrs(dev, slave);
3243 rem_slave_eqs(dev, slave);
3244 rem_slave_mtts(dev, slave);
3245 rem_slave_counters(dev, slave);
3246 rem_slave_xrcdns(dev, slave);
3247 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3248}