/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/hardirq.h>
36 #include <linux/mlx5/driver.h>
37 #include <linux/mlx5/cmd.h>
38 #include <rdma/ib_verbs.h>
39 #include <linux/mlx5/cq.h>
40 #include "mlx5_core.h"
42 #define TASKLET_MAX_TIME 2
43 #define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
45 void mlx5_cq_tasklet_cb(unsigned long data
)
48 unsigned long end
= jiffies
+ TASKLET_MAX_TIME_JIFFIES
;
49 struct mlx5_eq_tasklet
*ctx
= (struct mlx5_eq_tasklet
*)data
;
50 struct mlx5_core_cq
*mcq
;
51 struct mlx5_core_cq
*temp
;
53 spin_lock_irqsave(&ctx
->lock
, flags
);
54 list_splice_tail_init(&ctx
->list
, &ctx
->process_list
);
55 spin_unlock_irqrestore(&ctx
->lock
, flags
);
57 list_for_each_entry_safe(mcq
, temp
, &ctx
->process_list
,
59 list_del_init(&mcq
->tasklet_ctx
.list
);
60 mcq
->tasklet_ctx
.comp(mcq
);
61 if (atomic_dec_and_test(&mcq
->refcount
))
63 if (time_after(jiffies
, end
))
67 if (!list_empty(&ctx
->process_list
))
68 tasklet_schedule(&ctx
->task
);
71 static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq
*cq
)
74 struct mlx5_eq_tasklet
*tasklet_ctx
= cq
->tasklet_ctx
.priv
;
76 spin_lock_irqsave(&tasklet_ctx
->lock
, flags
);
77 /* When migrating CQs between EQs will be implemented, please note
78 * that you need to sync this point. It is possible that
79 * while migrating a CQ, completions on the old EQs could
82 if (list_empty_careful(&cq
->tasklet_ctx
.list
)) {
83 atomic_inc(&cq
->refcount
);
84 list_add_tail(&cq
->tasklet_ctx
.list
, &tasklet_ctx
->list
);
86 spin_unlock_irqrestore(&tasklet_ctx
->lock
, flags
);
89 void mlx5_cq_completion(struct mlx5_core_dev
*dev
, u32 cqn
)
91 struct mlx5_core_cq
*cq
;
92 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
94 spin_lock(&table
->lock
);
95 cq
= radix_tree_lookup(&table
->tree
, cqn
);
97 atomic_inc(&cq
->refcount
);
98 spin_unlock(&table
->lock
);
101 mlx5_core_warn(dev
, "Completion event for bogus CQ 0x%x\n", cqn
);
109 if (atomic_dec_and_test(&cq
->refcount
))
113 void mlx5_cq_event(struct mlx5_core_dev
*dev
, u32 cqn
, int event_type
)
115 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
116 struct mlx5_core_cq
*cq
;
118 spin_lock(&table
->lock
);
120 cq
= radix_tree_lookup(&table
->tree
, cqn
);
122 atomic_inc(&cq
->refcount
);
124 spin_unlock(&table
->lock
);
127 mlx5_core_warn(dev
, "Async event for bogus CQ 0x%x\n", cqn
);
131 cq
->event(cq
, event_type
);
133 if (atomic_dec_and_test(&cq
->refcount
))
138 int mlx5_core_create_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
139 struct mlx5_create_cq_mbox_in
*in
, int inlen
)
142 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
143 struct mlx5_create_cq_mbox_out out
;
144 struct mlx5_destroy_cq_mbox_in din
;
145 struct mlx5_destroy_cq_mbox_out dout
;
146 int eqn
= MLX5_GET(cqc
, MLX5_ADDR_OF(create_cq_in
, in
, cq_context
),
150 eq
= mlx5_eqn2eq(dev
, eqn
);
154 in
->hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_CREATE_CQ
);
155 memset(&out
, 0, sizeof(out
));
156 err
= mlx5_cmd_exec(dev
, in
, inlen
, &out
, sizeof(out
));
161 return mlx5_cmd_status_to_err(&out
.hdr
);
163 cq
->cqn
= be32_to_cpu(out
.cqn
) & 0xffffff;
166 atomic_set(&cq
->refcount
, 1);
167 init_completion(&cq
->free
);
169 cq
->comp
= mlx5_add_cq_to_tasklet
;
170 /* assuming CQ will be deleted before the EQ */
171 cq
->tasklet_ctx
.priv
= &eq
->tasklet_ctx
;
172 INIT_LIST_HEAD(&cq
->tasklet_ctx
.list
);
174 spin_lock_irq(&table
->lock
);
175 err
= radix_tree_insert(&table
->tree
, cq
->cqn
, cq
);
176 spin_unlock_irq(&table
->lock
);
180 cq
->pid
= current
->pid
;
181 err
= mlx5_debug_cq_add(dev
, cq
);
183 mlx5_core_dbg(dev
, "failed adding CP 0x%x to debug file system\n",
189 memset(&din
, 0, sizeof(din
));
190 memset(&dout
, 0, sizeof(dout
));
191 din
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ
);
192 mlx5_cmd_exec(dev
, &din
, sizeof(din
), &dout
, sizeof(dout
));
195 EXPORT_SYMBOL(mlx5_core_create_cq
);
197 int mlx5_core_destroy_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
)
199 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
200 struct mlx5_destroy_cq_mbox_in in
;
201 struct mlx5_destroy_cq_mbox_out out
;
202 struct mlx5_core_cq
*tmp
;
205 spin_lock_irq(&table
->lock
);
206 tmp
= radix_tree_delete(&table
->tree
, cq
->cqn
);
207 spin_unlock_irq(&table
->lock
);
209 mlx5_core_warn(dev
, "cq 0x%x not found in tree\n", cq
->cqn
);
213 mlx5_core_warn(dev
, "corruption on srqn 0x%x\n", cq
->cqn
);
217 memset(&in
, 0, sizeof(in
));
218 memset(&out
, 0, sizeof(out
));
219 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ
);
220 in
.cqn
= cpu_to_be32(cq
->cqn
);
221 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
226 return mlx5_cmd_status_to_err(&out
.hdr
);
228 synchronize_irq(cq
->irqn
);
230 mlx5_debug_cq_remove(dev
, cq
);
231 if (atomic_dec_and_test(&cq
->refcount
))
233 wait_for_completion(&cq
->free
);
237 EXPORT_SYMBOL(mlx5_core_destroy_cq
);
239 int mlx5_core_query_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
240 struct mlx5_query_cq_mbox_out
*out
)
242 struct mlx5_query_cq_mbox_in in
;
245 memset(&in
, 0, sizeof(in
));
246 memset(out
, 0, sizeof(*out
));
248 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_QUERY_CQ
);
249 in
.cqn
= cpu_to_be32(cq
->cqn
);
250 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), out
, sizeof(*out
));
255 return mlx5_cmd_status_to_err(&out
->hdr
);
259 EXPORT_SYMBOL(mlx5_core_query_cq
);
262 int mlx5_core_modify_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
263 struct mlx5_modify_cq_mbox_in
*in
, int in_sz
)
265 struct mlx5_modify_cq_mbox_out out
;
268 memset(&out
, 0, sizeof(out
));
269 in
->hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ
);
270 err
= mlx5_cmd_exec(dev
, in
, in_sz
, &out
, sizeof(out
));
275 return mlx5_cmd_status_to_err(&out
.hdr
);
279 EXPORT_SYMBOL(mlx5_core_modify_cq
);
281 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev
*dev
,
282 struct mlx5_core_cq
*cq
,
286 struct mlx5_modify_cq_mbox_in in
;
288 memset(&in
, 0, sizeof(in
));
290 in
.cqn
= cpu_to_be32(cq
->cqn
);
291 in
.ctx
.cq_period
= cpu_to_be16(cq_period
);
292 in
.ctx
.cq_max_count
= cpu_to_be16(cq_max_count
);
293 in
.field_select
= cpu_to_be32(MLX5_CQ_MODIFY_PERIOD
|
294 MLX5_CQ_MODIFY_COUNT
);
296 return mlx5_core_modify_cq(dev
, cq
, &in
, sizeof(in
));
299 int mlx5_init_cq_table(struct mlx5_core_dev
*dev
)
301 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
304 memset(table
, 0, sizeof(*table
));
305 spin_lock_init(&table
->lock
);
306 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
307 err
= mlx5_cq_debugfs_init(dev
);
/* Tear down what mlx5_init_cq_table() set up; only the debugfs entries
 * need explicit cleanup.
 */
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}