drivers/net/ethernet/mellanox/mlx5/core/srq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include <linux/mlx5/transobj.h>

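/* Async event dispatch: look up the SRQ by number under the table lock and
 * take a temporary reference so it cannot be freed while its event handler
 * runs; the reference is dropped (completing any waiter in destroy) once
 * the handler returns.
 */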
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        if (!srq) {
                mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
                return;
        }

        srq->event(srq, event_type);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
}

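/* Compute the size in bytes of the PAS (physical address) array for an SRQ
 * with the given attributes: log_page_size is relative to 4KB pages, each
 * WQE stride is 16 << wqe_shift bytes, page_offset is counted in 64ths of
 * a page, and the total is rounded up to whole pages.
 */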
static int get_pas_size(struct mlx5_srq_attr *in)
{
        u32 log_page_size = in->log_page_size + 12;
        u32 log_srq_size  = in->log_size;
        u32 log_rq_stride = in->wqe_shift;
        u32 page_offset   = in->page_offset;
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}

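/* set_wq() and set_srqc() translate an mlx5_srq_attr into the firmware WQ
 * and SRQ context layouts, respectively (the WQ layout is used by the RMP
 * path, the SRQ layout by the SRQ and XRC_SRQ paths).
 */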
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
        MLX5_SET(wq, wq, wq_signature, !!(in->flags
                 & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
        MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
        MLX5_SET(wq, wq, log_wq_sz, in->log_size);
        MLX5_SET(wq, wq, page_offset, in->page_offset);
        MLX5_SET(wq, wq, lwm, in->lwm);
        MLX5_SET(wq, wq, pd, in->pd);
        MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
                 & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
        MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
        MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
        MLX5_SET(srqc, srqc, page_offset, in->page_offset);
        MLX5_SET(srqc, srqc, lwm, in->lwm);
        MLX5_SET(srqc, srqc, pd, in->pd);
        MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
        MLX5_SET(srqc, srqc, xrcd, in->xrcd);
        MLX5_SET(srqc, srqc, cqn, in->cqn);
}

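/* get_wq() and get_srqc() perform the inverse translation, decoding a
 * firmware context back into an mlx5_srq_attr for query operations.
 */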
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(wq, wq, wq_signature))
                in->flags |= MLX5_SRQ_FLAG_WQ_SIG; /* |=, not &=: set the flag without clobbering others */
        in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
        in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
        in->log_size = MLX5_GET(wq, wq, log_wq_sz);
        in->page_offset = MLX5_GET(wq, wq, page_offset);
        in->lwm = MLX5_GET(wq, wq, lwm);
        in->pd = MLX5_GET(wq, wq, pd);
        in->db_record = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(srqc, srqc, wq_signature))
                in->flags |= MLX5_SRQ_FLAG_WQ_SIG; /* |=, not &=: set the flag without clobbering others */
        in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
        in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
        in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
        in->page_offset = MLX5_GET(srqc, srqc, page_offset);
        in->lwm = MLX5_GET(srqc, srqc, lwm);
        in->pd = MLX5_GET(srqc, srqc, pd);
        in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}

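/* Look up an SRQ by number and take a reference on it; the caller is
 * responsible for dropping the reference when done.
 */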
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

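/* ISSI 0 (legacy interface step sequence ID) command set: the firmware
 * exposes native SRQ objects, so create, destroy, arm and query map
 * directly onto SRQ commands.
 */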
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
        void *create_in;
        void *srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
        pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

        set_srqc(srqc, in);
        memcpy(pas, in->pas, pas_size);

        MLX5_SET(create_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_SRQ);

        err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
                                         sizeof(create_out));
        kvfree(create_in);
        if (!err)
                srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

        return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

        MLX5_SET(destroy_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

        return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
                                          srq_out, sizeof(srq_out));
}

static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
{
        /* The arm_srq input/output layouts are missing from the firmware
         * interface definition; the arm_xrc_srq layouts are identical, so
         * reuse them here.
         */
        u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);

        return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
                                          srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
        u32 *srq_out;
        void *srqc;
        int err;

        srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
        if (!srq_out)
                return -ENOMEM;

        MLX5_SET(query_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_QUERY_SRQ);
        MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
        err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
                                         srq_out,
                                         MLX5_ST_SZ_BYTES(query_srq_out));
        if (err)
                goto out;

        srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
        get_srqc(srqc, out);
        if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;
out:
        kvfree(srq_out);
        return err;
}

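/* XRC SRQ command set: used on ISSI 1 firmware when the resource was
 * created as an XRC SRQ (MLX5_RES_XSRQ).
 */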
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
                              struct mlx5_core_srq *srq,
                              struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
        void *create_in;
        void *xrc_srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                                xrc_srq_context_entry);
        pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

        set_srqc(xrc_srqc, in);
        MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
        memcpy(pas, in->pas, pas_size);
        MLX5_SET(create_xrc_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_XRC_SRQ);

        err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
                                         sizeof(create_out));
        if (err)
                goto out;

        srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
out:
        kvfree(create_in);
        return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
                               struct mlx5_core_srq *srq)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_DESTROY_XRC_SRQ);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                          xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq, u16 lwm)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
                 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);

        return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                          xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq,
                             struct mlx5_srq_attr *out)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
        u32 *xrcsrq_out;
        void *xrc_srqc;
        int err;

        xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (!xrcsrq_out)
                return -ENOMEM;

        MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_QUERY_XRC_SRQ);
        MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                         xrcsrq_out,
                                         MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (err)
                goto out;

        xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                                xrc_srq_context_entry);
        get_srqc(xrc_srqc, out);
        if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(xrcsrq_out);
        return err;
}

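/* RMP command set: on ISSI 1 firmware a plain SRQ is implemented on top of
 * an RMP (receive memory pool) object, managed through the transport
 * object helpers declared in transobj.h.
 */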
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        void *create_in;
        void *rmpc;
        void *wq;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

        err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

        kvfree(create_in);
        return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        void *in;
        void *rmpc;
        void *wq;
        void *bitmask;
        int err;

        in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
        if (!in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
        bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
        MLX5_SET(wq, wq, lwm, lwm);
        MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

        err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

        kvfree(in);
        return err;
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 *rmp_out;
        void *rmpc;
        int err;

        rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
        if (!rmp_out)
                return -ENOMEM;

        err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
        if (err)
                goto out;

        rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
        get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
        if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(rmp_out);
        return err;
}

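/* The *_split() helpers pick the right command set: native SRQ commands on
 * ISSI 0 firmware; XRC SRQ or RMP commands on ISSI 1, keyed off the
 * resource type chosen at creation time.
 */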
static int create_srq_split(struct mlx5_core_dev *dev,
                            struct mlx5_core_srq *srq,
                            struct mlx5_srq_attr *in)
{
        if (!dev->issi)
                return create_srq_cmd(dev, srq, in);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return create_xrc_srq_cmd(dev, srq, in);
        else
                return create_rmp_cmd(dev, srq, in);
}

static int destroy_srq_split(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq)
{
        if (!dev->issi)
                return destroy_srq_cmd(dev, srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return destroy_xrc_srq_cmd(dev, srq);
        else
                return destroy_rmp_cmd(dev, srq);
}

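/* Create an SRQ, insert it into the radix tree used for event dispatch and
 * lookup, and initialize its refcount/completion pair; if the radix tree
 * insertion fails, the hardware object is destroyed again.
 */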
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        int err;

        if (in->type == IB_SRQT_XRC)
                srq->common.res = MLX5_RES_XSRQ;
        else
                srq->common.res = MLX5_RES_SRQ;

        err = create_srq_split(dev, srq, in);
        if (err)
                return err;

        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, srq->srqn, srq);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
                goto err_destroy_srq_split;
        }

        return 0;

err_destroy_srq_split:
        destroy_srq_split(dev, srq);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

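/* Remove the SRQ from the tree, destroy the hardware object, then drop the
 * initial reference and wait until all concurrent users (event handlers,
 * lookups) have dropped theirs before returning.
 */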
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, srq->srqn);
        spin_unlock_irq(&table->lock);
        if (!tmp) {
                mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
                return -EINVAL;
        }
        if (tmp != srq) {
                mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
                return -EINVAL;
        }

        err = destroy_srq_split(dev, srq);
        if (err)
                return err;

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out)
{
        if (!dev->issi)
                return query_srq_cmd(dev, srq, out);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return query_xrc_srq_cmd(dev, srq, out);
        else
                return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
{
        if (!dev->issi)
                return arm_srq_cmd(dev, srq, lwm, is_srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return arm_xrc_srq_cmd(dev, srq, lwm);
        else
                return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

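/* The SRQ table maps SRQ numbers to mlx5_core_srq objects for async event
 * dispatch and lookup; GFP_ATOMIC because insertions happen under the
 * table spinlock.
 */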
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
        /* nothing */
}