/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profile[] = {
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
	},
};
#define FW_INIT_TIMEOUT_MILI	2000
#define FW_INIT_WAIT_MS		2
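
/* Poll the firmware "initializing" bit in the init segment every
 * FW_INIT_WAIT_MS until it clears or until max_wait_mili milliseconds
 * have elapsed.
 */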
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
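
/* Prefer a 64-bit DMA mask and fall back to 32 bits, for both the streaming
 * and the coherent mask, and cap the DMA segment size at 2GB.
 */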
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);

	return err;
}
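
/* pci_enable_device()/pci_disable_device() wrappers: dev->pci_status,
 * protected by pci_status_mutex, keeps enable/disable idempotent across the
 * normal and error-recovery paths.
 */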
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
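
/* Size the MSI-X vector table: one completion vector per online CPU per port
 * on top of the MLX5_EQ_VEC_COMP_BASE control vectors, clamped by the number
 * of EQs the device exposes (log_max_eq).
 */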
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->msix_arr || !priv->irq_info)
		goto err_free_msix;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_msix:
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
	return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
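
/* Translate a pkey table size in entries into the encoding the firmware
 * expects in cmd_hca_cap.pkey_table_size; unsupported sizes are reported
 * with a warning.
 */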
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:  return 0;
	case 256:  return 1;
	case 512:  return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
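
/* Issue QUERY_HCA_CAP for a given capability type; op_mod combines the
 * capability type with the query mode (current vs. maximum values) and the
 * result is cached in dev->hca_caps_cur or dev->hca_caps_max.
 */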
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
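
/* If the device reports atomic support, force the atomic requester 8B
 * endianness mode to host endianness via SET_HCA_CAP.
 */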
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
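
/* Adjust the general HCA capabilities before INIT_HCA: start from the
 * current caps, limit the pkey table size, apply the selected profile's QP
 * limit, disable cmdif checksum and advertise the kernel UAR page size.
 */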
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
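
/* Read the 64-bit internal timer from the init segment; the high word is
 * re-read after the low word so a wrap between the two reads is detected.
 */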
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) /* wrap around */
		timer_l = ioread32be(&dev->iseg->internal_timer_l);

	return (cycle_t)timer_l | (cycle_t)timer_h1 << 32;
}
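
/* Spread completion IRQs across the CPUs of the device's NUMA node by
 * publishing an affinity hint for each completion vector.
 */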
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
	int numa_node           = priv->numa_node;
	int err;

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
			priv->irq_info[i].mask);

	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
	if (err) {
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
			       irq);
		goto err_clear_mask;
	}

	return 0;

err_clear_mask:
	free_cpumask_var(priv->irq_info[i].mask);
	return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(priv->irq_info[i].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
		err = mlx5_irq_set_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		mlx5_irq_clear_affinity_hint(mdev, i);

	return err;
}
static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
		mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list)
		if (eq->eqn == eqn) {
			spin_unlock(&table->lock);
			return eq;
		}

	spin_unlock(&table->lock);

	return ERR_PTR(-ENOENT);
}
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

#ifdef CONFIG_RFS_ACCEL
	if (dev->rmap) {
		free_irq_cpu_rmap(dev->rmap);
		dev->rmap = NULL;
	}
#endif
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
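
/* Create one completion EQ per MSI-X completion vector, name its IRQ
 * "mlx5_comp<n>" and, when RFS acceleration is enabled, register the vector
 * with the CPU reverse-map.
 */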
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!dev->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(dev->rmap,
				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}
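
/* Negotiate the ISSI (issue) level with the firmware: query the supported
 * mask and move to ISSI 1 when available, otherwise stay at ISSI 0.
 */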
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (status == MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		pr_err("failed to query ISSI err(%d)\n", err);
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err) {
			pr_err("failed to set ISSI=1 err(%d)\n", err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -ENOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	mutex_init(&priv->alloc_mutex);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
	debugfs_remove(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto out;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);

	mlx5_init_srq_table(dev);

	mlx5_init_mkey_table(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5_eswitch_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
		goto err_rl_cleanup;
	}
#endif

	err = mlx5_sriov_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
		goto err_eswitch_cleanup;
	}

	return 0;

err_eswitch_cleanup:
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);

err_rl_cleanup:
#endif
	mlx5_cleanup_rl_table(dev);

err_tables_cleanup:
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_sriov_cleanup(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
	mlx5_cleanup_rl_table(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}
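
/* Bring the device up: command interface, firmware init wait, ENABLE_HCA,
 * ISSI negotiation, boot/init pages, HCA capabilities, INIT_HCA, health
 * polling, interrupts, EQs, flow steering, eswitch/SR-IOV and finally
 * interface registration. The error path unwinds in reverse order.
 */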
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* on load removing any previous indication of internal error, device is
	 * up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	if (boot && mlx5_init_once(dev, priv)) {
		dev_err(&pdev->dev, "sw objs init failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	err = mlx5_irq_set_affinity_hints(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
		goto err_affinity_hints;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init flow steering\n");
		goto err_fs;
	}

#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_attach(dev->priv.eswitch);
#endif

	err = mlx5_sriov_attach(dev);
	if (err) {
		dev_err(&pdev->dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
	} else {
		err = mlx5_register_device(dev);
		if (err) {
			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
			goto err_reg_dev;
		}
	}

	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
	mutex_unlock(&dev->intf_state_mutex);

	return 0;

err_reg_dev:
	mlx5_sriov_detach(dev);

err_sriov:
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_detach(dev->priv.eswitch);
#endif
	mlx5_cleanup_fs(dev);

err_fs:
	mlx5_irq_clear_affinity_hints(dev);

err_affinity_hints:
	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev, 0);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
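
/* Tear the device down in the reverse order of mlx5_load_one(); when
 * "cleanup" is set the software objects created by mlx5_init_once() are
 * destroyed as well.
 */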
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	if (mlx5_device_registered(dev))
		mlx5_detach_device(dev);

	mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_detach(dev->priv.eswitch);
#endif
	mlx5_cleanup_fs(dev);
	mlx5_irq_clear_affinity_hints(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_CORE_EN
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
#endif
};
#define MLX5_IB_MOD "mlx5_ib"
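/* PCI probe: allocate the devlink/mlx5_core_dev pair, set up PCI and health
 * monitoring, load the device, request the mlx5_ib module and register with
 * devlink.
 */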
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	struct mlx5_priv *priv;
	int err;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
	if (!devlink) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	priv = &dev->priv;
	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(pdev, dev);

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
		mlx5_core_warn(dev,
			       "selected profile out of range, selecting default (%d)\n",
			       MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profile[prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
	}

	err = request_module_nowait(MLX5_IB_MOD);
	if (err)
		pr_info("failed request module on %s\n", MLX5_IB_MOD);

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto clean_load;

	return 0;

clean_load:
	mlx5_unload_one(dev, priv, true);
clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_priv *priv = &dev->priv;

	devlink_unregister(devlink);
	mlx5_unregister_device(dev);

	if (mlx5_unload_one(dev, priv, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);
}
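
/* PCI AER error handling: on a detected error the device is put in the
 * internal-error state and unloaded; slot_reset re-enables it and waits for
 * the health counter before declaring recovery.
 */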
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv, false);
	pci_save_state(pdev);
	mlx5_pci_disable_device(dev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
			, __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (wait_vital(pdev)) {
		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
			, __func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "Shutdown was called\n");
	/* Notify mlx5 clients that the kernel is being shut down */
	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
	mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure
};
static int __init init(void)
{
	int err;

	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);