staging/rdma/hfi1: rework is_a0() and is_bx()
[deliverable/linux.git] / drivers / staging / rdma / hfi1 / chip.c
index e48981994b107c0794331247a6789e7b2d50a3d5..3235324659ba2bce9c8e3a2f7804190f40fd0882 100644 (file)
@@ -121,8 +121,8 @@ struct flag_table {
 #define SEC_SC_HALTED          0x4     /* per-context only */
 #define SEC_SPC_FREEZE         0x8     /* per-HFI only */
 
-#define VL15CTXT                  1
 #define MIN_KERNEL_KCTXTS         2
+#define FIRST_KERNEL_KCTXT        1
 #define NUM_MAP_REGS             32
 
 /* Bit offset into the GUID which carries HFI id information */
@@ -1587,8 +1587,6 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
                        RCV_TID_FLOW_GEN_MISMATCH_CNT,
                        CNTR_NORMAL),
-[C_RX_CTX_RHQS] = RXE32_DEV_CNTR_ELEM(RxCtxRHQS, RCV_CONTEXT_RHQ_STALL,
-                       CNTR_NORMAL),
 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
                        CNTR_NORMAL),
 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
@@ -1873,13 +1871,6 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
 
 /* ======================================================================== */
 
-/* return true if this is chip revision revision a0 */
-int is_a0(struct hfi1_devdata *dd)
-{
-       return ((dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
-                       & CCE_REVISION_CHIP_REV_MINOR_MASK) == 0;
-}
-
 /* return true if this is chip revision a */
 int is_ax(struct hfi1_devdata *dd)
 {
@@ -1895,7 +1886,7 @@ int is_bx(struct hfi1_devdata *dd)
        u8 chip_rev_minor =
                dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;
-       return !!(chip_rev_minor & 0x10);
+       return (chip_rev_minor & 0xF0) == 0x10;
 }
 
 /*
@@ -2190,9 +2181,8 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
        dd_dev_info(dd, "CCE Error: %s\n",
                cce_err_status_string(buf, sizeof(buf), reg));
 
-       if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK)
-                       && is_a0(dd)
-                       && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
+       if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
+           is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
                /* this error requires a manual drop into SPC freeze mode */
                /* then a fix up */
                start_freeze_handling(dd->pport, FREEZE_SELF);
@@ -2252,7 +2242,7 @@ static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
                 * Freeze mode recovery is disabled for the errors
                 * in RXE_FREEZE_ABORT_MASK
                 */
-               if (is_a0(dd) && (reg & RXE_FREEZE_ABORT_MASK))
+               if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
                        flags = FREEZE_ABORT;
 
                start_freeze_handling(dd->pport, flags);
@@ -2355,7 +2345,7 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
 
        if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
                start_freeze_handling(dd->pport, 0);
-       if (is_a0(dd) && (reg &
+       if (is_ax(dd) && (reg &
                    SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
                    && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
                start_freeze_handling(dd->pport, 0);
@@ -3050,7 +3040,7 @@ static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
        /* else this is _p */
 
        version = emulator_rev(dd);
-       if (!is_a0(dd))
+       if (!is_ax(dd))
                version = 0x2d; /* all B0 use 0x2d or higher settings */
 
        if (version <= 0x12) {
@@ -3336,7 +3326,7 @@ void handle_freeze(struct work_struct *work)
        write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
        wait_for_freeze_status(dd, 0);
 
-       if (is_a0(dd)) {
+       if (is_ax(dd)) {
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
                wait_for_freeze_status(dd, 1);
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
@@ -3864,7 +3854,7 @@ void handle_verify_cap(struct work_struct *work)
         *      REPLAY_BUF_MBE_SMASK
         *      FLIT_INPUT_BUF_MBE_SMASK
         */
-       if (is_a0(dd)) {                        /* fixed in B0 */
+       if (is_ax(dd)) {                        /* fixed in B0 */
                reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
                reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
                        | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
@@ -3907,18 +3897,32 @@ void handle_verify_cap(struct work_struct *work)
  */
 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
 {
-       int skip = 1;
        int do_bounce = 0;
-       u16 lwde = ppd->link_width_downgrade_enabled;
+       int tries;
+       u16 lwde;
        u16 tx, rx;
 
+       /* use the hls lock to avoid a race with actual link up */
+       tries = 0;
+retry:
        mutex_lock(&ppd->hls_lock);
        /* only apply if the link is up */
-       if (ppd->host_link_state & HLS_UP)
-               skip = 0;
-       mutex_unlock(&ppd->hls_lock);
-       if (skip)
-               return;
+       if (!(ppd->host_link_state & HLS_UP)) {
+               /* still going up..wait and retry */
+               if (ppd->host_link_state & HLS_GOING_UP) {
+                       if (++tries < 1000) {
+                               mutex_unlock(&ppd->hls_lock);
+                               usleep_range(100, 120); /* arbitrary */
+                               goto retry;
+                       }
+                       dd_dev_err(ppd->dd,
+                                  "%s: giving up waiting for link state change\n",
+                                  __func__);
+               }
+               goto done;
+       }
+
+       lwde = ppd->link_width_downgrade_enabled;
 
        if (refresh_widths) {
                get_link_widths(ppd->dd, &tx, &rx);
@@ -3956,6 +3960,9 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
                do_bounce = 1;
        }
 
+done:
+       mutex_unlock(&ppd->hls_lock);
+
        if (do_bounce) {
                set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
                  OPA_LINKDOWN_REASON_WIDTH_POLICY);
@@ -4774,13 +4781,25 @@ int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
  */
 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
 {
+       u32 regno;
+       int ret;
 
-       if (acquire_lcb_access(dd, 0) == 0) {
-               write_csr(dd, addr, data);
-               release_lcb_access(dd, 0);
-               return 0;
+       if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
+           (dd->dc8051_ver < dc8051_ver(0, 20))) {
+               if (acquire_lcb_access(dd, 0) == 0) {
+                       write_csr(dd, addr, data);
+                       release_lcb_access(dd, 0);
+                       return 0;
+               }
+               return -EBUSY;
        }
-       return -EBUSY;
+
+       /* register is an index of LCB registers: (offset - base) / 8 */
+       regno = (addr - DC_LCB_CFG_RUN) >> 3;
+       ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
+       if (ret != HCMD_SUCCESS)
+               return -EBUSY;
+       return 0;
 }
 
 /*
@@ -4861,6 +4880,26 @@ static int do_8051_command(
         * waiting for a command.
         */
 
+       /*
+        * When writing an LCB CSR, out_data contains the full value
+        * to be written, while in_data contains the relative LCB
+        * address in 7:0.  Do the work here, rather than the caller,
+        * of distributing the write data to where it needs to go:
+        *
+        * Write data
+        *   39:00 -> in_data[47:8]
+        *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
+        *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
+        */
+       if (type == HCMD_WRITE_LCB_CSR) {
+               in_data |= ((*out_data) & 0xffffffffffull) << 8;
+               reg = ((((*out_data) >> 40) & 0xff) <<
+                               DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
+                     | ((((*out_data) >> 48) & 0xffff) <<
+                               DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+               write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
+       }
+
        /*
         * Do two writes: the first to stabilize the type and req_data, the
         * second to activate.
@@ -7282,8 +7321,8 @@ static int set_buffer_control(struct hfi1_devdata *dd,
         */
        use_all_mask = 0;
        if ((be16_to_cpu(new_bc->overall_shared_limit) <
-                               be16_to_cpu(cur_bc.overall_shared_limit))
-                       || (is_a0(dd) && any_shared_limit_changing)) {
+            be16_to_cpu(cur_bc.overall_shared_limit)) ||
+           (is_ax(dd) && any_shared_limit_changing)) {
                set_global_shared(dd, 0);
                cur_bc.overall_shared_limit = 0;
                use_all_mask = 1;
@@ -7457,7 +7496,7 @@ int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
  */
 static int disable_data_vls(struct hfi1_devdata *dd)
 {
-       if (is_a0(dd))
+       if (is_ax(dd))
                return 1;
 
        pio_send_control(dd, PSC_DATA_VL_DISABLE);
@@ -7475,7 +7514,7 @@ static int disable_data_vls(struct hfi1_devdata *dd)
  */
 int open_fill_data_vls(struct hfi1_devdata *dd)
 {
-       if (is_a0(dd))
+       if (is_ax(dd))
                return 1;
 
        pio_send_control(dd, PSC_DATA_VL_ENABLE);
@@ -7748,11 +7787,22 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
                                        & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
                                << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
                write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
-               if (ctxt == VL15CTXT)
-                       write_csr(dd, RCV_VL15, VL15CTXT);
+               if (ctxt == HFI1_CTRL_CTXT)
+                       write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
        }
        if (op & HFI1_RCVCTRL_CTXT_DIS) {
                write_csr(dd, RCV_VL15, 0);
+               /*
+                * When receive context is being disabled turn on tail
+                * update with a dummy tail address and then disable
+                * receive context.
+                */
+               if (dd->rcvhdrtail_dummy_physaddr) {
+                       write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+                                       dd->rcvhdrtail_dummy_physaddr);
+                       rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+               }
+
                rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
        }
        if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
@@ -7822,10 +7872,11 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
        if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
                /*
                 * If the context has been disabled and the Tail Update has
-                * been cleared, clear the RCV_HDR_TAIL_ADDR CSR so
-                * it doesn't contain an address that is invalid.
+                * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
+                * so it doesn't contain an address that is invalid.
                 */
-               write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, 0);
+               write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+                               dd->rcvhdrtail_dummy_physaddr);
 }
 
 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
@@ -8785,7 +8836,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
        /* turn off interrupts */
        if (dd->num_msix_entries) {
                /* MSI-X */
-               hfi1_nomsix(dd);
+               pci_disable_msix(dd->pcidev);
        } else {
                /* INTx */
                disable_intx(dd->pcidev);
@@ -8840,18 +8891,12 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
                msix_intr);
 }
 
-static void remap_receive_available_interrupt(struct hfi1_devdata *dd,
-                                             int rx, int msix_intr)
-{
-       remap_intr(dd, IS_RCVAVAIL_START + rx, msix_intr);
-}
-
 static int request_intx_irq(struct hfi1_devdata *dd)
 {
        int ret;
 
-       snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME"_%d",
-               dd->unit);
+       snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
+                dd->unit);
        ret = request_irq(dd->pcidev->irq, general_interrupt,
                                  IRQF_SHARED, dd->intx_name, dd);
        if (ret)
@@ -8870,7 +8915,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
        int first_general, last_general;
        int first_sdma, last_sdma;
        int first_rx, last_rx;
-       int first_cpu, restart_cpu, curr_cpu;
+       int first_cpu, curr_cpu;
        int rcv_cpu, sdma_cpu;
        int i, ret = 0, possible;
        int ht;
@@ -8909,22 +8954,19 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
                        topology_sibling_cpumask(cpumask_first(local_mask)));
        for (i = possible/ht; i < possible; i++)
                cpumask_clear_cpu(i, def);
-       /* reset possible */
-       possible = cpumask_weight(def);
        /* def now has full cores on chosen node*/
        first_cpu = cpumask_first(def);
        if (nr_cpu_ids >= first_cpu)
                first_cpu++;
-       restart_cpu = first_cpu;
-       curr_cpu = restart_cpu;
+       curr_cpu = first_cpu;
 
-       for (i = first_cpu; i < dd->n_krcv_queues + first_cpu; i++) {
+       /*  One context is reserved as control context */
+       for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
                cpumask_clear_cpu(curr_cpu, def);
                cpumask_set_cpu(curr_cpu, rcv);
-               if (curr_cpu >= possible)
-                       curr_cpu = restart_cpu;
-               else
-                       curr_cpu++;
+               curr_cpu = cpumask_next(curr_cpu, def);
+               if (curr_cpu >= nr_cpu_ids)
+                       break;
        }
        /* def mask has non-rcv, rcv has recv mask */
        rcv_cpu = cpumask_first(rcv);
@@ -8953,7 +8995,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
                        handler = general_interrupt;
                        arg = dd;
                        snprintf(me->name, sizeof(me->name),
-                               DRIVER_NAME"_%d", dd->unit);
+                                DRIVER_NAME "_%d", dd->unit);
                        err_info = "general";
                } else if (first_sdma <= i && i < last_sdma) {
                        idx = i - first_sdma;
@@ -8961,7 +9003,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
                        handler = sdma_interrupt;
                        arg = sde;
                        snprintf(me->name, sizeof(me->name),
-                               DRIVER_NAME"_%d sdma%d", dd->unit, idx);
+                                DRIVER_NAME "_%d sdma%d", dd->unit, idx);
                        err_info = "sdma";
                        remap_sdma_interrupts(dd, idx, i);
                } else if (first_rx <= i && i < last_rx) {
@@ -8981,9 +9023,9 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
                        thread = receive_context_thread;
                        arg = rcd;
                        snprintf(me->name, sizeof(me->name),
-                               DRIVER_NAME"_%d kctxt%d", dd->unit, idx);
+                                DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
                        err_info = "receive context";
-                       remap_receive_available_interrupt(dd, idx, i);
+                       remap_intr(dd, IS_RCVAVAIL_START + idx, i);
                } else {
                        /* not in our expected range - complain, then
                           ignore it */
@@ -9018,17 +9060,26 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
                if (handler == sdma_interrupt) {
                        dd_dev_info(dd, "sdma engine %d cpu %d\n",
                                sde->this_idx, sdma_cpu);
+                       sde->cpu = sdma_cpu;
                        cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
                        sdma_cpu = cpumask_next(sdma_cpu, def);
                        if (sdma_cpu >= nr_cpu_ids)
                                sdma_cpu = cpumask_first(def);
                } else if (handler == receive_context_interrupt) {
-                       dd_dev_info(dd, "rcv ctxt %d cpu %d\n",
-                               rcd->ctxt, rcv_cpu);
-                       cpumask_set_cpu(rcv_cpu, dd->msix_entries[i].mask);
-                       rcv_cpu = cpumask_next(rcv_cpu, rcv);
-                       if (rcv_cpu >= nr_cpu_ids)
-                               rcv_cpu = cpumask_first(rcv);
+                       dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
+                                   (rcd->ctxt == HFI1_CTRL_CTXT) ?
+                                           cpumask_first(def) : rcv_cpu);
+                       if (rcd->ctxt == HFI1_CTRL_CTXT) {
+                               /* map to first default */
+                               cpumask_set_cpu(cpumask_first(def),
+                                               dd->msix_entries[i].mask);
+                       } else {
+                               cpumask_set_cpu(rcv_cpu,
+                                               dd->msix_entries[i].mask);
+                               rcv_cpu = cpumask_next(rcv_cpu, rcv);
+                               if (rcv_cpu >= nr_cpu_ids)
+                                       rcv_cpu = cpumask_first(rcv);
+                       }
                } else {
                        /* otherwise first def */
                        dd_dev_info(dd, "%s cpu %d\n",
@@ -9161,11 +9212,18 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
        /*
         * Kernel contexts: (to be fixed later):
         * - min or 2 or 1 context/numa
-        * - Context 0 - default/errors
-        * - Context 1 - VL15
+        * - Context 0 - control context (VL15/multicast/error)
+        * - Context 1 - default context
         */
        if (n_krcvqs)
-               num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS;
+               /*
+                * Don't count context 0 in n_krcvqs since
+                * it isn't used for normal verbs traffic.
+                *
+                * krcvqs will reflect number of kernel
+                * receive contexts above 0.
+                */
+               num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
        else
                num_kernel_contexts = num_online_nodes();
        num_kernel_contexts =
@@ -9455,7 +9513,7 @@ static void reset_asic_csrs(struct hfi1_devdata *dd)
        /* We might want to retain this state across FLR if we ever use it */
        write_csr(dd, ASIC_CFG_DRV_STR, 0);
 
-       write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0);
+       /* ASIC_CFG_THERM_POLL_EN leave alone */
        /* ASIC_STS_THERM read-only */
        /* ASIC_CFG_RESET leave alone */
 
@@ -9906,7 +9964,7 @@ static void init_chip(struct hfi1_devdata *dd)
                /* restore command and BARs */
                restore_pci_variables(dd);
 
-               if (is_a0(dd)) {
+               if (is_ax(dd)) {
                        dd_dev_info(dd, "Resetting CSRs with FLR\n");
                        hfi1_pcie_flr(dd);
                        restore_pci_variables(dd);
@@ -9925,23 +9983,20 @@ static void init_chip(struct hfi1_devdata *dd)
        write_csr(dd, CCE_DC_CTRL, 0);
 
        /* Set the LED off */
-       if (is_a0(dd))
+       if (is_ax(dd))
                setextled(dd, 0);
        /*
         * Clear the QSFP reset.
-        * A0 leaves the out lines floating on power on, then on an FLR
-        * enforces a 0 on all out pins.  The driver does not touch
+        * An FLR enforces a 0 on all out pins. The driver does not touch
         * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
-        * anything  plugged constantly in reset, if it pays attention
+        * anything plugged constantly in reset, if it pays attention
         * to RESET_N.
-        * A prime example of this is SiPh. For now, set all pins high.
+        * Prime examples of this are optical cables. Set all pins high.
         * I2CCLK and I2CDAT will change per direction, and INT_N and
         * MODPRS_N are input only and their value is ignored.
         */
-       if (is_a0(dd)) {
-               write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
-               write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
-       }
+       write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
+       write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
 }
 
 static void init_early_variables(struct hfi1_devdata *dd)
@@ -9951,7 +10006,7 @@ static void init_early_variables(struct hfi1_devdata *dd)
        /* assign link credit variables */
        dd->vau = CM_VAU;
        dd->link_credits = CM_GLOBAL_CREDITS;
-       if (is_a0(dd))
+       if (is_ax(dd))
                dd->link_credits--;
        dd->vcu = cu_to_vcu(hfi1_cu);
        /* enough room for 8 MAD packets plus header - 17K */
@@ -10017,12 +10072,6 @@ static void init_qpmap_table(struct hfi1_devdata *dd,
        u64 ctxt = first_ctxt;
 
        for (i = 0; i < 256;) {
-               if (ctxt == VL15CTXT) {
-                       ctxt++;
-                       if (ctxt > last_ctxt)
-                               ctxt = first_ctxt;
-                       continue;
-               }
                reg |= ctxt << (8 * (i % 8));
                i++;
                ctxt++;
@@ -10065,7 +10114,7 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
        unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
        u64 *rsmmap;
        u64 reg;
-       u8  rxcontext = is_a0(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
+       u8  rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
 
        /* validate */
        if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
@@ -10135,19 +10184,13 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
        /* Enable RSM */
        add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
        kfree(rsmmap);
-       /* map everything else (non-VL15) to context 0 */
-       init_qpmap_table(
-               dd,
-               0,
-               0);
+       /* map everything else to first context */
+       init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
        dd->qos_shift = n + 1;
        return;
 bail:
        dd->qos_shift = 1;
-       init_qpmap_table(
-               dd,
-               dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0,
-               dd->n_krcv_queues - 1);
+       init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
 }
 
 static void init_rxe(struct hfi1_devdata *dd)
@@ -10276,7 +10319,7 @@ int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
         * Enable send-side J_KEY integrity check, unless this is A0 h/w
         * (due to A0 erratum).
         */
-       if (!is_a0(dd)) {
+       if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
@@ -10309,7 +10352,7 @@ int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
         * This check would not have been enabled for A0 h/w, see
         * set_ctxt_jkey().
         */
-       if (!is_a0(dd)) {
+       if (!is_ax(dd)) {
                reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
                reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
                write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
@@ -10803,7 +10846,9 @@ static int thermal_init(struct hfi1_devdata *dd)
 
        acquire_hw_mutex(dd);
        dd_dev_info(dd, "Initializing thermal sensor\n");
-
+       /* Disable polling of thermal readings */
+       write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
+       msleep(100);
        /* Thermal Sensor Initialization */
        /*    Step 1: Reset the Thermal SBus Receiver */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
This page took 0.033184 seconds and 5 git commands to generate.