/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
        (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
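/* Example (grounded in the txq wake paths below): bump the tx_restart
 * counter of instruction queue iq_no by one:
 *
 *     INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, tx_restart, 1);
 */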
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");

static int ptp_enable = 1;
/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
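/* These flags are OR'd into the atomic lio->ifstate bitmask; see the
 * ifstate_check()/ifstate_set()/ifstate_reset() helpers below, e.g.
 * ifstate_set(lio, LIO_IFSTATE_RUNNING) when an interface comes up.
 */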
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

struct liquidio_if_cfg_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
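/* The GSO ceiling leaves OCTNIC_GSO_MAX_HEADER_SIZE bytes of the device's
 * default input jabber size free for L2-L4 headers, so a maximally sized
 * GSO payload plus its headers still fits in one input buffer.
 */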
/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /** List manipulation. Next and prev pointers. */
        struct list_head list;

        /** Size of the gather component at sg in bytes. */
        int sg_size;

        /** Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /** Gather component that can accommodate max sized fragment list
         *  received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        u64 sg_dma_ptr;
};

struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};
struct octeon_device_priv {
        /** Tasklet structures for this device. */
        struct tasklet_struct droq_tasklet;
        unsigned long napi_mask;
};
static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & (1ULL << q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & (1ULL << i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < 100; i++) {
                pcount =
                        atomic_read(&oct->response_list
                                [OCTEON_ORDERED_SC_LIST].pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}
/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & (1ULL << i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return a DISCONNECT. There is no support for recovery but only
         * for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}
#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};
static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
};
/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_stop_subqueue(netdev, i);
        } else {
                netif_stop_queue(netdev);
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        } else {
                netif_start_queue(netdev);
        }
}
/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++) {
                        int qno = lio->linfo.txpciq[i %
                                (lio->linfo.num_txpciq)].s.q_no;

                        if (__netif_subqueue_stopped(netdev, i)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                          tx_restart, 1);
                                netif_wake_subqueue(netdev, i);
                        }
                }
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                netif_wake_queue(netdev);
        }
}
/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
        txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->linfo.link.s.link_up) {
                txqs_start(netdev);
                return;
        }
}
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_wake_subqueue(netdev, q);
        else
                netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_stop_subqueue(netdev, q);
        else
                netif_stop_queue(netdev);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int ret_val = 0;

        if (netif_is_multiqueue(lio->netdev)) {
                int numqs = lio->netdev->num_tx_queues;
                int q, iq = 0;

                /* check each sub-queue state */
                for (q = 0; q < numqs; q++) {
                        iq = lio->linfo.txpciq[q %
                                (lio->linfo.num_txpciq)].s.q_no;
                        if (octnet_iq_is_full(lio->oct_dev, iq))
                                continue;
                        if (__netif_subqueue_stopped(lio->netdev, q)) {
                                wake_q(lio->netdev, q);
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                          tx_restart, 1);
                                ret_val++;
                        }
                }
        } else {
                if (octnet_iq_is_full(lio->oct_dev, lio->txq))
                        return 0;
                wake_q(lio->netdev, lio->txq);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                ret_val = 1;
        }
        return ret_val;
}
/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if ((root->prev == root) && (root->next == root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}
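/* Note: list_delete_head() returns NULL when the list is empty (root points
 * back to itself), so callers such as delete_glists() below can simply loop
 * until the list drains.
 */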
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->linfo.num_txpciq; i++) {
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
                        if (g) {
                                if (g->sg) {
                                        dma_unmap_single(&lio->oct_dev->
                                                         pci_dev->dev,
                                                         g->sg_dma_ptr,
                                                         g->sg_size,
                                                         DMA_TO_DEVICE);
                                        kfree((void *)((unsigned long)g->sg -
                                                       g->adjust));
                                }
                                kfree(g);
                        }
                } while (g);
        }

        kfree((void *)lio->glist);
}
/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        int i, j;
        struct octnic_gather *g;

        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
                return 1;

        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
                kfree((void *)lio->glist_lock);
                return 1;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = cpu_to_node(i % num_online_cpus());

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
                                      OCT_SG_ENTRY_SIZE);

                        g->sg = kmalloc_node(g->sg_size + 8,
                                             GFP_KERNEL, numa_node);
                        if (!g->sg)
                                g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
                        if (!g->sg) {
                                kfree(g);
                                break;
                        }

                        /* The gather component should be aligned on 64-bit
                         * boundary
                         */
                        if (((unsigned long)g->sg) & 7) {
                                g->adjust = 8 - (((unsigned long)g->sg) & 7);
                                g->sg = (struct octeon_sg_entry *)
                                        ((unsigned long)g->sg + g->adjust);
                        }
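                        /* Over-allocating by 8 bytes above makes this bump
                         * safe; g->adjust records the offset so the original
                         * kmalloc pointer can be recovered at free time via
                         * (unsigned long)g->sg - g->adjust.
                         */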
                        g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
                                                       g->sg, g->sg_size,
                                                       DMA_TO_DEVICE);
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg_dma_ptr)) {
                                kfree((void *)((unsigned long)g->sg -
                                               g->adjust));
                                kfree(g);
                                break;
                        }

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        delete_glists(lio);
                        return 1;
                }
        }

        return 0;
}
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}
/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        rtnl_lock();
        call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
        rtnl_unlock();
}
/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}
/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);

        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        /* start_txq(netdev); */
                        txqs_wake(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txq(netdev);
                }
        }
}
/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
        struct net_device *netdev;
        struct lio *lio;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

        netdev = oct->props[iq->ifidx].netdev;

        /* This is needed because the first IQ does not have
         * a netdev associated with it.
         */
        if (!netdev)
                return;

        lio = GET_LIO(netdev);
        if (netif_is_multiqueue(netdev)) {
                if (__netif_subqueue_stopped(netdev, iq->q_index) &&
                    lio->linfo.link.s.link_up &&
                    (!octnet_iq_is_full(oct, iq_num))) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, iq->q_index);
                } else {
                        if (!octnet_iq_is_full(oct, lio->txq)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
                                                          lio->txq,
                                                          tx_restart, 1);
                                wake_q(netdev, lio->txq);
                        }
                }
        }
}
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
        struct octeon_device *oct = droq->oct_dev;
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;

        if (droq->ops.poll_mode) {
                droq->ops.napi_fn(droq);
        } else {
                if (ret & MSIX_PO_INT) {
                        tasklet_schedule(&oct_priv->droq_tasklet);
                        return 1;
                }
                /* this will be flushed periodically by check iq db */
                if (ret & MSIX_PI_INT)
                        return 0;
        }
        return 0;
}
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        u64 oq_no;
        struct octeon_droq *droq;

        if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
                for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
                     oq_no++) {
                        if (!(oct->droq_intr & (1ULL << oq_no)))
                                continue;

                        droq = oct->droq[oq_no];

                        if (droq->ops.poll_mode) {
                                droq->ops.napi_fn(droq);
                                oct_priv->napi_mask |= (1 << oq_no);
                        } else {
                                tasklet_schedule(&oct_priv->droq_tasklet);
                        }
                }
        }
}
static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
        u64 ret;
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

        if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
                liquidio_schedule_msix_droq_pkt_handler(droq, ret);

        return IRQ_HANDLED;
}
/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
                                         void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        irqreturn_t ret;

        /* Disable our interrupts for the duration of ISR */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        ret = oct->fn_list.process_interrupt_regs(oct);

        if (ret == IRQ_HANDLED)
                liquidio_schedule_droq_pkt_handlers(oct);

        /* Re-enable our interrupts */
        if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

        return ret;
}
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
        int irqret, err;
        struct msix_entry *msix_entries;
        int i;
        int num_ioq_vectors;
        int num_alloc_ioq_vectors;

        if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
                /* one non ioq interrupt for handling sli_mac_pf_int_sum */
                oct->num_msix_irqs += 1;

                oct->msix_entries = kcalloc(
                    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
                if (!oct->msix_entries)
                        return 1;

                msix_entries = (struct msix_entry *)oct->msix_entries;
                /*Assumption is that pf msix vectors start from pf srn to pf to
                 * trs and not from 0. if not change this code
                 */
                for (i = 0; i < oct->num_msix_irqs - 1; i++)
                        msix_entries[i].entry = oct->sriov_info.pf_srn + i;
                msix_entries[oct->num_msix_irqs - 1].entry =
                    oct->sriov_info.trs;
                num_alloc_ioq_vectors = pci_enable_msix_range(
                                                oct->pci_dev, msix_entries,
                                                oct->num_msix_irqs,
                                                oct->num_msix_irqs);
                if (num_alloc_ioq_vectors < 0) {
                        dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        return 1;
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

                num_ioq_vectors = oct->num_msix_irqs;

                /** For PF, there is one non-ioq interrupt handler */
                num_ioq_vectors -= 1;
                irqret = request_irq(msix_entries[num_ioq_vectors].vector,
                                     liquidio_legacy_intr_handler, 0, "octeon",
                                     oct);
                if (irqret) {
                        dev_err(&oct->pci_dev->dev,
                                "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
                                irqret);
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        return 1;
                }

                for (i = 0; i < num_ioq_vectors; i++) {
                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
                                             "octeon", &oct->ioq_vector[i]);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                /** Freeing the non-ioq irq vector here. */
                                free_irq(msix_entries[num_ioq_vectors].vector,
                                         oct);

                                while (i) {
                                        i--;
                                        /** clearing affinity mask. */
                                        irq_set_affinity_hint(
                                                msix_entries[i].vector, NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                }
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                oct->msix_entries = NULL;
                                return 1;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
                        irq_set_affinity_hint(
                            msix_entries[i].vector,
                            (&oct->ioq_vector[i].affinity_mask));
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
                        oct->octeon_id);
        } else {
                err = pci_enable_msi(oct->pci_dev);
                if (err)
                        dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
                                 err);
                else
                        oct->flags |= LIO_FLAG_MSI_ENABLED;

                irqret = request_irq(oct->pci_dev->irq,
                                     liquidio_legacy_intr_handler, IRQF_SHARED,
                                     "octeon", oct);
                if (irqret) {
                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
                                irqret);
                        return 1;
                }
        }
        return 0;
}
static int liquidio_watchdog(void *param)
{
        u64 wdog;
        u16 mask_of_stuck_cores = 0;
        u16 mask_of_crashed_cores = 0;
        int core_num;
        u8 core_is_stuck[LIO_MAX_CORES];
        u8 core_crashed[LIO_MAX_CORES];
        struct octeon_device *oct = param;

        memset(core_is_stuck, 0, sizeof(core_is_stuck));
        memset(core_crashed, 0, sizeof(core_crashed));

        while (!kthread_should_stop()) {
                mask_of_crashed_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
                        if (!core_is_stuck[core_num]) {
                                wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));

                                /* look at watchdog state field */
                                wdog &= CIU3_WDOG_MASK;
                                if (wdog) {
                                        /* this watchdog timer has expired */
                                        core_is_stuck[core_num] =
                                                LIO_MONITOR_WDOG_EXPIRE;
                                        mask_of_stuck_cores |= (1 << core_num);
                                }
                        }

                        if (!core_crashed[core_num])
                                core_crashed[core_num] =
                                    (mask_of_crashed_cores >> core_num) & 1;
                }

                if (mask_of_stuck_cores) {
                        for (core_num = 0; core_num < LIO_MAX_CORES;
                             core_num++) {
                                if (core_is_stuck[core_num] == 1) {
                                        dev_err(&oct->pci_dev->dev,
                                                "ERROR: Octeon core %d is stuck!\n",
                                                core_num);
                                        /* 2 means we have printk'd an error
                                         * so no need to repeat the same printk
                                         */
                                        core_is_stuck[core_num] =
                                                LIO_MONITOR_CORE_STUCK_MSGD;
                                }
                        }
                }

                if (mask_of_crashed_cores) {
                        for (core_num = 0; core_num < LIO_MAX_CORES;
                             core_num++) {
                                if (core_crashed[core_num] == 1) {
                                        dev_err(&oct->pci_dev->dev,
                                                "ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
                                                core_num);
                                        /* 2 means we have printk'd an error
                                         * so no need to repeat the same printk
                                         */
                                        core_crashed[core_num] =
                                                LIO_MONITOR_CORE_STUCK_MSGD;
                                }
                        }
                }
#ifdef CONFIG_MODULE_UNLOAD
                if (mask_of_stuck_cores || mask_of_crashed_cores) {
                        /* make module refcount=0 so that rmmod will work */
                        long refcount;

                        refcount = module_refcount(THIS_MODULE);

                        while (refcount > 0) {
                                module_put(THIS_MODULE);
                                refcount = module_refcount(THIS_MODULE);
                        }

                        /* compensate for and withstand an unlikely (but still
                         * possible) race condition
                         */
                        while (refcount < 0) {
                                try_module_get(THIS_MODULE);
                                refcount = module_refcount(THIS_MODULE);
                        }
                }
#endif
                /* sleep for two seconds */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(2 * HZ);
        }

        return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u64 scratch1;
                u8 bus, device, function;

                scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
                if (!(scratch1 & 4ULL)) {
                        /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
                         * the lio watchdog kernel thread is running for this
                         * NIC. Each NIC gets one watchdog kernel thread.
                         */
                        scratch1 |= 4ULL;
                        octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
                                           scratch1);

                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        wake_up_process(oct_dev->watchdog_task);
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}
/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

        case OCT_DEV_HOST_OK:

        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                /* clear the affinity_cpumask */
                                irq_set_affinity_hint(msix_entries[i].vector,
                                                      NULL);
                                free_irq(msix_entries[i].vector,
                                         &oct->ioq_vector[i]);
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }

        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

        case OCT_DEV_PCI_MAP_DONE:
                /* Soft reset the octeon device before exiting */
                if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

        case OCT_DEV_BEGIN_STATE:
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}
/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
                            u32 status,
                            void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_rx_ctl_context *ctx;

        ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (status)
                dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
        WRITE_ONCE(ctx->cond, 1);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        struct liquidio_rx_ctl_context *ctx;
        union octnet_cmd *ncmd;
        int ctx_size = sizeof(struct liquidio_rx_ctl_context);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, ctx_size);

        ncmd = (union octnet_cmd *)sc->virtdptr;
        ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        WRITE_ONCE(ctx->cond, 0);
        ctx->octeon_id = lio_get_device_id(oct);
        init_waitqueue_head(&ctx->wc);

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        sc->callback = rx_ctl_callback;
        sc->callback_arg = sc;
        sc->wait_time = 5000;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
                        return;
                oct->props[lio->ifidx].rx_on = start_stop;
        }

        octeon_free_soft_command(oct, sc);
}
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct lio *lio;
        struct napi_struct *napi, *n;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_link_status_change_wq(netdev);

        delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}
/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < lio->linfo.num_rxpciq; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}
/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}
/**
 * \brief Identify the Octeon device and to map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
        u32 dev_id, rev_id;
        int ret = 1;
        char *s;

        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
        oct->rev_id = rev_id & 0xff;

        switch (dev_id) {
        case OCTEON_CN68XX_PCIID:
                oct->chip_id = OCTEON_CN68XX;
                ret = lio_setup_cn68xx_octeon_device(oct);
                s = "CN68XX";
                break;

        case OCTEON_CN66XX_PCIID:
                oct->chip_id = OCTEON_CN66XX;
                ret = lio_setup_cn66xx_octeon_device(oct);
                s = "CN66XX";
                break;

        case OCTEON_CN23XX_PCIID_PF:
                oct->chip_id = OCTEON_CN23XX_PF_VID;
                ret = setup_cn23xx_octeon_pf_device(oct);
                s = "CN23XX";
                break;

        default:
                s = "?";
                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
                        dev_id);
        }

        if (!ret)
                dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
                         OCTEON_MAJOR_REV(oct),
                         OCTEON_MINOR_REV(oct),
                         octeon_get_conf(oct)->card_name,
                         LIQUIDIO_VERSION);

        return ret;
}
/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
        /* setup PCI stuff first */
        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
        int q = 0;

        if (netif_is_multiqueue(lio->netdev))
                q = skb->queue_mapping % lio->linfo.num_txpciq;

        return q;
}
/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline void check_txq_state(struct lio *lio, struct sk_buff *skb)
{
        int q = 0, iq = 0;

        if (netif_is_multiqueue(lio->netdev)) {
                q = skb->queue_mapping;
                iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
        } else {
                iq = lio->txq;
                q = iq;
        }

        if (octnet_iq_is_full(lio->oct_dev, iq))
                return;

        if (__netif_subqueue_stopped(lio->netdev, q)) {
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
                wake_q(lio->netdev, q);
        }
}
/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
        struct sk_buff *skb;
        struct octnet_buf_free_info *finfo;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        check_txq_state(lio, skb);

        tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               frag->size, DMA_TO_DEVICE);
                i++;
        }

        dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
                                g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

        iq = skb_iq(lio, skb);
        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        check_txq_state(lio, skb); /* mq support: sub-queue state check */

        tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
        struct octeon_soft_command *sc;
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        sc = (struct octeon_soft_command *)buf;
        skb = (struct sk_buff *)sc->callback_arg;
        finfo = (struct octnet_buf_free_info *)&skb->cb;

        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

                pci_unmap_page((lio->oct_dev)->pci_dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               frag->size, DMA_TO_DEVICE);
                i++;
        }

        dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
                                g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

        iq = skb_iq(lio, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        /* Don't free the skb yet */

        check_txq_state(lio, skb);
}
/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        u64 comp, delta;
        unsigned long flags;
        bool neg_adj = false;

        if (ppb < 0) {
                neg_adj = true;
                ppb = -ppb;
        }

        /* The hardware adds the clock compensation value to the
         * PTP clock on every coprocessor clock cycle, so we
         * compute the delta in terms of coprocessor clocks.
         */
        delta = (u64)ppb << 32;
        do_div(delta, oct->coproc_clock_rate);
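        /* Worked example (assuming a hypothetical 1 GHz coprocessor clock):
         * ppb = 1000 gives delta = (1000 << 32) / 1e9 ~= 4295, i.e. the
         * per-cycle compensation word changes by ~4295 units of 2^-32 ns,
         * which accumulates to 1 us of correction per second -- exactly the
         * requested 1000 parts per billion.
         */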
        spin_lock_irqsave(&lio->ptp_lock, flags);
        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
        if (neg_adj)
                comp -= delta;
        else
                comp += delta;
        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}
/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);

        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio->ptp_adjust += delta;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}
/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
                                struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        spin_lock_irqsave(&lio->ptp_lock, flags);
        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
        ns += lio->ptp_adjust;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}
/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
                                const struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        ns = timespec64_to_ns(ts);

        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
        lio->ptp_adjust = 0;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}
/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
                    struct ptp_clock_request *rq __attribute__((unused)),
                    int on __attribute__((unused)))
{
        return -EOPNOTSUPP;
}
/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        spin_lock_init(&lio->ptp_lock);

        snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
        lio->ptp_info.owner = THIS_MODULE;
        lio->ptp_info.max_adj = 250000000;
        lio->ptp_info.n_alarm = 0;
        lio->ptp_info.n_ext_ts = 0;
        lio->ptp_info.n_per_out = 0;
        lio->ptp_info.pps = 0;
        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
        lio->ptp_info.settime64 = liquidio_ptp_settime;
        lio->ptp_info.enable = liquidio_ptp_enable;

        lio->ptp_adjust = 0;

        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
                                            &oct->pci_dev->dev);

        if (IS_ERR(lio->ptp_clock))
                lio->ptp_clock = NULL;
}
/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
        u64 clock_comp, cfg;

        clock_comp = (u64)NSEC_PER_SEC << 32;
        do_div(clock_comp, oct->coproc_clock_rate);
        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
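        /* clock_comp is a 32.32 fixed-point nanosecond value:
         * NSEC_PER_SEC / coproc_clock_rate is the number of nanoseconds to
         * add to the PTP counter on every coprocessor cycle.
         */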
        /* Enable */
        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
        int ret = 0;
        const struct firmware *fw;
        char fw_name[LIO_MAX_FW_FILENAME_LEN];
        char *tmp_fw_type;

        if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
                    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
                dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
                return ret;
        }

        if (fw_type[0] == '\0')
                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
        else
                tmp_fw_type = fw_type;

        sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
                octeon_get_conf(oct)->card_name, tmp_fw_type,
                LIO_FW_NAME_SUFFIX);
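        /* For example (assuming the usual macro values "liquidio/", "lio_"
         * and ".bin"), a 23xx card with the default "nic" firmware type
         * resolves to something like "liquidio/lio_23xx_nic.bin".
         */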
        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
        if (ret) {
                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
                        fw_name);
                release_firmware(fw);
                return ret;
        }

        ret = octeon_download_firmware(oct, fw->data, fw->size);

        release_firmware(fw);

        return ret;
}
/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
                             int desc_size, void *app_ctx)
{
        int ret_val = 0;

        dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
        /* droq creation and local register settings. */
        ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
        if (ret_val < 0)
                return ret_val;

        if (ret_val == 1) {
                dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
                return 0;
        }
        /* tasklet creation for the droq */

        /* Enable the droq queues */
        octeon_set_droq_pkt_op(oct, q_no, 1);

        /* Send Credit for Octeon Output queues. Credits are always
         * sent after the output queue is enabled.
         */
        writel(oct->droq[q_no]->max_count,
               oct->droq[q_no]->pkts_credit_reg);

        return ret_val;
}
/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
                            u32 status __attribute__((unused)),
                            void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_if_cfg_resp *resp;
        struct liquidio_if_cfg_context *ctx;

        resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
        ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (resp->status)
                dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
                        CVM_CAST64(resp->status));
        WRITE_ONCE(ctx->cond, 1);

        snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
                 resp->cfg_info.liquidio_firmware_version);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Select queue based on hash
 * @param dev Net device
 * @param skb sk_buff structure
 * @returns selected queue number
 */
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
                    void *accel_priv __attribute__((unused)),
                    select_queue_fallback_t fallback __attribute__((unused)))
{
        u32 qindex = 0;
        struct lio *lio;

        lio = GET_LIO(dev);
        qindex = skb_tx_hash(dev, skb);

        return (u16)(qindex % (lio->linfo.num_txpciq));
}
/** Routine to push packets arriving on Octeon interface up to network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                     void *skbuff,
                     u32 len,
                     union octeon_rh *rh,
                     void *param,
                     void *arg)
{
        struct napi_struct *napi = param;
        struct sk_buff *skb = (struct sk_buff *)skbuff;
        struct skb_shared_hwtstamps *shhwtstamps;
        u64 ns;
        u16 vtag = 0;
        struct net_device *netdev = (struct net_device *)arg;
        struct octeon_droq *droq = container_of(param, struct octeon_droq,
                                                napi);
        if (netdev) {
                int packet_was_received;
                struct lio *lio = GET_LIO(netdev);
                struct octeon_device *oct = lio->oct_dev;

                /* Do not proceed if the interface is not in RUNNING state. */
                if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
                        recv_buffer_free(skb);
                        droq->stats.rx_dropped++;
                        return;
                }

                skb->dev = netdev;

                skb_record_rx_queue(skb, droq->q_no);
                if (likely(len > MIN_SKB_SIZE)) {
                        struct octeon_skb_page_info *pg_info;
                        unsigned char *va;

                        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
                        if (pg_info->page) {
                                /* For Paged allocation use the frags */
                                va = page_address(pg_info->page) +
                                        pg_info->page_offset;
                                memcpy(skb->data, va, MIN_SKB_SIZE);
                                skb_put(skb, MIN_SKB_SIZE);
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                pg_info->page,
                                                pg_info->page_offset +
                                                        MIN_SKB_SIZE,
                                                len - MIN_SKB_SIZE,
                                                LIO_RXBUFFER_SZ);
                        }
                } else {
                        struct octeon_skb_page_info *pg_info =
                                ((struct octeon_skb_page_info *)(skb->cb));
                        skb_copy_to_linear_data(skb, page_address(pg_info->page)
                                                + pg_info->page_offset, len);
                        skb_put(skb, len);
                        put_page(pg_info->page);
                }

                if (((oct->chip_id == OCTEON_CN66XX) ||
                     (oct->chip_id == OCTEON_CN68XX)) &&
                    ptp_enable) {
                        if (rh->r_dh.has_hwtstamp) {
                                /* timestamp is included from the hardware at
                                 * the beginning of the packet.
                                 */
                                if (ifstate_check
                                    (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
                                        /* Nanoseconds are in the first 64-bits
                                         * of the packet.
                                         */
                                        memcpy(&ns, (skb->data), sizeof(ns));
                                        shhwtstamps = skb_hwtstamps(skb);
                                        shhwtstamps->hwtstamp =
                                                ns_to_ktime(ns +
                                                            lio->ptp_adjust);
                                }
                                skb_pull(skb, sizeof(ns));
                        }
                }

                skb->protocol = eth_type_trans(skb, skb->dev);
                if ((netdev->features & NETIF_F_RXCSUM) &&
                    (((rh->r_dh.encap_on) &&
                      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
                     (!(rh->r_dh.encap_on) &&
                      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
                        /* checksum has already been verified */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb->ip_summed = CHECKSUM_NONE;

                /* Setting Encapsulation field on basis of status received
                 * from the firmware
                 */
                if (rh->r_dh.encap_on) {
                        skb->encapsulation = 1;
                        skb->csum_level = 1;
                        droq->stats.rx_vxlan++;
                }

                /* inbound VLAN tag */
                if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                    (rh->r_dh.vlan != 0)) {
                        u16 vid = rh->r_dh.vlan;
                        u16 priority = rh->r_dh.priority;

                        vtag = priority << 13 | vid;
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
                }
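                /* vtag is the 802.1Q TCI word: the 3-bit priority (PCP)
                 * occupies bits 15:13 and the 12-bit VLAN ID bits 11:0.
                 */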
2343 packet_was_received
= napi_gro_receive(napi
, skb
) != GRO_DROP
;
2345 if (packet_was_received
) {
2346 droq
->stats
.rx_bytes_received
+= len
;
2347 droq
->stats
.rx_pkts_received
++;
2348 netdev
->last_rx
= jiffies
;
2350 droq
->stats
.rx_dropped
++;
2351 netif_info(lio
, rx_err
, lio
->netdev
,
2352 "droq:%d error rx_dropped:%llu\n",
2353 droq
->q_no
, droq
->stats
.rx_dropped
);
2357 recv_buffer_free(skb
);
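/* Two details of the receive path above are worth spelling out.  First,
 * when CN66XX/CN68XX firmware prepends a hardware timestamp, the leading
 * 8 bytes of the frame carry nanoseconds; they are copied out and then
 * removed with skb_pull() before eth_type_trans() parses the Ethernet
 * header.  Second, the VLAN tag control word is assembled as
 * (priority << 13 | vid) because 802.1Q places the 3-bit PCP in bits
 * 15:13 and the 12-bit VID in bits 11:0 of the TCI.
 */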
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		struct call_single_data *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}
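/* Scheduling note for the callback above: napi_schedule_irqoff() must run
 * on the CPU that will poll the NAPI context, so when the interrupt lands
 * on a different CPU the schedule is bounced to droq->cpu_id through
 * smp_call_function_single_async(), with napi_schedule_wrapper() as the
 * remote function.  CN23XX PF takes the direct path, presumably because
 * its per-queue MSI-X vectors already land on the intended CPU.
 */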
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_droq *droq;
	int work_done;
	int tx_done = 0, iq_no;
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
						 POLL_EVENT_PROCESS_PKTS,
						 budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* Process iq buffers within the budget limits */
		tx_done = octeon_flush_iq(oct, iq, 1, budget);
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		update_txq_status(oct, iq_no);
		/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

	if ((work_done < budget) && (tx_done)) {
		napi_complete(napi);
		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
					     POLL_EVENT_ENABLE_INTR, 0);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}
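/* NAPI contract observed above: finishing under budget with tx_done set
 * completes the NAPI context and asks the DROQ to re-enable its interrupt
 * via POLL_EVENT_ENABLE_INTR, while returning the full budget (including
 * whenever tx_done is false) keeps the poll loop scheduled so the
 * instruction queue is flushed again soon.
 */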
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static inline int setup_io_queues(struct octeon_device *octeon_dev,
				  int ifidx)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	static int cpu_id;
	static int cpu_id_modulus;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int q, q_no, retval = 0;
	struct lio *lio;
	int num_tx_descs;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = (void *)netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			q, q_no);
		retval = octeon_setup_droq(octeon_dev, q_no,
					   CFG_GET_NUM_RX_DESCS_NIC_IF
						   (octeon_get_conf(octeon_dev),
						   lio->ifidx),
					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
						   (octeon_get_conf(octeon_dev),
						   lio->ifidx), NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
			(u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		/* 23XX PF can receive control messages (via the first PF-owned
		 * droq) from the firmware even if the ethX interface is down,
		 * so that's why poll_mode must be off for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < lio->linfo.num_txpciq; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
							   (octeon_dev),
							   lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}
	}

	return 0;
}
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	start_txq(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Pause for a moment and wait for Octeon to flush out (to the wire) any
	 * egress packets that are in-flight.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));

	/* Now it should be safe to tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates a octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
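/* Example of the conversion above: an interface with IFF_BROADCAST and
 * IFF_MULTICAST set and more multicast subscriptions than
 * MAX_OCTEON_MULTICAST_ADDR yields UNICAST | MULTICAST | ALLMULTI |
 * BROADCAST, which tells the firmware to stop filtering individual
 * multicast groups.
 */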
/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}
/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}
/**
 * \brief Net device change_mtu
 * @param netdev network device
 * @param new_mtu requested MTU
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* Limit the MTU to make sure the ethernet packets are between
	 * LIO_MIN_MTU_SIZE bytes and LIO_MAX_MTU_SIZE bytes
	 */
	if ((new_mtu < LIO_MIN_MTU_SIZE) ||
	    (new_mtu > LIO_MAX_MTU_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
		dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
			LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
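/* All specific PTP filters above are coarsened to HWTSTAMP_FILTER_ALL and
 * echoed back to user space.  The SIOCSHWTSTAMP ABI explicitly permits a
 * driver to timestamp more packets than requested, so timestamping
 * everything is a valid way to satisfy any PTP filter.
 */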
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * \brief handle a Tx timestamp response
 * @param oct octeon device
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
/* \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = 1;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
/** \brief Transmit network packets to the Octeon interface
 * @param skb skbuff struct to be passed to network layer.
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		q_idx = skb->queue_mapping;
		q_idx = (q_idx % (lio->linfo.num_txpciq));
		tag = q_idx;
		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (netif_is_multiqueue(netdev)) {
		if (octnet_iq_is_full(oct, ndata.q_no)) {
			/* defer sending if queue is full */
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			stats->tx_iq_busy++;
			return NETDEV_TX_BUSY;
		}
	} else {
		if (octnet_iq_is_full(oct, lio->txq)) {
			/* defer sending if queue is full */
			stats->tx_iq_busy++;
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   lio->txq);
			return NETDEV_TX_BUSY;
		}
	}
	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;

		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
					   g->sg_size, DMA_TO_DEVICE);
		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		stop_q(lio->netdev, q_idx);

	netif_trans_update(netdev);

	if (skb_shinfo(skb)->gso_size)
		stats->tx_done += skb_shinfo(skb)->gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += skb->len;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);
	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}
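/* Gather-list indexing used above: each octeon_sg entry packs four
 * pointers, so fragment i lands in entry (i >> 2), slot (i & 3), with
 * slot 0 of entry 0 always mapping the linear part of the skb.  The
 * unwind path on a DMA mapping failure walks the same indexing in
 * reverse.
 */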
/** \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}
static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
/** Sending command to enable/disable RX checksum offload
 * @param netdev      pointer to network device
 * @param command     OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd      OCTNET_CMD_RXCSUM_ENABLE/
 *                    OCTNET_CMD_RXCSUM_DISABLE
 * @returns           SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}

	return ret;
}
/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev        pointer to network device
 * @param command       OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port    VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns             SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}

	return ret;
}
/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}
/** \brief Net device set features
 * @param netdev   pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}
static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}
static struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
};
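/* .ndo_select_queue is intentionally absent from the table above;
 * setup_nic_devices() patches select_q into this shared ops structure
 * only when an interface is created with more than one transmit queue.
 */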
/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(conf_type);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}
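/* The two loops above implement a two-phase handshake per probed device:
 * octeon_device_init() completes hs->init once the hardware is brought
 * up, and nic_starter() completes hs->started after firmware reports the
 * NIC application, so module load fails cleanly if either phase stalls.
 */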
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		if (num_iqueues > 1)
			lionetdevops.ndo_select_queue = select_q;

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}
/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}
/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work pointer to the work_struct embedded in a cavium_wk
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}
/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	int fw_loaded = 0;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (!cn23xx_fw_loaded(octeon_dev)) {
			fw_loaded = 0;
			/* Do a soft reset of the Octeon device. */
			if (octeon_dev->fn_list.soft_reset(octeon_dev))
				return 1;
			/* things might have changed */
			if (!cn23xx_fw_loaded(octeon_dev))
				fw_loaded = 0;
			else
				fw_loaded = 1;
		} else {
			fw_loaded = 1;
		}
	} else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
		return 1;
	}

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	octeon_set_io_queues_off(octeon_dev);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool
	 */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		/* On error, release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_iqs; j++)
			octeon_delete_instr_queue(octeon_dev, j);
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		/* Release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_oqs; j++)
			octeon_delete_droq(octeon_dev, j);
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		ret = octeon_add_console(octeon_dev, 0);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}
		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
		 * loaded
		 */
		if (OCTEON_CN23XX_PF(octeon_dev))
			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
					   2ULL);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	return 0;
}
4339 static void __exit
liquidio_exit(void)
4341 liquidio_deinit_pci();
4343 pr_info("LiquidIO network module is now unloaded\n");
4346 module_init(liquidio_init
);
4347 module_exit(liquidio_exit
);