1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2015 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/module.h>
24 #include <linux/crc32.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/pci.h>
27 #include <linux/pci_ids.h>
30 #include <linux/ipv6.h>
31 #include <linux/net_tstamp.h>
32 #include <linux/if_vlan.h>
33 #include <linux/firmware.h>
34 #include <linux/ethtool.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <linux/types.h>
37 #include <linux/list.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include "octeon_config.h"
41 #include "liquidio_common.h"
42 #include "octeon_droq.h"
43 #include "octeon_iq.h"
44 #include "response_manager.h"
45 #include "octeon_device.h"
46 #include "octeon_nic.h"
47 #include "octeon_main.h"
48 #include "octeon_network.h"
49 #include "cn66xx_regs.h"
50 #include "cn66xx_device.h"
51 #include "cn68xx_regs.h"
52 #include "cn68xx_device.h"
53 #include "liquidio_image.h"
55 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
56 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(LIQUIDIO_VERSION
);
59 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX
);
60 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX
);
61 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX
);
63 static int ddr_timeout
= 10000;
64 module_param(ddr_timeout
, int, 0644);
65 MODULE_PARM_DESC(ddr_timeout
,
66 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
68 static u32 console_bitmask
;
69 module_param(console_bitmask
, int, 0644);
70 MODULE_PARM_DESC(console_bitmask
,
71 "Bitmask indicating which consoles have debug output redirected to syslog.");
73 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
75 static int debug
= -1;
76 module_param(debug
, int, 0644);
77 MODULE_PARM_DESC(debug
, "NETIF_MSG debug bits");
79 static char fw_type
[LIO_MAX_FW_TYPE_LEN
];
80 module_param_string(fw_type
, fw_type
, sizeof(fw_type
), 0000);
81 MODULE_PARM_DESC(fw_type
, "Type of firmware to be loaded. Default \"nic\"");
84 module_param(conf_type
, int, 0);
85 MODULE_PARM_DESC(conf_type
, "select octeon configuration 0 default 1 ovs");
87 static int ptp_enable
= 1;
89 /* Bit mask values for lio->ifstate */
90 #define LIO_IFSTATE_DROQ_OPS 0x01
91 #define LIO_IFSTATE_REGISTERED 0x02
92 #define LIO_IFSTATE_RUNNING 0x04
93 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
95 /* Polling interval for determining when NIC application is alive */
96 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
98 /* runtime link query interval */
99 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
101 struct liquidio_if_cfg_context
{
104 wait_queue_head_t wc
;
109 struct liquidio_if_cfg_resp
{
111 struct liquidio_if_cfg_info cfg_info
;
115 struct oct_link_status_resp
{
117 struct oct_link_info link_info
;
121 struct oct_timestamp_resp
{
127 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
132 #ifdef __BIG_ENDIAN_BITFIELD
144 /** Octeon device properties to be used by the NIC module.
145 * Each octeon device in the system will be represented
146 * by this structure in the NIC module.
149 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
151 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
152 #define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
154 /** Structure of a node in list of gather components maintained by
155 * NIC driver for each network device.
157 struct octnic_gather
{
158 /** List manipulation. Next and prev pointers. */
159 struct list_head list
;
161 /** Size of the gather component at sg in bytes. */
164 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
167 /** Gather component that can accommodate max sized fragment list
168 * received from the IP layer.
170 struct octeon_sg_entry
*sg
;
175 /** This structure is used by NIC driver to store information required
176 * to free the sk_buff when the packet has been fetched by Octeon.
177 * Bytes offset below assume worst-case of a 64-bit system.
179 struct octnet_buf_free_info
{
180 /** Bytes 1-8. Pointer to network device private structure. */
183 /** Bytes 9-16. Pointer to sk_buff. */
186 /** Bytes 17-24. Pointer to gather list. */
187 struct octnic_gather
*g
;
189 /** Bytes 25-32. Physical address of skb->data or gather list. */
192 /** Bytes 33-47. Piggybacked soft command, if any */
193 struct octeon_soft_command
*sc
;
197 struct completion init
;
198 struct completion started
;
199 struct pci_dev
*pci_dev
;
204 struct octeon_device_priv
{
205 /** Tasklet structures for this device. */
206 struct tasklet_struct droq_tasklet
;
207 unsigned long napi_mask
;
210 static int octeon_device_init(struct octeon_device
*);
211 static void liquidio_remove(struct pci_dev
*pdev
);
212 static int liquidio_probe(struct pci_dev
*pdev
,
213 const struct pci_device_id
*ent
);
215 static struct handshake handshake
[MAX_OCTEON_DEVICES
];
216 static struct completion first_stage
;
218 static void octeon_droq_bh(unsigned long pdev
)
222 struct octeon_device
*oct
= (struct octeon_device
*)pdev
;
223 struct octeon_device_priv
*oct_priv
=
224 (struct octeon_device_priv
*)oct
->priv
;
226 /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
227 for (q_no
= 0; q_no
< MAX_OCTEON_OUTPUT_QUEUES(oct
); q_no
++) {
228 if (!(oct
->io_qmask
.oq
& (1ULL << q_no
)))
230 reschedule
|= octeon_droq_process_packets(oct
, oct
->droq
[q_no
],
235 tasklet_schedule(&oct_priv
->droq_tasklet
);
238 static int lio_wait_for_oq_pkts(struct octeon_device
*oct
)
240 struct octeon_device_priv
*oct_priv
=
241 (struct octeon_device_priv
*)oct
->priv
;
242 int retry
= 100, pkt_cnt
= 0, pending_pkts
= 0;
248 for (i
= 0; i
< MAX_OCTEON_OUTPUT_QUEUES(oct
); i
++) {
249 if (!(oct
->io_qmask
.oq
& (1ULL << i
)))
251 pkt_cnt
+= octeon_droq_check_hw_for_pkts(oct
,
255 pending_pkts
+= pkt_cnt
;
256 tasklet_schedule(&oct_priv
->droq_tasklet
);
259 schedule_timeout_uninterruptible(1);
261 } while (retry
-- && pending_pkts
);
/**
 * \brief Report completed Tx packets/bytes to the byte-queue-limit layer
 * @param txq         opaque pointer to the struct netdev_queue being reported
 * @param pkts_compl  number of packets whose transmission has completed
 * @param bytes_compl total number of bytes in those completed packets
 */
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *nq = txq;

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
274 void octeon_update_tx_completion_counters(void *buf
, int reqtype
,
275 unsigned int *pkts_compl
,
276 unsigned int *bytes_compl
)
278 struct octnet_buf_free_info
*finfo
;
279 struct sk_buff
*skb
= NULL
;
280 struct octeon_soft_command
*sc
;
283 case REQTYPE_NORESP_NET
:
284 case REQTYPE_NORESP_NET_SG
:
289 case REQTYPE_RESP_NET_SG
:
290 case REQTYPE_RESP_NET
:
292 skb
= sc
->callback_arg
;
300 *bytes_compl
+= skb
->len
;
303 void octeon_report_sent_bytes_to_bql(void *buf
, int reqtype
)
305 struct octnet_buf_free_info
*finfo
;
307 struct octeon_soft_command
*sc
;
308 struct netdev_queue
*txq
;
311 case REQTYPE_NORESP_NET
:
312 case REQTYPE_NORESP_NET_SG
:
317 case REQTYPE_RESP_NET_SG
:
318 case REQTYPE_RESP_NET
:
320 skb
= sc
->callback_arg
;
327 txq
= netdev_get_tx_queue(skb
->dev
, skb_get_queue_mapping(skb
));
328 netdev_tx_sent_queue(txq
, skb
->len
);
331 int octeon_console_debug_enabled(u32 console
)
333 return (console_bitmask
>> (console
)) & 0x1;
337 * \brief Forces all IO queues off on a given device
338 * @param oct Pointer to Octeon device
340 static void force_io_queues_off(struct octeon_device
*oct
)
342 if ((oct
->chip_id
== OCTEON_CN66XX
) ||
343 (oct
->chip_id
== OCTEON_CN68XX
)) {
344 /* Reset the Enable bits for Input Queues. */
345 octeon_write_csr(oct
, CN6XXX_SLI_PKT_INSTR_ENB
, 0);
347 /* Reset the Enable bits for Output Queues. */
348 octeon_write_csr(oct
, CN6XXX_SLI_PKT_OUT_ENB
, 0);
353 * \brief wait for all pending requests to complete
354 * @param oct Pointer to Octeon device
356 * Called during shutdown sequence
358 static int wait_for_pending_requests(struct octeon_device
*oct
)
362 for (i
= 0; i
< 100; i
++) {
364 atomic_read(&oct
->response_list
365 [OCTEON_ORDERED_SC_LIST
].pending_req_count
);
367 schedule_timeout_uninterruptible(HZ
/ 10);
379 * \brief Cause device to go quiet so it can be safely removed/reset/etc
380 * @param oct Pointer to Octeon device
382 static inline void pcierror_quiesce_device(struct octeon_device
*oct
)
386 /* Disable the input and output queues now. No more packets will
387 * arrive from Octeon, but we should wait for all packet processing
390 force_io_queues_off(oct
);
392 /* To allow for in-flight requests */
393 schedule_timeout_uninterruptible(100);
395 if (wait_for_pending_requests(oct
))
396 dev_err(&oct
->pci_dev
->dev
, "There were pending requests\n");
398 /* Force all requests waiting to be fetched by OCTEON to complete. */
399 for (i
= 0; i
< MAX_OCTEON_INSTR_QUEUES(oct
); i
++) {
400 struct octeon_instr_queue
*iq
;
402 if (!(oct
->io_qmask
.iq
& (1ULL << i
)))
404 iq
= oct
->instr_queue
[i
];
406 if (atomic_read(&iq
->instr_pending
)) {
407 spin_lock_bh(&iq
->lock
);
409 iq
->octeon_read_index
= iq
->host_write_index
;
410 iq
->stats
.instr_processed
+=
411 atomic_read(&iq
->instr_pending
);
412 lio_process_iq_request_list(oct
, iq
);
413 spin_unlock_bh(&iq
->lock
);
417 /* Force all pending ordered list requests to time out. */
418 lio_process_ordered_list(oct
, 1);
420 /* We do not need to wait for output queue packets to be processed. */
424 * \brief Cleanup PCI AER uncorrectable error status
425 * @param dev Pointer to PCI device
427 static void cleanup_aer_uncorrect_error_status(struct pci_dev
*dev
)
432 pr_info("%s :\n", __func__
);
434 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, &status
);
435 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_SEVER
, &mask
);
436 if (dev
->error_state
== pci_channel_io_normal
)
437 status
&= ~mask
; /* Clear corresponding nonfatal bits */
439 status
&= mask
; /* Clear corresponding fatal bits */
440 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_STATUS
, status
);
/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 *
 * Used on a fatal PCI error: marks the device as in reset, quiesces it,
 * releases the interrupt line and clears outstanding AER uncorrectable
 * error status. The ordering of the steps below is deliberate.
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct->chip);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
474 * \brief called when PCI error is detected
475 * @param pdev Pointer to PCI device
476 * @param state The current pci connection state
478 * This function is called after a PCI bus error affecting
479 * this device has been detected.
481 static pci_ers_result_t
liquidio_pcie_error_detected(struct pci_dev
*pdev
,
482 pci_channel_state_t state
)
484 struct octeon_device
*oct
= pci_get_drvdata(pdev
);
486 /* Non-correctable Non-fatal errors */
487 if (state
== pci_channel_io_normal
) {
488 dev_err(&oct
->pci_dev
->dev
, "Non-correctable non-fatal error reported:\n");
489 cleanup_aer_uncorrect_error_status(oct
->pci_dev
);
490 return PCI_ERS_RESULT_CAN_RECOVER
;
493 /* Non-correctable Fatal errors */
494 dev_err(&oct
->pci_dev
->dev
, "Non-correctable FATAL reported by PCI AER driver\n");
497 /* Always return a DISCONNECT. There is no support for recovery but only
498 * for a clean shutdown.
500 return PCI_ERS_RESULT_DISCONNECT
;
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 * @returns PCI_ERS_RESULT_RECOVERED unconditionally
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 * @returns PCI_ERS_RESULT_RECOVERED unconditionally
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev)
{
	/* Nothing to be done here. */
}
547 * \brief called when suspending
548 * @param pdev Pointer to PCI device
549 * @param state state to suspend to
551 static int liquidio_suspend(struct pci_dev
*pdev
, pm_message_t state
)
557 * \brief called when resuming
558 * @param pdev Pointer to PCI device
560 static int liquidio_resume(struct pci_dev
*pdev
)
566 /* For PCI-E Advanced Error Recovery (AER) Interface */
567 static const struct pci_error_handlers liquidio_err_handler
= {
568 .error_detected
= liquidio_pcie_error_detected
,
569 .mmio_enabled
= liquidio_pcie_mmio_enabled
,
570 .slot_reset
= liquidio_pcie_slot_reset
,
571 .resume
= liquidio_pcie_resume
,
574 static const struct pci_device_id liquidio_pci_tbl
[] = {
576 PCI_VENDOR_ID_CAVIUM
, 0x91, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0
579 PCI_VENDOR_ID_CAVIUM
, 0x92, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0
585 MODULE_DEVICE_TABLE(pci
, liquidio_pci_tbl
);
587 static struct pci_driver liquidio_pci_driver
= {
589 .id_table
= liquidio_pci_tbl
,
590 .probe
= liquidio_probe
,
591 .remove
= liquidio_remove
,
592 .err_handler
= &liquidio_err_handler
, /* For AER */
595 .suspend
= liquidio_suspend
,
596 .resume
= liquidio_resume
,
602 * \brief register PCI driver
604 static int liquidio_init_pci(void)
606 return pci_register_driver(&liquidio_pci_driver
);
610 * \brief unregister PCI driver
612 static void liquidio_deinit_pci(void)
614 pci_unregister_driver(&liquidio_pci_driver
);
618 * \brief check interface state
619 * @param lio per-network private data
620 * @param state_flag flag state to check
622 static inline int ifstate_check(struct lio
*lio
, int state_flag
)
624 return atomic_read(&lio
->ifstate
) & state_flag
;
628 * \brief set interface state
629 * @param lio per-network private data
630 * @param state_flag flag state to set
632 static inline void ifstate_set(struct lio
*lio
, int state_flag
)
634 atomic_set(&lio
->ifstate
, (atomic_read(&lio
->ifstate
) | state_flag
));
638 * \brief clear interface state
639 * @param lio per-network private data
640 * @param state_flag flag state to clear
642 static inline void ifstate_reset(struct lio
*lio
, int state_flag
)
644 atomic_set(&lio
->ifstate
, (atomic_read(&lio
->ifstate
) & ~(state_flag
)));
648 * \brief Stop Tx queues
649 * @param netdev network device
651 static inline void txqs_stop(struct net_device
*netdev
)
653 if (netif_is_multiqueue(netdev
)) {
656 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
657 netif_stop_subqueue(netdev
, i
);
659 netif_stop_queue(netdev
);
664 * \brief Start Tx queues
665 * @param netdev network device
667 static inline void txqs_start(struct net_device
*netdev
)
669 if (netif_is_multiqueue(netdev
)) {
672 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
673 netif_start_subqueue(netdev
, i
);
675 netif_start_queue(netdev
);
680 * \brief Wake Tx queues
681 * @param netdev network device
683 static inline void txqs_wake(struct net_device
*netdev
)
685 if (netif_is_multiqueue(netdev
)) {
688 for (i
= 0; i
< netdev
->num_tx_queues
; i
++)
689 if (__netif_subqueue_stopped(netdev
, i
))
690 netif_wake_subqueue(netdev
, i
);
692 netif_wake_queue(netdev
);
697 * \brief Stop Tx queue
698 * @param netdev network device
700 static void stop_txq(struct net_device
*netdev
)
706 * \brief Start Tx queue
707 * @param netdev network device
709 static void start_txq(struct net_device
*netdev
)
711 struct lio
*lio
= GET_LIO(netdev
);
713 if (lio
->linfo
.link
.s
.link_up
) {
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (!netif_is_multiqueue(netdev)) {
		netif_wake_queue(netdev);
		return;
	}

	netif_wake_subqueue(netdev, q);
}
/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (!netif_is_multiqueue(netdev)) {
		netif_stop_queue(netdev);
		return;
	}

	netif_stop_subqueue(netdev, q);
}
746 * \brief Check Tx queue status, and take appropriate action
747 * @param lio per-network private data
748 * @returns 0 if full, number of queues woken up otherwise
750 static inline int check_txq_status(struct lio
*lio
)
754 if (netif_is_multiqueue(lio
->netdev
)) {
755 int numqs
= lio
->netdev
->num_tx_queues
;
758 /* check each sub-queue state */
759 for (q
= 0; q
< numqs
; q
++) {
760 iq
= lio
->linfo
.txpciq
[q
%
761 (lio
->linfo
.num_txpciq
)].s
.q_no
;
762 if (octnet_iq_is_full(lio
->oct_dev
, iq
))
764 if (__netif_subqueue_stopped(lio
->netdev
, q
)) {
765 wake_q(lio
->netdev
, q
);
770 if (octnet_iq_is_full(lio
->oct_dev
, lio
->txq
))
772 wake_q(lio
->netdev
, lio
->txq
);
779 * Remove the node at the head of the list. The list would be empty at
780 * the end of this call if there are no more nodes in the list.
782 static inline struct list_head
*list_delete_head(struct list_head
*root
)
784 struct list_head
*node
;
786 if ((root
->prev
== root
) && (root
->next
== root
))
798 * \brief Delete gather lists
799 * @param lio per-network private data
801 static void delete_glists(struct lio
*lio
)
803 struct octnic_gather
*g
;
809 for (i
= 0; i
< lio
->linfo
.num_txpciq
; i
++) {
811 g
= (struct octnic_gather
*)
812 list_delete_head(&lio
->glist
[i
]);
815 dma_unmap_single(&lio
->oct_dev
->
820 kfree((void *)((unsigned long)g
->sg
-
828 kfree((void *)lio
->glist
);
832 * \brief Setup gather lists
833 * @param lio per-network private data
835 static int setup_glists(struct octeon_device
*oct
, struct lio
*lio
, int num_iqs
)
838 struct octnic_gather
*g
;
840 lio
->glist_lock
= kcalloc(num_iqs
, sizeof(*lio
->glist_lock
),
842 if (!lio
->glist_lock
)
845 lio
->glist
= kcalloc(num_iqs
, sizeof(*lio
->glist
),
848 kfree((void *)lio
->glist_lock
);
852 for (i
= 0; i
< num_iqs
; i
++) {
853 int numa_node
= cpu_to_node(i
% num_online_cpus());
855 spin_lock_init(&lio
->glist_lock
[i
]);
857 INIT_LIST_HEAD(&lio
->glist
[i
]);
859 for (j
= 0; j
< lio
->tx_qsize
; j
++) {
860 g
= kzalloc_node(sizeof(*g
), GFP_KERNEL
,
863 g
= kzalloc(sizeof(*g
), GFP_KERNEL
);
867 g
->sg_size
= ((ROUNDUP4(OCTNIC_MAX_SG
) >> 2) *
870 g
->sg
= kmalloc_node(g
->sg_size
+ 8,
871 GFP_KERNEL
, numa_node
);
873 g
->sg
= kmalloc(g
->sg_size
+ 8, GFP_KERNEL
);
879 /* The gather component should be aligned on 64-bit
882 if (((unsigned long)g
->sg
) & 7) {
883 g
->adjust
= 8 - (((unsigned long)g
->sg
) & 7);
884 g
->sg
= (struct octeon_sg_entry
*)
885 ((unsigned long)g
->sg
+ g
->adjust
);
887 g
->sg_dma_ptr
= dma_map_single(&oct
->pci_dev
->dev
,
890 if (dma_mapping_error(&oct
->pci_dev
->dev
,
892 kfree((void *)((unsigned long)g
->sg
-
898 list_add_tail(&g
->list
, &lio
->glist
[i
]);
901 if (j
!= lio
->tx_qsize
) {
911 * \brief Print link information
912 * @param netdev network device
914 static void print_link_info(struct net_device
*netdev
)
916 struct lio
*lio
= GET_LIO(netdev
);
918 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_REGISTERED
) {
919 struct oct_link_info
*linfo
= &lio
->linfo
;
921 if (linfo
->link
.s
.link_up
) {
922 netif_info(lio
, link
, lio
->netdev
, "%d Mbps %s Duplex UP\n",
924 (linfo
->link
.s
.duplex
) ? "Full" : "Half");
926 netif_info(lio
, link
, lio
->netdev
, "Link Down\n");
932 * \brief Update link status
933 * @param netdev network device
934 * @param ls link status structure
936 * Called on receipt of a link status response from the core application to
937 * update each interface's link status.
939 static inline void update_link_status(struct net_device
*netdev
,
940 union oct_link_status
*ls
)
942 struct lio
*lio
= GET_LIO(netdev
);
943 int changed
= (lio
->linfo
.link
.u64
!= ls
->u64
);
945 lio
->linfo
.link
.u64
= ls
->u64
;
947 if ((lio
->intf_open
) && (changed
)) {
948 print_link_info(netdev
);
951 if (lio
->linfo
.link
.s
.link_up
) {
952 netif_carrier_on(netdev
);
953 /* start_txq(netdev); */
956 netif_carrier_off(netdev
);
963 * \brief Droq packet processor scheduler
964 * @param oct octeon device
967 void liquidio_schedule_droq_pkt_handlers(struct octeon_device
*oct
)
969 struct octeon_device_priv
*oct_priv
=
970 (struct octeon_device_priv
*)oct
->priv
;
972 struct octeon_droq
*droq
;
974 if (oct
->int_status
& OCT_DEV_INTR_PKT_DATA
) {
975 for (oq_no
= 0; oq_no
< MAX_OCTEON_OUTPUT_QUEUES(oct
);
977 if (!(oct
->droq_intr
& (1ULL << oq_no
)))
980 droq
= oct
->droq
[oq_no
];
982 if (droq
->ops
.poll_mode
) {
983 droq
->ops
.napi_fn(droq
);
984 oct_priv
->napi_mask
|= (1 << oq_no
);
986 tasklet_schedule(&oct_priv
->droq_tasklet
);
993 * \brief Interrupt handler for octeon
995 * @param dev octeon device
998 irqreturn_t
liquidio_intr_handler(int irq
__attribute__((unused
)), void *dev
)
1000 struct octeon_device
*oct
= (struct octeon_device
*)dev
;
1003 /* Disable our interrupts for the duration of ISR */
1004 oct
->fn_list
.disable_interrupt(oct
->chip
);
1006 ret
= oct
->fn_list
.process_interrupt_regs(oct
);
1008 if (ret
== IRQ_HANDLED
)
1009 liquidio_schedule_droq_pkt_handlers(oct
);
1011 /* Re-enable our interrupts */
1012 if (!(atomic_read(&oct
->status
) == OCT_DEV_IN_RESET
))
1013 oct
->fn_list
.enable_interrupt(oct
->chip
);
1019 * \brief Setup interrupt for octeon device
1020 * @param oct octeon device
1022 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
1024 static int octeon_setup_interrupt(struct octeon_device
*oct
)
1028 err
= pci_enable_msi(oct
->pci_dev
);
1030 dev_warn(&oct
->pci_dev
->dev
, "Reverting to legacy interrupts. Error: %d\n",
1033 oct
->flags
|= LIO_FLAG_MSI_ENABLED
;
1035 irqret
= request_irq(oct
->pci_dev
->irq
, liquidio_intr_handler
,
1036 IRQF_SHARED
, "octeon", oct
);
1038 if (oct
->flags
& LIO_FLAG_MSI_ENABLED
)
1039 pci_disable_msi(oct
->pci_dev
);
1040 dev_err(&oct
->pci_dev
->dev
, "Request IRQ failed with code: %d\n",
1049 * \brief PCI probe handler
1050 * @param pdev PCI device structure
1053 static int liquidio_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1055 struct octeon_device
*oct_dev
= NULL
;
1056 struct handshake
*hs
;
1058 oct_dev
= octeon_allocate_device(pdev
->device
,
1059 sizeof(struct octeon_device_priv
));
1061 dev_err(&pdev
->dev
, "Unable to allocate device\n");
1065 dev_info(&pdev
->dev
, "Initializing device %x:%x.\n",
1066 (u32
)pdev
->vendor
, (u32
)pdev
->device
);
1068 /* Assign octeon_device for this device to the private data area. */
1069 pci_set_drvdata(pdev
, oct_dev
);
1071 /* set linux specific device pointer */
1072 oct_dev
->pci_dev
= (void *)pdev
;
1074 hs
= &handshake
[oct_dev
->octeon_id
];
1075 init_completion(&hs
->init
);
1076 init_completion(&hs
->started
);
1079 if (oct_dev
->octeon_id
== 0)
1080 /* first LiquidIO NIC is detected */
1081 complete(&first_stage
);
1083 if (octeon_device_init(oct_dev
)) {
1084 liquidio_remove(pdev
);
1088 dev_dbg(&oct_dev
->pci_dev
->dev
, "Device is ready\n");
1094 *\brief Destroy resources associated with octeon device
1095 * @param pdev PCI device structure
1098 static void octeon_destroy_resources(struct octeon_device
*oct
)
1101 struct octeon_device_priv
*oct_priv
=
1102 (struct octeon_device_priv
*)oct
->priv
;
1104 struct handshake
*hs
;
1106 switch (atomic_read(&oct
->status
)) {
1107 case OCT_DEV_RUNNING
:
1108 case OCT_DEV_CORE_OK
:
1110 /* No more instructions will be forwarded. */
1111 atomic_set(&oct
->status
, OCT_DEV_IN_RESET
);
1113 oct
->app_mode
= CVM_DRV_INVALID_APP
;
1114 dev_dbg(&oct
->pci_dev
->dev
, "Device state is now %s\n",
1115 lio_get_state_string(&oct
->status
));
1117 schedule_timeout_uninterruptible(HZ
/ 10);
1120 case OCT_DEV_HOST_OK
:
1123 case OCT_DEV_CONSOLE_INIT_DONE
:
1124 /* Remove any consoles */
1125 octeon_remove_consoles(oct
);
1128 case OCT_DEV_IO_QUEUES_DONE
:
1129 if (wait_for_pending_requests(oct
))
1130 dev_err(&oct
->pci_dev
->dev
, "There were pending requests\n");
1132 if (lio_wait_for_instr_fetch(oct
))
1133 dev_err(&oct
->pci_dev
->dev
, "IQ had pending instructions\n");
1135 /* Disable the input and output queues now. No more packets will
1136 * arrive from Octeon, but we should wait for all packet
1137 * processing to finish.
1139 oct
->fn_list
.disable_io_queues(oct
);
1141 if (lio_wait_for_oq_pkts(oct
))
1142 dev_err(&oct
->pci_dev
->dev
, "OQ had pending packets\n");
1144 /* Disable interrupts */
1145 oct
->fn_list
.disable_interrupt(oct
->chip
);
1147 /* Release the interrupt line */
1148 free_irq(oct
->pci_dev
->irq
, oct
);
1150 if (oct
->flags
& LIO_FLAG_MSI_ENABLED
)
1151 pci_disable_msi(oct
->pci_dev
);
1153 /* Soft reset the octeon device before exiting */
1154 oct
->fn_list
.soft_reset(oct
);
1156 /* Disable the device, releasing the PCI INT */
1157 pci_disable_device(oct
->pci_dev
);
1160 case OCT_DEV_IN_RESET
:
1161 case OCT_DEV_DROQ_INIT_DONE
:
1162 /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
1164 for (i
= 0; i
< MAX_OCTEON_OUTPUT_QUEUES(oct
); i
++) {
1165 if (!(oct
->io_qmask
.oq
& (1ULL << i
)))
1167 octeon_delete_droq(oct
, i
);
1170 /* Force any pending handshakes to complete */
1171 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
1175 handshake
[oct
->octeon_id
].init_ok
= 0;
1176 complete(&handshake
[oct
->octeon_id
].init
);
1177 handshake
[oct
->octeon_id
].started_ok
= 0;
1178 complete(&handshake
[oct
->octeon_id
].started
);
1183 case OCT_DEV_RESP_LIST_INIT_DONE
:
1184 octeon_delete_response_list(oct
);
1187 case OCT_DEV_SC_BUFF_POOL_INIT_DONE
:
1188 octeon_free_sc_buffer_pool(oct
);
1191 case OCT_DEV_INSTR_QUEUE_INIT_DONE
:
1192 for (i
= 0; i
< MAX_OCTEON_INSTR_QUEUES(oct
); i
++) {
1193 if (!(oct
->io_qmask
.iq
& (1ULL << i
)))
1195 octeon_delete_instr_queue(oct
, i
);
1199 case OCT_DEV_DISPATCH_INIT_DONE
:
1200 octeon_delete_dispatch_list(oct
);
1201 cancel_delayed_work_sync(&oct
->nic_poll_work
.work
);
1204 case OCT_DEV_PCI_MAP_DONE
:
1205 octeon_unmap_pci_barx(oct
, 0);
1206 octeon_unmap_pci_barx(oct
, 1);
1209 case OCT_DEV_BEGIN_STATE
:
1210 /* Nothing to be done here either */
1212 } /* end switch(oct->status) */
1214 tasklet_kill(&oct_priv
->droq_tasklet
);
1218 * \brief Send Rx control command
1219 * @param lio per-network private data
1220 * @param start_stop whether to start or stop
1222 static void send_rx_ctrl_cmd(struct lio
*lio
, int start_stop
)
1224 struct octnic_ctrl_pkt nctrl
;
1226 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
1228 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_RX_CTL
;
1229 nctrl
.ncmd
.s
.param1
= start_stop
;
1230 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
1231 nctrl
.netpndev
= (u64
)lio
->netdev
;
1233 if (octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
) < 0)
1234 netif_info(lio
, rx_err
, lio
->netdev
, "Failed to send RX Control message\n");
1238 * \brief Destroy NIC device interface
1239 * @param oct octeon device
1240 * @param ifidx which interface to destroy
1242 * Cleanup associated with each interface for an Octeon device when NIC
1243 * module is being unloaded or if initialization fails during load.
1245 static void liquidio_destroy_nic_device(struct octeon_device
*oct
, int ifidx
)
1247 struct net_device
*netdev
= oct
->props
[ifidx
].netdev
;
1251 dev_err(&oct
->pci_dev
->dev
, "%s No netdevice ptr for index %d\n",
1256 lio
= GET_LIO(netdev
);
1258 dev_dbg(&oct
->pci_dev
->dev
, "NIC device cleanup\n");
1260 send_rx_ctrl_cmd(lio
, 0);
1262 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_RUNNING
)
1265 if (atomic_read(&lio
->ifstate
) & LIO_IFSTATE_REGISTERED
)
1266 unregister_netdev(netdev
);
1270 free_netdev(netdev
);
1272 oct
->props
[ifidx
].gmxport
= -1;
1274 oct
->props
[ifidx
].netdev
= NULL
;
1278 * \brief Stop complete NIC functionality
1279 * @param oct octeon device
1281 static int liquidio_stop_nic_module(struct octeon_device
*oct
)
1286 dev_dbg(&oct
->pci_dev
->dev
, "Stopping network interfaces\n");
1287 if (!oct
->ifcount
) {
1288 dev_err(&oct
->pci_dev
->dev
, "Init for Octeon was not completed\n");
1292 for (i
= 0; i
< oct
->ifcount
; i
++) {
1293 lio
= GET_LIO(oct
->props
[i
].netdev
);
1294 for (j
= 0; j
< lio
->linfo
.num_rxpciq
; j
++)
1295 octeon_unregister_droq_ops(oct
,
1296 lio
->linfo
.rxpciq
[j
].s
.q_no
);
1299 for (i
= 0; i
< oct
->ifcount
; i
++)
1300 liquidio_destroy_nic_device(oct
, i
);
1302 dev_dbg(&oct
->pci_dev
->dev
, "Network interfaces stopped\n");
1307 * \brief Cleans up resources at unload time
1308 * @param pdev PCI device structure
1310 static void liquidio_remove(struct pci_dev
*pdev
)
1312 struct octeon_device
*oct_dev
= pci_get_drvdata(pdev
);
1314 dev_dbg(&oct_dev
->pci_dev
->dev
, "Stopping device\n");
1316 if (oct_dev
->app_mode
&& (oct_dev
->app_mode
== CVM_DRV_NIC_APP
))
1317 liquidio_stop_nic_module(oct_dev
);
1319 /* Reset the octeon device and cleanup all memory allocated for
1320 * the octeon device by driver.
1322 octeon_destroy_resources(oct_dev
);
1324 dev_info(&oct_dev
->pci_dev
->dev
, "Device removed\n");
1326 /* This octeon device has been removed. Update the global
1327 * data structure to reflect this. Free the device structure.
1329 octeon_free_device_mem(oct_dev
);
1333 * \brief Identify the Octeon device and to map the BAR address space
1334 * @param oct octeon device
1336 static int octeon_chip_specific_setup(struct octeon_device
*oct
)
1341 pci_read_config_dword(oct
->pci_dev
, 0, &dev_id
);
1342 pci_read_config_dword(oct
->pci_dev
, 8, &rev_id
);
1343 oct
->rev_id
= rev_id
& 0xff;
1346 case OCTEON_CN68XX_PCIID
:
1347 oct
->chip_id
= OCTEON_CN68XX
;
1348 ret
= lio_setup_cn68xx_octeon_device(oct
);
1351 case OCTEON_CN66XX_PCIID
:
1352 oct
->chip_id
= OCTEON_CN66XX
;
1353 ret
= lio_setup_cn66xx_octeon_device(oct
);
1356 dev_err(&oct
->pci_dev
->dev
, "Unknown device found (dev_id: %x)\n",
1361 dev_info(&oct
->pci_dev
->dev
, "CN68XX PASS%d.%d %s\n",
1362 OCTEON_MAJOR_REV(oct
),
1363 OCTEON_MINOR_REV(oct
),
1364 octeon_get_conf(oct
)->card_name
);
1370 * \brief PCI initialization for each Octeon device.
1371 * @param oct octeon device
1373 static int octeon_pci_os_setup(struct octeon_device
*oct
)
1375 /* setup PCI stuff first */
1376 if (pci_enable_device(oct
->pci_dev
)) {
1377 dev_err(&oct
->pci_dev
->dev
, "pci_enable_device failed\n");
1381 if (dma_set_mask_and_coherent(&oct
->pci_dev
->dev
, DMA_BIT_MASK(64))) {
1382 dev_err(&oct
->pci_dev
->dev
, "Unexpected DMA device capability\n");
1386 /* Enable PCI DMA Master. */
1387 pci_set_master(oct
->pci_dev
);
1392 static inline int skb_iq(struct lio
*lio
, struct sk_buff
*skb
)
1396 if (netif_is_multiqueue(lio
->netdev
))
1397 q
= skb
->queue_mapping
% lio
->linfo
.num_txpciq
;
1403 * \brief Check Tx queue state for a given network buffer
1404 * @param lio per-network private data
1405 * @param skb network buffer
1407 static inline int check_txq_state(struct lio
*lio
, struct sk_buff
*skb
)
1411 if (netif_is_multiqueue(lio
->netdev
)) {
1412 q
= skb
->queue_mapping
;
1413 iq
= lio
->linfo
.txpciq
[(q
% (lio
->linfo
.num_txpciq
))].s
.q_no
;
1419 if (octnet_iq_is_full(lio
->oct_dev
, iq
))
1422 if (__netif_subqueue_stopped(lio
->netdev
, q
))
1423 wake_q(lio
->netdev
, q
);
1428 * \brief Unmap and free network buffer
1431 static void free_netbuf(void *buf
)
1433 struct sk_buff
*skb
;
1434 struct octnet_buf_free_info
*finfo
;
1437 finfo
= (struct octnet_buf_free_info
*)buf
;
1441 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
, finfo
->dptr
, skb
->len
,
1444 check_txq_state(lio
, skb
);
1446 tx_buffer_free(skb
);
1450 * \brief Unmap and free gather buffer
1453 static void free_netsgbuf(void *buf
)
1455 struct octnet_buf_free_info
*finfo
;
1456 struct sk_buff
*skb
;
1458 struct octnic_gather
*g
;
1461 finfo
= (struct octnet_buf_free_info
*)buf
;
1465 frags
= skb_shinfo(skb
)->nr_frags
;
1467 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1468 g
->sg
[0].ptr
[0], (skb
->len
- skb
->data_len
),
1473 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1475 pci_unmap_page((lio
->oct_dev
)->pci_dev
,
1476 g
->sg
[(i
>> 2)].ptr
[(i
& 3)],
1477 frag
->size
, DMA_TO_DEVICE
);
1481 dma_sync_single_for_cpu(&lio
->oct_dev
->pci_dev
->dev
,
1482 g
->sg_dma_ptr
, g
->sg_size
, DMA_TO_DEVICE
);
1484 iq
= skb_iq(lio
, skb
);
1485 spin_lock(&lio
->glist_lock
[iq
]);
1486 list_add_tail(&g
->list
, &lio
->glist
[iq
]);
1487 spin_unlock(&lio
->glist_lock
[iq
]);
1489 check_txq_state(lio
, skb
); /* mq support: sub-queue state check */
1491 tx_buffer_free(skb
);
1495 * \brief Unmap and free gather buffer with response
1498 static void free_netsgbuf_with_resp(void *buf
)
1500 struct octeon_soft_command
*sc
;
1501 struct octnet_buf_free_info
*finfo
;
1502 struct sk_buff
*skb
;
1504 struct octnic_gather
*g
;
1507 sc
= (struct octeon_soft_command
*)buf
;
1508 skb
= (struct sk_buff
*)sc
->callback_arg
;
1509 finfo
= (struct octnet_buf_free_info
*)&skb
->cb
;
1513 frags
= skb_shinfo(skb
)->nr_frags
;
1515 dma_unmap_single(&lio
->oct_dev
->pci_dev
->dev
,
1516 g
->sg
[0].ptr
[0], (skb
->len
- skb
->data_len
),
1521 struct skb_frag_struct
*frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1523 pci_unmap_page((lio
->oct_dev
)->pci_dev
,
1524 g
->sg
[(i
>> 2)].ptr
[(i
& 3)],
1525 frag
->size
, DMA_TO_DEVICE
);
1529 dma_sync_single_for_cpu(&lio
->oct_dev
->pci_dev
->dev
,
1530 g
->sg_dma_ptr
, g
->sg_size
, DMA_TO_DEVICE
);
1532 iq
= skb_iq(lio
, skb
);
1534 spin_lock(&lio
->glist_lock
[iq
]);
1535 list_add_tail(&g
->list
, &lio
->glist
[iq
]);
1536 spin_unlock(&lio
->glist_lock
[iq
]);
1538 /* Don't free the skb yet */
1540 check_txq_state(lio
, skb
);
1544 * \brief Adjust ptp frequency
1545 * @param ptp PTP clock info
1546 * @param ppb how much to adjust by, in parts-per-billion
1548 static int liquidio_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
1550 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1551 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1553 unsigned long flags
;
1554 bool neg_adj
= false;
1561 /* The hardware adds the clock compensation value to the
1562 * PTP clock on every coprocessor clock cycle, so we
1563 * compute the delta in terms of coprocessor clocks.
1565 delta
= (u64
)ppb
<< 32;
1566 do_div(delta
, oct
->coproc_clock_rate
);
1568 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1569 comp
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1574 lio_pci_writeq(oct
, comp
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1575 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1581 * \brief Adjust ptp time
1582 * @param ptp PTP clock info
1583 * @param delta how much to adjust by, in nanosecs
1585 static int liquidio_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
1587 unsigned long flags
;
1588 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1590 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1591 lio
->ptp_adjust
+= delta
;
1592 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1598 * \brief Get hardware clock time, including any adjustment
1599 * @param ptp PTP clock info
1600 * @param ts timespec
1602 static int liquidio_ptp_gettime(struct ptp_clock_info
*ptp
,
1603 struct timespec64
*ts
)
1606 unsigned long flags
;
1607 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1608 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1610 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1611 ns
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_HI
);
1612 ns
+= lio
->ptp_adjust
;
1613 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1615 *ts
= ns_to_timespec64(ns
);
1621 * \brief Set hardware clock time. Reset adjustment
1622 * @param ptp PTP clock info
1623 * @param ts timespec
1625 static int liquidio_ptp_settime(struct ptp_clock_info
*ptp
,
1626 const struct timespec64
*ts
)
1629 unsigned long flags
;
1630 struct lio
*lio
= container_of(ptp
, struct lio
, ptp_info
);
1631 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1633 ns
= timespec_to_ns(ts
);
1635 spin_lock_irqsave(&lio
->ptp_lock
, flags
);
1636 lio_pci_writeq(oct
, ns
, CN6XXX_MIO_PTP_CLOCK_HI
);
1637 lio
->ptp_adjust
= 0;
1638 spin_unlock_irqrestore(&lio
->ptp_lock
, flags
);
1644 * \brief Check if PTP is enabled
1645 * @param ptp PTP clock info
1647 * @param on is it on
1649 static int liquidio_ptp_enable(struct ptp_clock_info
*ptp
,
1650 struct ptp_clock_request
*rq
, int on
)
1656 * \brief Open PTP clock source
1657 * @param netdev network device
1659 static void oct_ptp_open(struct net_device
*netdev
)
1661 struct lio
*lio
= GET_LIO(netdev
);
1662 struct octeon_device
*oct
= (struct octeon_device
*)lio
->oct_dev
;
1664 spin_lock_init(&lio
->ptp_lock
);
1666 snprintf(lio
->ptp_info
.name
, 16, "%s", netdev
->name
);
1667 lio
->ptp_info
.owner
= THIS_MODULE
;
1668 lio
->ptp_info
.max_adj
= 250000000;
1669 lio
->ptp_info
.n_alarm
= 0;
1670 lio
->ptp_info
.n_ext_ts
= 0;
1671 lio
->ptp_info
.n_per_out
= 0;
1672 lio
->ptp_info
.pps
= 0;
1673 lio
->ptp_info
.adjfreq
= liquidio_ptp_adjfreq
;
1674 lio
->ptp_info
.adjtime
= liquidio_ptp_adjtime
;
1675 lio
->ptp_info
.gettime64
= liquidio_ptp_gettime
;
1676 lio
->ptp_info
.settime64
= liquidio_ptp_settime
;
1677 lio
->ptp_info
.enable
= liquidio_ptp_enable
;
1679 lio
->ptp_adjust
= 0;
1681 lio
->ptp_clock
= ptp_clock_register(&lio
->ptp_info
,
1682 &oct
->pci_dev
->dev
);
1684 if (IS_ERR(lio
->ptp_clock
))
1685 lio
->ptp_clock
= NULL
;
1689 * \brief Init PTP clock
1690 * @param oct octeon device
1692 static void liquidio_ptp_init(struct octeon_device
*oct
)
1694 u64 clock_comp
, cfg
;
1696 clock_comp
= (u64
)NSEC_PER_SEC
<< 32;
1697 do_div(clock_comp
, oct
->coproc_clock_rate
);
1698 lio_pci_writeq(oct
, clock_comp
, CN6XXX_MIO_PTP_CLOCK_COMP
);
1701 cfg
= lio_pci_readq(oct
, CN6XXX_MIO_PTP_CLOCK_CFG
);
1702 lio_pci_writeq(oct
, cfg
| 0x01, CN6XXX_MIO_PTP_CLOCK_CFG
);
1706 * \brief Load firmware to device
1707 * @param oct octeon device
1709 * Maps device to firmware filename, requests firmware, and downloads it
1711 static int load_firmware(struct octeon_device
*oct
)
1714 const struct firmware
*fw
;
1715 char fw_name
[LIO_MAX_FW_FILENAME_LEN
];
1718 if (strncmp(fw_type
, LIO_FW_NAME_TYPE_NONE
,
1719 sizeof(LIO_FW_NAME_TYPE_NONE
)) == 0) {
1720 dev_info(&oct
->pci_dev
->dev
, "Skipping firmware load\n");
1724 if (fw_type
[0] == '\0')
1725 tmp_fw_type
= LIO_FW_NAME_TYPE_NIC
;
1727 tmp_fw_type
= fw_type
;
1729 sprintf(fw_name
, "%s%s%s_%s%s", LIO_FW_DIR
, LIO_FW_BASE_NAME
,
1730 octeon_get_conf(oct
)->card_name
, tmp_fw_type
,
1731 LIO_FW_NAME_SUFFIX
);
1733 ret
= request_firmware(&fw
, fw_name
, &oct
->pci_dev
->dev
);
1735 dev_err(&oct
->pci_dev
->dev
, "Request firmware failed. Could not find file %s.\n.",
1740 ret
= octeon_download_firmware(oct
, fw
->data
, fw
->size
);
1742 release_firmware(fw
);
1748 * \brief Setup output queue
1749 * @param oct octeon device
1750 * @param q_no which queue
1751 * @param num_descs how many descriptors
1752 * @param desc_size size of each descriptor
1753 * @param app_ctx application context
1755 static int octeon_setup_droq(struct octeon_device
*oct
, int q_no
, int num_descs
,
1756 int desc_size
, void *app_ctx
)
1760 dev_dbg(&oct
->pci_dev
->dev
, "Creating Droq: %d\n", q_no
);
1761 /* droq creation and local register settings. */
1762 ret_val
= octeon_create_droq(oct
, q_no
, num_descs
, desc_size
, app_ctx
);
1767 dev_dbg(&oct
->pci_dev
->dev
, "Using default droq %d\n", q_no
);
1770 /* tasklet creation for the droq */
1772 /* Enable the droq queues */
1773 octeon_set_droq_pkt_op(oct
, q_no
, 1);
1775 /* Send Credit for Octeon Output queues. Credits are always
1776 * sent after the output queue is enabled.
1778 writel(oct
->droq
[q_no
]->max_count
,
1779 oct
->droq
[q_no
]->pkts_credit_reg
);
1785 * \brief Callback for getting interface configuration
1786 * @param status status of request
1787 * @param buf pointer to resp structure
1789 static void if_cfg_callback(struct octeon_device
*oct
,
1793 struct octeon_soft_command
*sc
= (struct octeon_soft_command
*)buf
;
1794 struct liquidio_if_cfg_resp
*resp
;
1795 struct liquidio_if_cfg_context
*ctx
;
1797 resp
= (struct liquidio_if_cfg_resp
*)sc
->virtrptr
;
1798 ctx
= (struct liquidio_if_cfg_context
*)sc
->ctxptr
;
1800 oct
= lio_get_device(ctx
->octeon_id
);
1802 dev_err(&oct
->pci_dev
->dev
, "nic if cfg instruction failed. Status: %llx\n",
1803 CVM_CAST64(resp
->status
));
1804 ACCESS_ONCE(ctx
->cond
) = 1;
1806 /* This barrier is required to be sure that the response has been
1807 * written fully before waking up the handler
1811 wake_up_interruptible(&ctx
->wc
);
1815 * \brief Select queue based on hash
1816 * @param dev Net device
1817 * @param skb sk_buff structure
1818 * @returns selected queue number
1820 static u16
select_q(struct net_device
*dev
, struct sk_buff
*skb
,
1821 void *accel_priv
, select_queue_fallback_t fallback
)
1827 qindex
= skb_tx_hash(dev
, skb
);
1829 return (u16
)(qindex
% (lio
->linfo
.num_txpciq
));
1832 /** Routine to push packets arriving on Octeon interface upto network layer.
1833 * @param oct_id - octeon device id.
1834 * @param skbuff - skbuff struct to be passed to network layer.
1835 * @param len - size of total data received.
1836 * @param rh - Control header associated with the packet
1837 * @param param - additional control data with the packet
1838 * @param arg - farg registered in droq_ops
1841 liquidio_push_packet(u32 octeon_id
,
1844 union octeon_rh
*rh
,
1848 struct napi_struct
*napi
= param
;
1849 struct sk_buff
*skb
= (struct sk_buff
*)skbuff
;
1850 struct skb_shared_hwtstamps
*shhwtstamps
;
1852 struct net_device
*netdev
= (struct net_device
*)arg
;
1853 struct octeon_droq
*droq
= container_of(param
, struct octeon_droq
,
1856 int packet_was_received
;
1857 struct lio
*lio
= GET_LIO(netdev
);
1858 struct octeon_device
*oct
= lio
->oct_dev
;
1860 /* Do not proceed if the interface is not in RUNNING state. */
1861 if (!ifstate_check(lio
, LIO_IFSTATE_RUNNING
)) {
1862 recv_buffer_free(skb
);
1863 droq
->stats
.rx_dropped
++;
1869 skb_record_rx_queue(skb
, droq
->q_no
);
1870 if (likely(len
> MIN_SKB_SIZE
)) {
1871 struct octeon_skb_page_info
*pg_info
;
1874 pg_info
= ((struct octeon_skb_page_info
*)(skb
->cb
));
1875 if (pg_info
->page
) {
1876 /* For Paged allocation use the frags */
1877 va
= page_address(pg_info
->page
) +
1878 pg_info
->page_offset
;
1879 memcpy(skb
->data
, va
, MIN_SKB_SIZE
);
1880 skb_put(skb
, MIN_SKB_SIZE
);
1881 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
,
1883 pg_info
->page_offset
+
1889 struct octeon_skb_page_info
*pg_info
=
1890 ((struct octeon_skb_page_info
*)(skb
->cb
));
1891 skb_copy_to_linear_data(skb
, page_address(pg_info
->page
)
1892 + pg_info
->page_offset
, len
);
1894 put_page(pg_info
->page
);
1897 if (((oct
->chip_id
== OCTEON_CN66XX
) ||
1898 (oct
->chip_id
== OCTEON_CN68XX
)) &&
1900 if (rh
->r_dh
.has_hwtstamp
) {
1901 /* timestamp is included from the hardware at
1902 * the beginning of the packet.
1905 (lio
, LIO_IFSTATE_RX_TIMESTAMP_ENABLED
)) {
1906 /* Nanoseconds are in the first 64-bits
1909 memcpy(&ns
, (skb
->data
), sizeof(ns
));
1910 shhwtstamps
= skb_hwtstamps(skb
);
1911 shhwtstamps
->hwtstamp
=
1915 skb_pull(skb
, sizeof(ns
));
1919 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1921 if ((netdev
->features
& NETIF_F_RXCSUM
) &&
1922 (rh
->r_dh
.csum_verified
== CNNIC_CSUM_VERIFIED
))
1923 /* checksum has already been verified */
1924 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1926 skb
->ip_summed
= CHECKSUM_NONE
;
1928 packet_was_received
= napi_gro_receive(napi
, skb
) != GRO_DROP
;
1930 if (packet_was_received
) {
1931 droq
->stats
.rx_bytes_received
+= len
;
1932 droq
->stats
.rx_pkts_received
++;
1933 netdev
->last_rx
= jiffies
;
1935 droq
->stats
.rx_dropped
++;
1936 netif_info(lio
, rx_err
, lio
->netdev
,
1937 "droq:%d error rx_dropped:%llu\n",
1938 droq
->q_no
, droq
->stats
.rx_dropped
);
1942 recv_buffer_free(skb
);
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
1960 * \brief callback when receive interrupt occurs and we are in NAPI mode
1961 * @param arg pointer to octeon output queue
1963 static void liquidio_napi_drv_callback(void *arg
)
1965 struct octeon_droq
*droq
= arg
;
1966 int this_cpu
= smp_processor_id();
1968 if (droq
->cpu_id
== this_cpu
) {
1969 napi_schedule(&droq
->napi
);
1971 struct call_single_data
*csd
= &droq
->csd
;
1973 csd
->func
= napi_schedule_wrapper
;
1974 csd
->info
= &droq
->napi
;
1977 smp_call_function_single_async(droq
->cpu_id
, csd
);
1982 * \brief Main NAPI poll function
1983 * @param droq octeon output queue
1984 * @param budget maximum number of items to process
1986 static int liquidio_napi_do_rx(struct octeon_droq
*droq
, int budget
)
1989 struct lio
*lio
= GET_LIO(droq
->napi
.dev
);
1990 struct octeon_device
*oct
= lio
->oct_dev
;
1992 work_done
= octeon_process_droq_poll_cmd(oct
, droq
->q_no
,
1993 POLL_EVENT_PROCESS_PKTS
,
1995 if (work_done
< 0) {
1996 netif_info(lio
, rx_err
, lio
->netdev
,
1997 "Receive work_done < 0, rxq:%d\n", droq
->q_no
);
1998 goto octnet_napi_finish
;
2001 if (work_done
> budget
)
2002 dev_err(&oct
->pci_dev
->dev
, ">>>> %s work_done: %d budget: %d\n",
2003 __func__
, work_done
, budget
);
2008 napi_complete(&droq
->napi
);
2009 octeon_process_droq_poll_cmd(oct
, droq
->q_no
, POLL_EVENT_ENABLE_INTR
,
2015 * \brief Entry point for NAPI polling
2016 * @param napi NAPI structure
2017 * @param budget maximum number of items to process
2019 static int liquidio_napi_poll(struct napi_struct
*napi
, int budget
)
2021 struct octeon_droq
*droq
;
2024 droq
= container_of(napi
, struct octeon_droq
, napi
);
2026 work_done
= liquidio_napi_do_rx(droq
, budget
);
2028 if (work_done
< budget
) {
2029 napi_complete(napi
);
2030 octeon_process_droq_poll_cmd(droq
->oct_dev
, droq
->q_no
,
2031 POLL_EVENT_ENABLE_INTR
, 0);
2039 * \brief Setup input and output queues
2040 * @param octeon_dev octeon device
2041 * @param net_device Net device
2043 * Note: Queues are with respect to the octeon device. Thus
2044 * an input queue is for egress packets, and output queues
2045 * are for ingress packets.
2047 static inline int setup_io_queues(struct octeon_device
*octeon_dev
,
2050 struct octeon_droq_ops droq_ops
;
2051 struct net_device
*netdev
;
2053 static int cpu_id_modulus
;
2054 struct octeon_droq
*droq
;
2055 struct napi_struct
*napi
;
2056 int q
, q_no
, retval
= 0;
2060 netdev
= octeon_dev
->props
[ifidx
].netdev
;
2062 lio
= GET_LIO(netdev
);
2064 memset(&droq_ops
, 0, sizeof(struct octeon_droq_ops
));
2066 droq_ops
.fptr
= liquidio_push_packet
;
2067 droq_ops
.farg
= (void *)netdev
;
2069 droq_ops
.poll_mode
= 1;
2070 droq_ops
.napi_fn
= liquidio_napi_drv_callback
;
2072 cpu_id_modulus
= num_present_cpus();
2075 for (q
= 0; q
< lio
->linfo
.num_rxpciq
; q
++) {
2076 q_no
= lio
->linfo
.rxpciq
[q
].s
.q_no
;
2077 dev_dbg(&octeon_dev
->pci_dev
->dev
,
2078 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2080 retval
= octeon_setup_droq(octeon_dev
, q_no
,
2081 CFG_GET_NUM_RX_DESCS_NIC_IF
2082 (octeon_get_conf(octeon_dev
),
2084 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2085 (octeon_get_conf(octeon_dev
),
2088 dev_err(&octeon_dev
->pci_dev
->dev
,
2089 " %s : Runtime DROQ(RxQ) creation failed.\n",
2094 droq
= octeon_dev
->droq
[q_no
];
2096 dev_dbg(&octeon_dev
->pci_dev
->dev
,
2097 "netif_napi_add netdev:%llx oct:%llx\n",
2100 netif_napi_add(netdev
, napi
, liquidio_napi_poll
, 64);
2102 /* designate a CPU for this droq */
2103 droq
->cpu_id
= cpu_id
;
2105 if (cpu_id
>= cpu_id_modulus
)
2108 octeon_register_droq_ops(octeon_dev
, q_no
, &droq_ops
);
2112 for (q
= 0; q
< lio
->linfo
.num_txpciq
; q
++) {
2113 num_tx_descs
= CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2116 retval
= octeon_setup_iq(octeon_dev
, ifidx
, q
,
2117 lio
->linfo
.txpciq
[q
], num_tx_descs
,
2118 netdev_get_tx_queue(netdev
, q
));
2120 dev_err(&octeon_dev
->pci_dev
->dev
,
2121 " %s : Runtime IQ(TxQ) creation failed.\n",
2131 * \brief Poll routine for checking transmit queue status
2132 * @param work work_struct data structure
2134 static void octnet_poll_check_txq_status(struct work_struct
*work
)
2136 struct cavium_wk
*wk
= (struct cavium_wk
*)work
;
2137 struct lio
*lio
= (struct lio
*)wk
->ctxptr
;
2139 if (!ifstate_check(lio
, LIO_IFSTATE_RUNNING
))
2142 check_txq_status(lio
);
2143 queue_delayed_work(lio
->txq_status_wq
.wq
,
2144 &lio
->txq_status_wq
.wk
.work
, msecs_to_jiffies(1));
2148 * \brief Sets up the txq poll check
2149 * @param netdev network device
2151 static inline void setup_tx_poll_fn(struct net_device
*netdev
)
2153 struct lio
*lio
= GET_LIO(netdev
);
2154 struct octeon_device
*oct
= lio
->oct_dev
;
2156 lio
->txq_status_wq
.wq
= alloc_workqueue("txq-status",
2158 if (!lio
->txq_status_wq
.wq
) {
2159 dev_err(&oct
->pci_dev
->dev
, "unable to create cavium txq status wq\n");
2162 INIT_DELAYED_WORK(&lio
->txq_status_wq
.wk
.work
,
2163 octnet_poll_check_txq_status
);
2164 lio
->txq_status_wq
.wk
.ctxptr
= lio
;
2165 queue_delayed_work(lio
->txq_status_wq
.wq
,
2166 &lio
->txq_status_wq
.wk
.work
, msecs_to_jiffies(1));
2170 * \brief Net device open for LiquidIO
2171 * @param netdev network device
2173 static int liquidio_open(struct net_device
*netdev
)
2175 struct lio
*lio
= GET_LIO(netdev
);
2176 struct octeon_device
*oct
= lio
->oct_dev
;
2177 struct napi_struct
*napi
, *n
;
2179 list_for_each_entry_safe(napi
, n
, &netdev
->napi_list
, dev_list
)
2182 oct_ptp_open(netdev
);
2184 ifstate_set(lio
, LIO_IFSTATE_RUNNING
);
2185 setup_tx_poll_fn(netdev
);
2188 netif_info(lio
, ifup
, lio
->netdev
, "Interface Open, ready for traffic\n");
2189 try_module_get(THIS_MODULE
);
2191 /* tell Octeon to start forwarding packets to host */
2192 send_rx_ctrl_cmd(lio
, 1);
2194 /* Ready for link status updates */
2197 dev_info(&oct
->pci_dev
->dev
, "%s interface is opened\n",
2204 * \brief Net device stop for LiquidIO
2205 * @param netdev network device
2207 static int liquidio_stop(struct net_device
*netdev
)
2209 struct napi_struct
*napi
, *n
;
2210 struct lio
*lio
= GET_LIO(netdev
);
2211 struct octeon_device
*oct
= lio
->oct_dev
;
2213 netif_info(lio
, ifdown
, lio
->netdev
, "Stopping interface!\n");
2214 /* Inform that netif carrier is down */
2216 lio
->linfo
.link
.s
.link_up
= 0;
2217 lio
->link_changes
++;
2219 netif_carrier_off(netdev
);
2221 /* tell Octeon to stop forwarding packets to host */
2222 send_rx_ctrl_cmd(lio
, 0);
2224 cancel_delayed_work_sync(&lio
->txq_status_wq
.wk
.work
);
2225 destroy_workqueue(lio
->txq_status_wq
.wq
);
2227 if (lio
->ptp_clock
) {
2228 ptp_clock_unregister(lio
->ptp_clock
);
2229 lio
->ptp_clock
= NULL
;
2232 ifstate_reset(lio
, LIO_IFSTATE_RUNNING
);
2234 /* This is a hack that allows DHCP to continue working. */
2235 set_bit(__LINK_STATE_START
, &lio
->netdev
->state
);
2237 list_for_each_entry_safe(napi
, n
, &netdev
->napi_list
, dev_list
)
2242 dev_info(&oct
->pci_dev
->dev
, "%s interface is stopped\n", netdev
->name
);
2243 module_put(THIS_MODULE
);
2248 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr
)
2250 struct octnic_ctrl_pkt
*nctrl
= (struct octnic_ctrl_pkt
*)nctrl_ptr
;
2251 struct net_device
*netdev
= (struct net_device
*)nctrl
->netpndev
;
2252 struct lio
*lio
= GET_LIO(netdev
);
2253 struct octeon_device
*oct
= lio
->oct_dev
;
2255 switch (nctrl
->ncmd
.s
.cmd
) {
2256 case OCTNET_CMD_CHANGE_DEVFLAGS
:
2257 case OCTNET_CMD_SET_MULTI_LIST
:
2260 case OCTNET_CMD_CHANGE_MACADDR
:
2261 /* If command is successful, change the MACADDR. */
2262 netif_info(lio
, probe
, lio
->netdev
, " MACAddr changed to 0x%llx\n",
2263 CVM_CAST64(nctrl
->udd
[0]));
2264 dev_info(&oct
->pci_dev
->dev
, "%s MACAddr changed to 0x%llx\n",
2265 netdev
->name
, CVM_CAST64(nctrl
->udd
[0]));
2266 memcpy(netdev
->dev_addr
, ((u8
*)&nctrl
->udd
[0]) + 2, ETH_ALEN
);
2269 case OCTNET_CMD_CHANGE_MTU
:
2270 /* If command is successful, change the MTU. */
2271 netif_info(lio
, probe
, lio
->netdev
, " MTU Changed from %d to %d\n",
2272 netdev
->mtu
, nctrl
->ncmd
.s
.param2
);
2273 dev_info(&oct
->pci_dev
->dev
, "%s MTU Changed from %d to %d\n",
2274 netdev
->name
, netdev
->mtu
,
2275 nctrl
->ncmd
.s
.param2
);
2276 netdev
->mtu
= nctrl
->ncmd
.s
.param2
;
2279 case OCTNET_CMD_GPIO_ACCESS
:
2280 netif_info(lio
, probe
, lio
->netdev
, "LED Flashing visual identification\n");
2284 case OCTNET_CMD_LRO_ENABLE
:
2285 dev_info(&oct
->pci_dev
->dev
, "%s LRO Enabled\n", netdev
->name
);
2288 case OCTNET_CMD_LRO_DISABLE
:
2289 dev_info(&oct
->pci_dev
->dev
, "%s LRO Disabled\n",
2293 case OCTNET_CMD_VERBOSE_ENABLE
:
2294 dev_info(&oct
->pci_dev
->dev
, "%s LRO Enabled\n", netdev
->name
);
2297 case OCTNET_CMD_VERBOSE_DISABLE
:
2298 dev_info(&oct
->pci_dev
->dev
, "%s LRO Disabled\n",
2302 case OCTNET_CMD_SET_SETTINGS
:
2303 dev_info(&oct
->pci_dev
->dev
, "%s settings changed\n",
2309 dev_err(&oct
->pci_dev
->dev
, "%s Unknown cmd %d\n", __func__
,
2315 * \brief Converts a mask based on net device flags
2316 * @param netdev network device
2318 * This routine generates a octnet_ifflags mask from the net device flags
2319 * received from the OS.
2321 static inline enum octnet_ifflags
get_new_flags(struct net_device
*netdev
)
2323 enum octnet_ifflags f
= OCTNET_IFFLAG_UNICAST
;
2325 if (netdev
->flags
& IFF_PROMISC
)
2326 f
|= OCTNET_IFFLAG_PROMISC
;
2328 if (netdev
->flags
& IFF_ALLMULTI
)
2329 f
|= OCTNET_IFFLAG_ALLMULTI
;
2331 if (netdev
->flags
& IFF_MULTICAST
) {
2332 f
|= OCTNET_IFFLAG_MULTICAST
;
2334 /* Accept all multicast addresses if there are more than we
2337 if (netdev_mc_count(netdev
) > MAX_OCTEON_MULTICAST_ADDR
)
2338 f
|= OCTNET_IFFLAG_ALLMULTI
;
2341 if (netdev
->flags
& IFF_BROADCAST
)
2342 f
|= OCTNET_IFFLAG_BROADCAST
;
2348 * \brief Net device set_multicast_list
2349 * @param netdev network device
2351 static void liquidio_set_mcast_list(struct net_device
*netdev
)
2353 struct lio
*lio
= GET_LIO(netdev
);
2354 struct octeon_device
*oct
= lio
->oct_dev
;
2355 struct octnic_ctrl_pkt nctrl
;
2356 struct netdev_hw_addr
*ha
;
2359 int mc_count
= min(netdev_mc_count(netdev
), MAX_OCTEON_MULTICAST_ADDR
);
2361 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2363 /* Create a ctrl pkt command to be sent to core app. */
2365 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_SET_MULTI_LIST
;
2366 nctrl
.ncmd
.s
.param1
= get_new_flags(netdev
);
2367 nctrl
.ncmd
.s
.param2
= mc_count
;
2368 nctrl
.ncmd
.s
.more
= mc_count
;
2369 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
2370 nctrl
.netpndev
= (u64
)netdev
;
2371 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2373 /* copy all the addresses into the udd */
2376 netdev_for_each_mc_addr(ha
, netdev
) {
2378 memcpy(((u8
*)mc
) + 2, ha
->addr
, ETH_ALEN
);
2379 /* no need to swap bytes */
2381 if (++mc
> &nctrl
.udd
[mc_count
])
2385 /* Apparently, any activity in this call from the kernel has to
2386 * be atomic. So we won't wait for response.
2388 nctrl
.wait_time
= 0;
2390 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
2392 dev_err(&oct
->pci_dev
->dev
, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2398 * \brief Net device set_mac_address
2399 * @param netdev network device
2401 static int liquidio_set_mac(struct net_device
*netdev
, void *p
)
2404 struct lio
*lio
= GET_LIO(netdev
);
2405 struct octeon_device
*oct
= lio
->oct_dev
;
2406 struct sockaddr
*addr
= (struct sockaddr
*)p
;
2407 struct octnic_ctrl_pkt nctrl
;
2409 if (!is_valid_ether_addr(addr
->sa_data
))
2410 return -EADDRNOTAVAIL
;
2412 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2415 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_CHANGE_MACADDR
;
2416 nctrl
.ncmd
.s
.param1
= 0;
2417 nctrl
.ncmd
.s
.more
= 1;
2418 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
2419 nctrl
.netpndev
= (u64
)netdev
;
2420 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2421 nctrl
.wait_time
= 100;
2424 /* The MAC Address is presented in network byte order. */
2425 memcpy((u8
*)&nctrl
.udd
[0] + 2, addr
->sa_data
, ETH_ALEN
);
2427 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
2429 dev_err(&oct
->pci_dev
->dev
, "MAC Address change failed\n");
2432 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
2433 memcpy(((u8
*)&lio
->linfo
.hw_addr
) + 2, addr
->sa_data
, ETH_ALEN
);
2439 * \brief Net device get_stats
2440 * @param netdev network device
2442 static struct net_device_stats
*liquidio_get_stats(struct net_device
*netdev
)
2444 struct lio
*lio
= GET_LIO(netdev
);
2445 struct net_device_stats
*stats
= &netdev
->stats
;
2446 struct octeon_device
*oct
;
2447 u64 pkts
= 0, drop
= 0, bytes
= 0;
2448 struct oct_droq_stats
*oq_stats
;
2449 struct oct_iq_stats
*iq_stats
;
2450 int i
, iq_no
, oq_no
;
2454 for (i
= 0; i
< lio
->linfo
.num_txpciq
; i
++) {
2455 iq_no
= lio
->linfo
.txpciq
[i
].s
.q_no
;
2456 iq_stats
= &oct
->instr_queue
[iq_no
]->stats
;
2457 pkts
+= iq_stats
->tx_done
;
2458 drop
+= iq_stats
->tx_dropped
;
2459 bytes
+= iq_stats
->tx_tot_bytes
;
2462 stats
->tx_packets
= pkts
;
2463 stats
->tx_bytes
= bytes
;
2464 stats
->tx_dropped
= drop
;
2470 for (i
= 0; i
< lio
->linfo
.num_rxpciq
; i
++) {
2471 oq_no
= lio
->linfo
.rxpciq
[i
].s
.q_no
;
2472 oq_stats
= &oct
->droq
[oq_no
]->stats
;
2473 pkts
+= oq_stats
->rx_pkts_received
;
2474 drop
+= (oq_stats
->rx_dropped
+
2475 oq_stats
->dropped_nodispatch
+
2476 oq_stats
->dropped_toomany
+
2477 oq_stats
->dropped_nomem
);
2478 bytes
+= oq_stats
->rx_bytes_received
;
2481 stats
->rx_bytes
= bytes
;
2482 stats
->rx_packets
= pkts
;
2483 stats
->rx_dropped
= drop
;
2489 * \brief Net device change_mtu
2490 * @param netdev network device
2492 static int liquidio_change_mtu(struct net_device
*netdev
, int new_mtu
)
2494 struct lio
*lio
= GET_LIO(netdev
);
2495 struct octeon_device
*oct
= lio
->oct_dev
;
2496 struct octnic_ctrl_pkt nctrl
;
2497 int max_frm_size
= new_mtu
+ OCTNET_FRM_HEADER_SIZE
;
2500 /* Limit the MTU to make sure the ethernet packets are between 64 bytes
2503 if ((max_frm_size
< OCTNET_MIN_FRM_SIZE
) ||
2504 (max_frm_size
> OCTNET_MAX_FRM_SIZE
)) {
2505 dev_err(&oct
->pci_dev
->dev
, "Invalid MTU: %d\n", new_mtu
);
2506 dev_err(&oct
->pci_dev
->dev
, "Valid range %d and %d\n",
2507 (OCTNET_MIN_FRM_SIZE
- OCTNET_FRM_HEADER_SIZE
),
2508 (OCTNET_MAX_FRM_SIZE
- OCTNET_FRM_HEADER_SIZE
));
2512 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2515 nctrl
.ncmd
.s
.cmd
= OCTNET_CMD_CHANGE_MTU
;
2516 nctrl
.ncmd
.s
.param1
= new_mtu
;
2517 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
2518 nctrl
.wait_time
= 100;
2519 nctrl
.netpndev
= (u64
)netdev
;
2520 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2522 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
2524 dev_err(&oct
->pci_dev
->dev
, "Failed to set MTU\n");
2534 * \brief Handler for SIOCSHWTSTAMP ioctl
2535 * @param netdev network device
2536 * @param ifr interface request
2537 * @param cmd command
2539 static int hwtstamp_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
2541 struct hwtstamp_config conf
;
2542 struct lio
*lio
= GET_LIO(netdev
);
2544 if (copy_from_user(&conf
, ifr
->ifr_data
, sizeof(conf
)))
2550 switch (conf
.tx_type
) {
2551 case HWTSTAMP_TX_ON
:
2552 case HWTSTAMP_TX_OFF
:
2558 switch (conf
.rx_filter
) {
2559 case HWTSTAMP_FILTER_NONE
:
2561 case HWTSTAMP_FILTER_ALL
:
2562 case HWTSTAMP_FILTER_SOME
:
2563 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
2564 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
2565 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
2566 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
2567 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
2568 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
2569 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
2570 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
2571 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
2572 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
2573 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
2574 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
2575 conf
.rx_filter
= HWTSTAMP_FILTER_ALL
;
2581 if (conf
.rx_filter
== HWTSTAMP_FILTER_ALL
)
2582 ifstate_set(lio
, LIO_IFSTATE_RX_TIMESTAMP_ENABLED
);
2585 ifstate_reset(lio
, LIO_IFSTATE_RX_TIMESTAMP_ENABLED
);
2587 return copy_to_user(ifr
->ifr_data
, &conf
, sizeof(conf
)) ? -EFAULT
: 0;
2591 * \brief ioctl handler
2592 * @param netdev network device
2593 * @param ifr interface request
2594 * @param cmd command
2596 static int liquidio_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
2600 return hwtstamp_ioctl(netdev
, ifr
, cmd
);
2607 * \brief handle a Tx timestamp response
2608 * @param status response status
2609 * @param buf pointer to skb
2611 static void handle_timestamp(struct octeon_device
*oct
,
2615 struct octnet_buf_free_info
*finfo
;
2616 struct octeon_soft_command
*sc
;
2617 struct oct_timestamp_resp
*resp
;
2619 struct sk_buff
*skb
= (struct sk_buff
*)buf
;
2621 finfo
= (struct octnet_buf_free_info
*)skb
->cb
;
2625 resp
= (struct oct_timestamp_resp
*)sc
->virtrptr
;
2627 if (status
!= OCTEON_REQUEST_DONE
) {
2628 dev_err(&oct
->pci_dev
->dev
, "Tx timestamp instruction failed. Status: %llx\n",
2629 CVM_CAST64(status
));
2630 resp
->timestamp
= 0;
2633 octeon_swap_8B_data(&resp
->timestamp
, 1);
2635 if (unlikely((skb_shinfo(skb
)->tx_flags
& SKBTX_IN_PROGRESS
) != 0)) {
2636 struct skb_shared_hwtstamps ts
;
2637 u64 ns
= resp
->timestamp
;
2639 netif_info(lio
, tx_done
, lio
->netdev
,
2640 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2641 skb
, (unsigned long long)ns
);
2642 ts
.hwtstamp
= ns_to_ktime(ns
+ lio
->ptp_adjust
);
2643 skb_tstamp_tx(skb
, &ts
);
2646 octeon_free_soft_command(oct
, sc
);
2647 tx_buffer_free(skb
);
2650 /* \brief Send a data packet that will be timestamped
2651 * @param oct octeon device
2652 * @param ndata pointer to network data
2653 * @param finfo pointer to private network data
2655 static inline int send_nic_timestamp_pkt(struct octeon_device
*oct
,
2656 struct octnic_data_pkt
*ndata
,
2657 struct octnet_buf_free_info
*finfo
,
2661 struct octeon_soft_command
*sc
;
2668 sc
= octeon_alloc_soft_command_resp(oct
, &ndata
->cmd
,
2669 sizeof(struct oct_timestamp_resp
));
2673 dev_err(&oct
->pci_dev
->dev
, "No memory for timestamped data packet\n");
2674 return IQ_SEND_FAILED
;
2677 if (ndata
->reqtype
== REQTYPE_NORESP_NET
)
2678 ndata
->reqtype
= REQTYPE_RESP_NET
;
2679 else if (ndata
->reqtype
== REQTYPE_NORESP_NET_SG
)
2680 ndata
->reqtype
= REQTYPE_RESP_NET_SG
;
2682 sc
->callback
= handle_timestamp
;
2683 sc
->callback_arg
= finfo
->skb
;
2684 sc
->iq_no
= ndata
->q_no
;
2686 len
= (u32
)((struct octeon_instr_ih2
*)(&sc
->cmd
.cmd2
.ih2
))->dlengsz
;
2688 ring_doorbell
= !xmit_more
;
2689 retval
= octeon_send_command(oct
, sc
->iq_no
, ring_doorbell
, &sc
->cmd
,
2690 sc
, len
, ndata
->reqtype
);
2692 if (retval
== IQ_SEND_FAILED
) {
2693 dev_err(&oct
->pci_dev
->dev
, "timestamp data packet failed status: %x\n",
2695 octeon_free_soft_command(oct
, sc
);
2697 netif_info(lio
, tx_queued
, lio
->netdev
, "Queued timestamp packet\n");
2703 /** \brief Transmit networks packets to the Octeon interface
2704 * @param skbuff skbuff struct to be passed to network layer.
2705 * @param netdev pointer to network device
2706 * @returns whether the packet was transmitted to the device okay or not
2707 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2709 static int liquidio_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
2712 struct octnet_buf_free_info
*finfo
;
2713 union octnic_cmd_setup cmdsetup
;
2714 struct octnic_data_pkt ndata
;
2715 struct octeon_device
*oct
;
2716 struct oct_iq_stats
*stats
;
2717 struct octeon_instr_irh
*irh
;
2718 union tx_info
*tx_info
;
2720 int q_idx
= 0, iq_no
= 0;
2725 lio
= GET_LIO(netdev
);
2728 if (netif_is_multiqueue(netdev
)) {
2729 q_idx
= skb
->queue_mapping
;
2730 q_idx
= (q_idx
% (lio
->linfo
.num_txpciq
));
2732 iq_no
= lio
->linfo
.txpciq
[q_idx
].s
.q_no
;
2737 stats
= &oct
->instr_queue
[iq_no
]->stats
;
2739 /* Check for all conditions in which the current packet cannot be
2742 if (!(atomic_read(&lio
->ifstate
) & LIO_IFSTATE_RUNNING
) ||
2743 (!lio
->linfo
.link
.s
.link_up
) ||
2745 netif_info(lio
, tx_err
, lio
->netdev
,
2746 "Transmit failed link_status : %d\n",
2747 lio
->linfo
.link
.s
.link_up
);
2748 goto lio_xmit_failed
;
2751 /* Use space in skb->cb to store info used to unmap and
2754 finfo
= (struct octnet_buf_free_info
*)skb
->cb
;
2759 /* Prepare the attributes for the data to be passed to OSI. */
2760 memset(&ndata
, 0, sizeof(struct octnic_data_pkt
));
2762 ndata
.buf
= (void *)finfo
;
2766 if (netif_is_multiqueue(netdev
)) {
2767 if (octnet_iq_is_full(oct
, ndata
.q_no
)) {
2768 /* defer sending if queue is full */
2769 netif_info(lio
, tx_err
, lio
->netdev
, "Transmit failed iq:%d full\n",
2771 stats
->tx_iq_busy
++;
2772 return NETDEV_TX_BUSY
;
2775 if (octnet_iq_is_full(oct
, lio
->txq
)) {
2776 /* defer sending if queue is full */
2777 stats
->tx_iq_busy
++;
2778 netif_info(lio
, tx_err
, lio
->netdev
, "Transmit failed iq:%d full\n",
2780 return NETDEV_TX_BUSY
;
2783 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2784 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
2787 ndata
.datasize
= skb
->len
;
2790 cmdsetup
.s
.iq_no
= iq_no
;
2792 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2793 cmdsetup
.s
.transport_csum
= 1;
2795 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) {
2796 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
2797 cmdsetup
.s
.timestamp
= 1;
2800 if (skb_shinfo(skb
)->nr_frags
== 0) {
2801 cmdsetup
.s
.u
.datasize
= skb
->len
;
2802 octnet_prepare_pci_cmd(oct
, &ndata
.cmd
, &cmdsetup
, tag
);
2803 /* Offload checksum calculation for TCP/UDP packets */
2804 dptr
= dma_map_single(&oct
->pci_dev
->dev
,
2808 if (dma_mapping_error(&oct
->pci_dev
->dev
, dptr
)) {
2809 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 1\n",
2811 return NETDEV_TX_BUSY
;
2814 ndata
.cmd
.cmd2
.dptr
= dptr
;
2816 ndata
.reqtype
= REQTYPE_NORESP_NET
;
2820 struct skb_frag_struct
*frag
;
2821 struct octnic_gather
*g
;
2823 spin_lock(&lio
->glist_lock
[q_idx
]);
2824 g
= (struct octnic_gather
*)
2825 list_delete_head(&lio
->glist
[q_idx
]);
2826 spin_unlock(&lio
->glist_lock
[q_idx
]);
2829 netif_info(lio
, tx_err
, lio
->netdev
,
2830 "Transmit scatter gather: glist null!\n");
2831 goto lio_xmit_failed
;
2834 cmdsetup
.s
.gather
= 1;
2835 cmdsetup
.s
.u
.gatherptrs
= (skb_shinfo(skb
)->nr_frags
+ 1);
2836 octnet_prepare_pci_cmd(oct
, &ndata
.cmd
, &cmdsetup
, tag
);
2838 memset(g
->sg
, 0, g
->sg_size
);
2840 g
->sg
[0].ptr
[0] = dma_map_single(&oct
->pci_dev
->dev
,
2842 (skb
->len
- skb
->data_len
),
2844 if (dma_mapping_error(&oct
->pci_dev
->dev
, g
->sg
[0].ptr
[0])) {
2845 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 2\n",
2847 return NETDEV_TX_BUSY
;
2849 add_sg_size(&g
->sg
[0], (skb
->len
- skb
->data_len
), 0);
2851 frags
= skb_shinfo(skb
)->nr_frags
;
2854 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
2856 g
->sg
[(i
>> 2)].ptr
[(i
& 3)] =
2857 dma_map_page(&oct
->pci_dev
->dev
,
2863 if (dma_mapping_error(&oct
->pci_dev
->dev
,
2864 g
->sg
[i
>> 2].ptr
[i
& 3])) {
2865 dma_unmap_single(&oct
->pci_dev
->dev
,
2867 skb
->len
- skb
->data_len
,
2869 for (j
= 1; j
< i
; j
++) {
2870 frag
= &skb_shinfo(skb
)->frags
[j
- 1];
2871 dma_unmap_page(&oct
->pci_dev
->dev
,
2872 g
->sg
[j
>> 2].ptr
[j
& 3],
2876 dev_err(&oct
->pci_dev
->dev
, "%s DMA mapping error 3\n",
2878 return NETDEV_TX_BUSY
;
2881 add_sg_size(&g
->sg
[(i
>> 2)], frag
->size
, (i
& 3));
2885 dma_sync_single_for_device(&oct
->pci_dev
->dev
, g
->sg_dma_ptr
,
2886 g
->sg_size
, DMA_TO_DEVICE
);
2887 dptr
= g
->sg_dma_ptr
;
2889 ndata
.cmd
.cmd2
.dptr
= dptr
;
2893 ndata
.reqtype
= REQTYPE_NORESP_NET_SG
;
2896 irh
= (struct octeon_instr_irh
*)&ndata
.cmd
.cmd2
.irh
;
2897 tx_info
= (union tx_info
*)&ndata
.cmd
.cmd2
.ossp
[0];
2899 if (skb_shinfo(skb
)->gso_size
) {
2900 tx_info
->s
.gso_size
= skb_shinfo(skb
)->gso_size
;
2901 tx_info
->s
.gso_segs
= skb_shinfo(skb
)->gso_segs
;
2904 xmit_more
= skb
->xmit_more
;
2906 if (unlikely(cmdsetup
.s
.timestamp
))
2907 status
= send_nic_timestamp_pkt(oct
, &ndata
, finfo
, xmit_more
);
2909 status
= octnet_send_nic_data_pkt(oct
, &ndata
, xmit_more
);
2910 if (status
== IQ_SEND_FAILED
)
2911 goto lio_xmit_failed
;
2913 netif_info(lio
, tx_queued
, lio
->netdev
, "Transmit queued successfully\n");
2915 if (status
== IQ_SEND_STOP
)
2916 stop_q(lio
->netdev
, q_idx
);
2918 netif_trans_update(netdev
);
2921 stats
->tx_tot_bytes
+= skb
->len
;
2923 return NETDEV_TX_OK
;
2926 stats
->tx_dropped
++;
2927 netif_info(lio
, tx_err
, lio
->netdev
, "IQ%d Transmit dropped:%llu\n",
2928 iq_no
, stats
->tx_dropped
);
2930 dma_unmap_single(&oct
->pci_dev
->dev
, dptr
,
2931 ndata
.datasize
, DMA_TO_DEVICE
);
2932 tx_buffer_free(skb
);
2933 return NETDEV_TX_OK
;
2936 /** \brief Network device Tx timeout
2937 * @param netdev pointer to network device
2939 static void liquidio_tx_timeout(struct net_device
*netdev
)
2943 lio
= GET_LIO(netdev
);
2945 netif_info(lio
, tx_err
, lio
->netdev
,
2946 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2947 netdev
->stats
.tx_dropped
);
2948 netif_trans_update(netdev
);
2952 int liquidio_set_feature(struct net_device
*netdev
, int cmd
, u16 param1
)
2954 struct lio
*lio
= GET_LIO(netdev
);
2955 struct octeon_device
*oct
= lio
->oct_dev
;
2956 struct octnic_ctrl_pkt nctrl
;
2959 memset(&nctrl
, 0, sizeof(struct octnic_ctrl_pkt
));
2962 nctrl
.ncmd
.s
.cmd
= cmd
;
2963 nctrl
.ncmd
.s
.param1
= param1
;
2964 nctrl
.iq_no
= lio
->linfo
.txpciq
[0].s
.q_no
;
2965 nctrl
.wait_time
= 100;
2966 nctrl
.netpndev
= (u64
)netdev
;
2967 nctrl
.cb_fn
= liquidio_link_ctrl_cmd_completion
;
2969 ret
= octnet_send_nic_ctrl_pkt(lio
->oct_dev
, &nctrl
);
2971 dev_err(&oct
->pci_dev
->dev
, "Feature change failed in core (ret: 0x%x)\n",
2977 /** \brief Net device fix features
2978 * @param netdev pointer to network device
2979 * @param request features requested
2980 * @returns updated features list
2982 static netdev_features_t
liquidio_fix_features(struct net_device
*netdev
,
2983 netdev_features_t request
)
2985 struct lio
*lio
= netdev_priv(netdev
);
2987 if ((request
& NETIF_F_RXCSUM
) &&
2988 !(lio
->dev_capability
& NETIF_F_RXCSUM
))
2989 request
&= ~NETIF_F_RXCSUM
;
2991 if ((request
& NETIF_F_HW_CSUM
) &&
2992 !(lio
->dev_capability
& NETIF_F_HW_CSUM
))
2993 request
&= ~NETIF_F_HW_CSUM
;
2995 if ((request
& NETIF_F_TSO
) && !(lio
->dev_capability
& NETIF_F_TSO
))
2996 request
&= ~NETIF_F_TSO
;
2998 if ((request
& NETIF_F_TSO6
) && !(lio
->dev_capability
& NETIF_F_TSO6
))
2999 request
&= ~NETIF_F_TSO6
;
3001 if ((request
& NETIF_F_LRO
) && !(lio
->dev_capability
& NETIF_F_LRO
))
3002 request
&= ~NETIF_F_LRO
;
3004 /*Disable LRO if RXCSUM is off */
3005 if (!(request
& NETIF_F_RXCSUM
) && (netdev
->features
& NETIF_F_LRO
) &&
3006 (lio
->dev_capability
& NETIF_F_LRO
))
3007 request
&= ~NETIF_F_LRO
;
3012 /** \brief Net device set features
3013 * @param netdev pointer to network device
3014 * @param features features to enable/disable
3016 static int liquidio_set_features(struct net_device
*netdev
,
3017 netdev_features_t features
)
3019 struct lio
*lio
= netdev_priv(netdev
);
3021 if (!((netdev
->features
^ features
) & NETIF_F_LRO
))
3024 if ((features
& NETIF_F_LRO
) && (lio
->dev_capability
& NETIF_F_LRO
))
3025 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_ENABLE
,
3026 OCTNIC_LROIPV4
| OCTNIC_LROIPV6
);
3027 else if (!(features
& NETIF_F_LRO
) &&
3028 (lio
->dev_capability
& NETIF_F_LRO
))
3029 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_DISABLE
,
3030 OCTNIC_LROIPV4
| OCTNIC_LROIPV6
);
3035 static struct net_device_ops lionetdevops
= {
3036 .ndo_open
= liquidio_open
,
3037 .ndo_stop
= liquidio_stop
,
3038 .ndo_start_xmit
= liquidio_xmit
,
3039 .ndo_get_stats
= liquidio_get_stats
,
3040 .ndo_set_mac_address
= liquidio_set_mac
,
3041 .ndo_set_rx_mode
= liquidio_set_mcast_list
,
3042 .ndo_tx_timeout
= liquidio_tx_timeout
,
3043 .ndo_change_mtu
= liquidio_change_mtu
,
3044 .ndo_do_ioctl
= liquidio_ioctl
,
3045 .ndo_fix_features
= liquidio_fix_features
,
3046 .ndo_set_features
= liquidio_set_features
,
3049 /** \brief Entry point for the liquidio module
3051 static int __init
liquidio_init(void)
3054 struct handshake
*hs
;
3056 init_completion(&first_stage
);
3058 octeon_init_device_list(conf_type
);
3060 if (liquidio_init_pci())
3063 wait_for_completion_timeout(&first_stage
, msecs_to_jiffies(1000));
3065 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
3068 wait_for_completion(&hs
->init
);
3070 /* init handshake failed */
3071 dev_err(&hs
->pci_dev
->dev
,
3072 "Failed to init device\n");
3073 liquidio_deinit_pci();
3079 for (i
= 0; i
< MAX_OCTEON_DEVICES
; i
++) {
3082 wait_for_completion_timeout(&hs
->started
,
3083 msecs_to_jiffies(30000));
3084 if (!hs
->started_ok
) {
3085 /* starter handshake failed */
3086 dev_err(&hs
->pci_dev
->dev
,
3087 "Firmware failed to start\n");
3088 liquidio_deinit_pci();
3097 static int lio_nic_info(struct octeon_recv_info
*recv_info
, void *buf
)
3099 struct octeon_device
*oct
= (struct octeon_device
*)buf
;
3100 struct octeon_recv_pkt
*recv_pkt
= recv_info
->recv_pkt
;
3102 union oct_link_status
*ls
;
3105 if (recv_pkt
->buffer_size
[0] != sizeof(*ls
)) {
3106 dev_err(&oct
->pci_dev
->dev
, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3107 recv_pkt
->buffer_size
[0],
3108 recv_pkt
->rh
.r_nic_info
.gmxport
);
3112 gmxport
= recv_pkt
->rh
.r_nic_info
.gmxport
;
3113 ls
= (union oct_link_status
*)get_rbd(recv_pkt
->buffer_ptr
[0]);
3115 octeon_swap_8B_data((u64
*)ls
, (sizeof(union oct_link_status
)) >> 3);
3116 for (i
= 0; i
< oct
->ifcount
; i
++) {
3117 if (oct
->props
[i
].gmxport
== gmxport
) {
3118 update_link_status(oct
->props
[i
].netdev
, ls
);
3124 for (i
= 0; i
< recv_pkt
->buffer_count
; i
++)
3125 recv_buffer_free(recv_pkt
->buffer_ptr
[i
]);
3126 octeon_free_recv_info(recv_info
);
3131 * \brief Setup network interfaces
3132 * @param octeon_dev octeon device
3134 * Called during init time for each device. It assumes the NIC
3135 * is already up and running. The link information for each
3136 * interface is passed in link_info.
3138 static int setup_nic_devices(struct octeon_device
*octeon_dev
)
3140 struct lio
*lio
= NULL
;
3141 struct net_device
*netdev
;
3143 struct octeon_soft_command
*sc
;
3144 struct liquidio_if_cfg_context
*ctx
;
3145 struct liquidio_if_cfg_resp
*resp
;
3146 struct octdev_props
*props
;
3147 int retval
, num_iqueues
, num_oqueues
;
3148 int num_cpus
= num_online_cpus();
3149 union oct_nic_if_cfg if_cfg
;
3150 unsigned int base_queue
;
3151 unsigned int gmx_port_id
;
3152 u32 resp_size
, ctx_size
;
3155 /* This is to handle link status changes */
3156 octeon_register_dispatch_fn(octeon_dev
, OPCODE_NIC
,
3158 lio_nic_info
, octeon_dev
);
3160 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3161 * They are handled directly.
3163 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_NORESP_NET
,
3166 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_NORESP_NET_SG
,
3169 octeon_register_reqtype_free_fn(octeon_dev
, REQTYPE_RESP_NET_SG
,
3170 free_netsgbuf_with_resp
);
3172 for (i
= 0; i
< octeon_dev
->ifcount
; i
++) {
3173 resp_size
= sizeof(struct liquidio_if_cfg_resp
);
3174 ctx_size
= sizeof(struct liquidio_if_cfg_context
);
3175 sc
= (struct octeon_soft_command
*)
3176 octeon_alloc_soft_command(octeon_dev
, 0,
3177 resp_size
, ctx_size
);
3178 resp
= (struct liquidio_if_cfg_resp
*)sc
->virtrptr
;
3179 ctx
= (struct liquidio_if_cfg_context
*)sc
->ctxptr
;
3182 CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3184 CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3186 CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3188 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev
), i
);
3190 if (num_iqueues
> num_cpus
)
3191 num_iqueues
= num_cpus
;
3192 if (num_oqueues
> num_cpus
)
3193 num_oqueues
= num_cpus
;
3194 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3195 "requesting config for interface %d, iqs %d, oqs %d\n",
3196 ifidx_or_pfnum
, num_iqueues
, num_oqueues
);
3197 ACCESS_ONCE(ctx
->cond
) = 0;
3198 ctx
->octeon_id
= lio_get_device_id(octeon_dev
);
3199 init_waitqueue_head(&ctx
->wc
);
3202 if_cfg
.s
.num_iqueues
= num_iqueues
;
3203 if_cfg
.s
.num_oqueues
= num_oqueues
;
3204 if_cfg
.s
.base_queue
= base_queue
;
3205 if_cfg
.s
.gmx_port_id
= gmx_port_id
;
3209 octeon_prepare_soft_command(octeon_dev
, sc
, OPCODE_NIC
,
3210 OPCODE_NIC_IF_CFG
, 0,
3213 sc
->callback
= if_cfg_callback
;
3214 sc
->callback_arg
= sc
;
3215 sc
->wait_time
= 1000;
3217 retval
= octeon_send_soft_command(octeon_dev
, sc
);
3218 if (retval
== IQ_SEND_FAILED
) {
3219 dev_err(&octeon_dev
->pci_dev
->dev
,
3220 "iq/oq config failed status: %x\n",
3222 /* Soft instr is freed by driver in case of failure. */
3223 goto setup_nic_dev_fail
;
3226 /* Sleep on a wait queue till the cond flag indicates that the
3227 * response arrived or timed-out.
3229 sleep_cond(&ctx
->wc
, &ctx
->cond
);
3230 retval
= resp
->status
;
3232 dev_err(&octeon_dev
->pci_dev
->dev
, "iq/oq config failed\n");
3233 goto setup_nic_dev_fail
;
3236 octeon_swap_8B_data((u64
*)(&resp
->cfg_info
),
3237 (sizeof(struct liquidio_if_cfg_info
)) >> 3);
3239 num_iqueues
= hweight64(resp
->cfg_info
.iqmask
);
3240 num_oqueues
= hweight64(resp
->cfg_info
.oqmask
);
3242 if (!(num_iqueues
) || !(num_oqueues
)) {
3243 dev_err(&octeon_dev
->pci_dev
->dev
,
3244 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3245 resp
->cfg_info
.iqmask
,
3246 resp
->cfg_info
.oqmask
);
3247 goto setup_nic_dev_fail
;
3249 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3250 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3251 i
, resp
->cfg_info
.iqmask
, resp
->cfg_info
.oqmask
,
3252 num_iqueues
, num_oqueues
);
3253 netdev
= alloc_etherdev_mq(LIO_SIZE
, num_iqueues
);
3256 dev_err(&octeon_dev
->pci_dev
->dev
, "Device allocation failed\n");
3257 goto setup_nic_dev_fail
;
3260 SET_NETDEV_DEV(netdev
, &octeon_dev
->pci_dev
->dev
);
3262 if (num_iqueues
> 1)
3263 lionetdevops
.ndo_select_queue
= select_q
;
3265 /* Associate the routines that will handle different
3268 netdev
->netdev_ops
= &lionetdevops
;
3270 lio
= GET_LIO(netdev
);
3272 memset(lio
, 0, sizeof(struct lio
));
3274 lio
->ifidx
= ifidx_or_pfnum
;
3276 props
= &octeon_dev
->props
[i
];
3277 props
->gmxport
= resp
->cfg_info
.linfo
.gmxport
;
3278 props
->netdev
= netdev
;
3280 lio
->linfo
.num_rxpciq
= num_oqueues
;
3281 lio
->linfo
.num_txpciq
= num_iqueues
;
3282 for (j
= 0; j
< num_oqueues
; j
++) {
3283 lio
->linfo
.rxpciq
[j
].u64
=
3284 resp
->cfg_info
.linfo
.rxpciq
[j
].u64
;
3286 for (j
= 0; j
< num_iqueues
; j
++) {
3287 lio
->linfo
.txpciq
[j
].u64
=
3288 resp
->cfg_info
.linfo
.txpciq
[j
].u64
;
3290 lio
->linfo
.hw_addr
= resp
->cfg_info
.linfo
.hw_addr
;
3291 lio
->linfo
.gmxport
= resp
->cfg_info
.linfo
.gmxport
;
3292 lio
->linfo
.link
.u64
= resp
->cfg_info
.linfo
.link
.u64
;
3294 lio
->msg_enable
= netif_msg_init(debug
, DEFAULT_MSG_ENABLE
);
3296 lio
->dev_capability
= NETIF_F_HIGHDMA
3297 | NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
3298 | NETIF_F_SG
| NETIF_F_RXCSUM
3300 | NETIF_F_TSO
| NETIF_F_TSO6
3302 netif_set_gso_max_size(netdev
, OCTNIC_GSO_MAX_SIZE
);
3304 netdev
->features
= (lio
->dev_capability
& ~NETIF_F_LRO
);
3306 netdev
->vlan_features
= lio
->dev_capability
;
3308 netdev
->hw_features
= lio
->dev_capability
;
3310 /* Point to the properties for octeon device to which this
3311 * interface belongs.
3313 lio
->oct_dev
= octeon_dev
;
3314 lio
->octprops
= props
;
3315 lio
->netdev
= netdev
;
3317 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3318 "if%d gmx: %d hw_addr: 0x%llx\n", i
,
3319 lio
->linfo
.gmxport
, CVM_CAST64(lio
->linfo
.hw_addr
));
3321 /* 64-bit swap required on LE machines */
3322 octeon_swap_8B_data(&lio
->linfo
.hw_addr
, 1);
3323 for (j
= 0; j
< 6; j
++)
3324 mac
[j
] = *((u8
*)(((u8
*)&lio
->linfo
.hw_addr
) + 2 + j
));
3326 /* Copy MAC Address to OS network device structure */
3328 ether_addr_copy(netdev
->dev_addr
, mac
);
3330 /* By default all interfaces on a single Octeon uses the same
3333 lio
->txq
= lio
->linfo
.txpciq
[0].s
.q_no
;
3334 lio
->rxq
= lio
->linfo
.rxpciq
[0].s
.q_no
;
3335 if (setup_io_queues(octeon_dev
, i
)) {
3336 dev_err(&octeon_dev
->pci_dev
->dev
, "I/O queues creation failed\n");
3337 goto setup_nic_dev_fail
;
3340 ifstate_set(lio
, LIO_IFSTATE_DROQ_OPS
);
3342 lio
->tx_qsize
= octeon_get_tx_qsize(octeon_dev
, lio
->txq
);
3343 lio
->rx_qsize
= octeon_get_rx_qsize(octeon_dev
, lio
->rxq
);
3345 if (setup_glists(octeon_dev
, lio
, num_iqueues
)) {
3346 dev_err(&octeon_dev
->pci_dev
->dev
,
3347 "Gather list allocation failed\n");
3348 goto setup_nic_dev_fail
;
3351 /* Register ethtool support */
3352 liquidio_set_ethtool_ops(netdev
);
3354 if (netdev
->features
& NETIF_F_LRO
)
3355 liquidio_set_feature(netdev
, OCTNET_CMD_LRO_ENABLE
,
3356 OCTNIC_LROIPV4
| OCTNIC_LROIPV6
);
3358 if ((debug
!= -1) && (debug
& NETIF_MSG_HW
))
3359 liquidio_set_feature(netdev
, OCTNET_CMD_VERBOSE_ENABLE
,
3362 /* Register the network device with the OS */
3363 if (register_netdev(netdev
)) {
3364 dev_err(&octeon_dev
->pci_dev
->dev
, "Device registration failed\n");
3365 goto setup_nic_dev_fail
;
3368 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3369 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3370 i
, mac
[0], mac
[1], mac
[2], mac
[3], mac
[4], mac
[5]);
3371 netif_carrier_off(netdev
);
3372 lio
->link_changes
++;
3374 ifstate_set(lio
, LIO_IFSTATE_REGISTERED
);
3376 dev_dbg(&octeon_dev
->pci_dev
->dev
,
3377 "NIC ifidx:%d Setup successful\n", i
);
3379 octeon_free_soft_command(octeon_dev
, sc
);
3386 octeon_free_soft_command(octeon_dev
, sc
);
3389 dev_err(&octeon_dev
->pci_dev
->dev
,
3390 "NIC ifidx:%d Setup failed\n", i
);
3391 liquidio_destroy_nic_device(octeon_dev
, i
);
3397 * \brief initialize the NIC
3398 * @param oct octeon device
3400 * This initialization routine is called once the Octeon device application is
3403 static int liquidio_init_nic_module(struct octeon_device
*oct
)
3405 struct oct_intrmod_cfg
*intrmod_cfg
;
3407 int num_nic_ports
= CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct
));
3409 dev_dbg(&oct
->pci_dev
->dev
, "Initializing network interfaces\n");
3411 /* only default iq and oq were initialized
3412 * initialize the rest as well
3414 /* run port_config command for each port */
3415 oct
->ifcount
= num_nic_ports
;
3417 memset(oct
->props
, 0,
3418 sizeof(struct octdev_props
) * num_nic_ports
);
3420 for (i
= 0; i
< MAX_OCTEON_LINKS
; i
++)
3421 oct
->props
[i
].gmxport
= -1;
3423 retval
= setup_nic_devices(oct
);
3425 dev_err(&oct
->pci_dev
->dev
, "Setup NIC devices failed\n");
3426 goto octnet_init_failure
;
3429 liquidio_ptp_init(oct
);
3431 /* Initialize interrupt moderation params */
3432 intrmod_cfg
= &((struct octeon_device
*)oct
)->intrmod
;
3433 intrmod_cfg
->intrmod_enable
= 1;
3434 intrmod_cfg
->intrmod_check_intrvl
= LIO_INTRMOD_CHECK_INTERVAL
;
3435 intrmod_cfg
->intrmod_maxpkt_ratethr
= LIO_INTRMOD_MAXPKT_RATETHR
;
3436 intrmod_cfg
->intrmod_minpkt_ratethr
= LIO_INTRMOD_MINPKT_RATETHR
;
3437 intrmod_cfg
->intrmod_maxcnt_trigger
= LIO_INTRMOD_MAXCNT_TRIGGER
;
3438 intrmod_cfg
->intrmod_maxtmr_trigger
= LIO_INTRMOD_MAXTMR_TRIGGER
;
3439 intrmod_cfg
->intrmod_mintmr_trigger
= LIO_INTRMOD_MINTMR_TRIGGER
;
3440 intrmod_cfg
->intrmod_mincnt_trigger
= LIO_INTRMOD_MINCNT_TRIGGER
;
3442 dev_dbg(&oct
->pci_dev
->dev
, "Network interfaces ready\n");
3446 octnet_init_failure
:
3454 * \brief starter callback that invokes the remaining initialization work after
3455 * the NIC is up and running.
3456 * @param octptr work struct work_struct
3458 static void nic_starter(struct work_struct
*work
)
3460 struct octeon_device
*oct
;
3461 struct cavium_wk
*wk
= (struct cavium_wk
*)work
;
3463 oct
= (struct octeon_device
*)wk
->ctxptr
;
3465 if (atomic_read(&oct
->status
) == OCT_DEV_RUNNING
)
3468 /* If the status of the device is CORE_OK, the core
3469 * application has reported its application type. Call
3470 * any registered handlers now and move to the RUNNING
3473 if (atomic_read(&oct
->status
) != OCT_DEV_CORE_OK
) {
3474 schedule_delayed_work(&oct
->nic_poll_work
.work
,
3475 LIQUIDIO_STARTER_POLL_INTERVAL_MS
);
3479 atomic_set(&oct
->status
, OCT_DEV_RUNNING
);
3481 if (oct
->app_mode
&& oct
->app_mode
== CVM_DRV_NIC_APP
) {
3482 dev_dbg(&oct
->pci_dev
->dev
, "Starting NIC module\n");
3484 if (liquidio_init_nic_module(oct
))
3485 dev_err(&oct
->pci_dev
->dev
, "NIC initialization failed\n");
3487 handshake
[oct
->octeon_id
].started_ok
= 1;
3489 dev_err(&oct
->pci_dev
->dev
,
3490 "Unexpected application running on NIC (%d). Check firmware.\n",
3494 complete(&handshake
[oct
->octeon_id
].started
);
3498 * \brief Device initialization for each Octeon device that is probed
3499 * @param octeon_dev octeon device
3501 static int octeon_device_init(struct octeon_device
*octeon_dev
)
3504 struct octeon_device_priv
*oct_priv
=
3505 (struct octeon_device_priv
*)octeon_dev
->priv
;
3506 atomic_set(&octeon_dev
->status
, OCT_DEV_BEGIN_STATE
);
3508 /* Enable access to the octeon device and make its DMA capability
3511 if (octeon_pci_os_setup(octeon_dev
))
3514 /* Identify the Octeon type and map the BAR address space. */
3515 if (octeon_chip_specific_setup(octeon_dev
)) {
3516 dev_err(&octeon_dev
->pci_dev
->dev
, "Chip specific setup failed\n");
3520 atomic_set(&octeon_dev
->status
, OCT_DEV_PCI_MAP_DONE
);
3522 octeon_dev
->app_mode
= CVM_DRV_INVALID_APP
;
3524 /* Do a soft reset of the Octeon device. */
3525 if (octeon_dev
->fn_list
.soft_reset(octeon_dev
))
3528 /* Initialize the dispatch mechanism used to push packets arriving on
3529 * Octeon Output queues.
3531 if (octeon_init_dispatch_list(octeon_dev
))
3534 octeon_register_dispatch_fn(octeon_dev
, OPCODE_NIC
,
3535 OPCODE_NIC_CORE_DRV_ACTIVE
,
3536 octeon_core_drv_init
,
3539 INIT_DELAYED_WORK(&octeon_dev
->nic_poll_work
.work
, nic_starter
);
3540 octeon_dev
->nic_poll_work
.ctxptr
= (void *)octeon_dev
;
3541 schedule_delayed_work(&octeon_dev
->nic_poll_work
.work
,
3542 LIQUIDIO_STARTER_POLL_INTERVAL_MS
);
3544 atomic_set(&octeon_dev
->status
, OCT_DEV_DISPATCH_INIT_DONE
);
3546 octeon_set_io_queues_off(octeon_dev
);
3548 /* Setup the data structures that manage this Octeon's Input queues. */
3549 if (octeon_setup_instr_queues(octeon_dev
)) {
3550 dev_err(&octeon_dev
->pci_dev
->dev
,
3551 "instruction queue initialization failed\n");
3552 /* On error, release any previously allocated queues */
3553 for (j
= 0; j
< octeon_dev
->num_iqs
; j
++)
3554 octeon_delete_instr_queue(octeon_dev
, j
);
3557 atomic_set(&octeon_dev
->status
, OCT_DEV_INSTR_QUEUE_INIT_DONE
);
3559 /* Initialize soft command buffer pool
3561 if (octeon_setup_sc_buffer_pool(octeon_dev
)) {
3562 dev_err(&octeon_dev
->pci_dev
->dev
, "sc buffer pool allocation failed\n");
3565 atomic_set(&octeon_dev
->status
, OCT_DEV_SC_BUFF_POOL_INIT_DONE
);
3567 /* Initialize lists to manage the requests of different types that
3568 * arrive from user & kernel applications for this octeon device.
3570 if (octeon_setup_response_list(octeon_dev
)) {
3571 dev_err(&octeon_dev
->pci_dev
->dev
, "Response list allocation failed\n");
3574 atomic_set(&octeon_dev
->status
, OCT_DEV_RESP_LIST_INIT_DONE
);
3576 if (octeon_setup_output_queues(octeon_dev
)) {
3577 dev_err(&octeon_dev
->pci_dev
->dev
, "Output queue initialization failed\n");
3578 /* Release any previously allocated queues */
3579 for (j
= 0; j
< octeon_dev
->num_oqs
; j
++)
3580 octeon_delete_droq(octeon_dev
, j
);
3583 atomic_set(&octeon_dev
->status
, OCT_DEV_DROQ_INIT_DONE
);
3585 /* The input and output queue registers were setup earlier (the queues
3586 * were not enabled). Any additional registers that need to be
3587 * programmed should be done now.
3589 ret
= octeon_dev
->fn_list
.setup_device_regs(octeon_dev
);
3591 dev_err(&octeon_dev
->pci_dev
->dev
,
3592 "Failed to configure device registers\n");
3596 /* Initialize the tasklet that handles output queue packet processing.*/
3597 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Initializing droq tasklet\n");
3598 tasklet_init(&oct_priv
->droq_tasklet
, octeon_droq_bh
,
3599 (unsigned long)octeon_dev
);
3601 /* Setup the interrupt handler and record the INT SUM register address
3603 octeon_setup_interrupt(octeon_dev
);
3605 /* Enable Octeon device interrupts */
3606 octeon_dev
->fn_list
.enable_interrupt(octeon_dev
->chip
);
3608 /* Enable the input and output queues for this Octeon device */
3609 octeon_dev
->fn_list
.enable_io_queues(octeon_dev
);
3611 atomic_set(&octeon_dev
->status
, OCT_DEV_IO_QUEUES_DONE
);
3613 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Waiting for DDR initialization...\n");
3615 if (ddr_timeout
== 0) {
3616 dev_info(&octeon_dev
->pci_dev
->dev
,
3617 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
3620 schedule_timeout_uninterruptible(HZ
* LIO_RESET_SECS
);
3622 /* Wait for the octeon to initialize DDR after the soft-reset. */
3623 ret
= octeon_wait_for_ddr_init(octeon_dev
, &ddr_timeout
);
3625 dev_err(&octeon_dev
->pci_dev
->dev
,
3626 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
3631 if (octeon_wait_for_bootloader(octeon_dev
, 1000) != 0) {
3632 dev_err(&octeon_dev
->pci_dev
->dev
, "Board not responding\n");
3636 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Initializing consoles\n");
3637 ret
= octeon_init_consoles(octeon_dev
);
3639 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not access board consoles\n");
3642 ret
= octeon_add_console(octeon_dev
, 0);
3644 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not access board console\n");
3648 atomic_set(&octeon_dev
->status
, OCT_DEV_CONSOLE_INIT_DONE
);
3650 dev_dbg(&octeon_dev
->pci_dev
->dev
, "Loading firmware\n");
3651 ret
= load_firmware(octeon_dev
);
3653 dev_err(&octeon_dev
->pci_dev
->dev
, "Could not load firmware to board\n");
3657 handshake
[octeon_dev
->octeon_id
].init_ok
= 1;
3658 complete(&handshake
[octeon_dev
->octeon_id
].init
);
3660 atomic_set(&octeon_dev
->status
, OCT_DEV_HOST_OK
);
3662 /* Send Credit for Octeon Output queues. Credits are always sent after
3663 * the output queue is enabled.
3665 for (j
= 0; j
< octeon_dev
->num_oqs
; j
++)
3666 writel(octeon_dev
->droq
[j
]->max_count
,
3667 octeon_dev
->droq
[j
]->pkts_credit_reg
);
3669 /* Packets can start arriving on the output queues from this point. */
3675 * \brief Exits the module
3677 static void __exit
liquidio_exit(void)
3679 liquidio_deinit_pci();
3681 pr_info("LiquidIO network module is now unloaded\n");
3684 module_init(liquidio_init
);
3685 module_exit(liquidio_exit
);