liquidio: Remove redundant code
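
This patch removes redundant code from the liquidio request manager and
tightens up locking and error handling:

- Drop the unused __post_command() helper and the unused octeon_dev and
  force_db parameters of __post_command2().
- Drop header includes that request_manager.c no longer needs.
- Convert the ordered soft-command response list lock to
  spin_lock_irqsave()/spin_unlock_irqrestore(), and document that
  completion callbacks must not sleep.
- Ring the instruction queue doorbell only when the caller sets
  force_db, instead of also ringing on the fill threshold.
- Replace BUG_ON() with WARN_ON() for conditions the driver can
  survive.
- Take the soft-command buffer pool lock with
  spin_lock_bh()/spin_unlock_bh() to protect against bottom halves.
- Re-arm the doorbell timeout work every 10 ms instead of every 1 ms.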
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 7eafa75ac0954221e7eb2b68133f065bec7906fb..d32492f185ffc984be3abbcb5de1d81e53878348 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
  * This file may also be available under a different license from Cavium.
  * Contact Cavium, Inc. for more information
  **********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
 #include <linux/pci.h>
-#include <linux/kthread.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
-#include "octeon_config.h"
 #include "liquidio_common.h"
 #include "octeon_droq.h"
 #include "octeon_iq.h"
 #include "response_manager.h"
 #include "octeon_device.h"
-#include "octeon_nic.h"
 #include "octeon_main.h"
 #include "octeon_network.h"
-#include "cn66xx_regs.h"
 #include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
 
 #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
        (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -301,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
        memcpy(iqptr, cmd, cmdsize);
 }
 
-static inline int
-__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
-              struct octeon_instr_queue *iq,
-              u32 force_db __attribute__((unused)), u8 *cmd)
-{
-       u32 index = -1;
-
-       /* This ensures that the read index does not wrap around to the same
-        * position if queue gets full before Octeon could fetch any instr.
-        */
-       if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
-               return -1;
-
-       __copy_cmd_into_iq(iq, cmd);
-
-       /* "index" is returned, host_write_index is modified. */
-       index = iq->host_write_index;
-       INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
-       iq->fill_cnt++;
-
-       /* Flush the command into memory. We need to be sure the data is in
-        * memory before indicating that the instruction is pending.
-        */
-       wmb();
-
-       atomic_inc(&iq->instr_pending);
-
-       return index;
-}
-
 static inline struct iq_post_status
-__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
-               struct octeon_instr_queue *iq,
-               u32 force_db __attribute__((unused)), u8 *cmd)
+__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
 {
        struct iq_post_status st;
 
@@ -392,6 +349,7 @@ __add_to_request_list(struct octeon_instr_queue *iq,
        iq->request_list[idx].reqtype = reqtype;
 }
 
+/* Can only run in process context */
 int
 lio_process_iq_request_list(struct octeon_device *oct,
                            struct octeon_instr_queue *iq, u32 napi_budget)
@@ -403,6 +361,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct octeon_soft_command *sc;
        struct octeon_instr_irh *irh;
+       unsigned long flags;
 
        while (old != iq->octeon_read_index) {
                reqtype = iq->request_list[old].reqtype;
@@ -432,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
                                 * command response list because we expect
                                 * a response from Octeon.
                                 */
-                               spin_lock_bh(&oct->response_list
-                                       [OCTEON_ORDERED_SC_LIST].lock);
+                               spin_lock_irqsave
+                                       (&oct->response_list
+                                        [OCTEON_ORDERED_SC_LIST].lock,
+                                        flags);
                                atomic_inc(&oct->response_list
                                        [OCTEON_ORDERED_SC_LIST].
                                        pending_req_count);
                                list_add_tail(&sc->node, &oct->response_list
                                        [OCTEON_ORDERED_SC_LIST].head);
-                               spin_unlock_bh(&oct->response_list
-                                       [OCTEON_ORDERED_SC_LIST].lock);
+                               spin_unlock_irqrestore
+                                       (&oct->response_list
+                                        [OCTEON_ORDERED_SC_LIST].lock,
+                                        flags);
                        } else {
                                if (sc->callback) {
+                                       /* This callback must not sleep */
                                        sc->callback(oct, OCTEON_REQUEST_DONE,
                                                     sc->callback_arg);
                                }
@@ -559,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
 {
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
-       unsigned long iq_no = wk->ctxul;
+       u64 iq_no = wk->ctxul;
        struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+       u32 delay = 10;
 
        __check_db_timeout(oct, iq_no);
-       queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+       queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
 }
 
 int
@@ -579,7 +544,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
         */
        spin_lock_bh(&iq->post_lock);
 
-       st = __post_command2(oct, iq, force_db, cmd);
+       st = __post_command2(iq, cmd);
 
        if (st.status != IQ_SEND_FAILED) {
                octeon_report_sent_bytes_to_bql(buf, reqtype);
@@ -587,7 +552,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
 
-               if (iq->fill_cnt >= iq->fill_threshold || force_db)
+               if (force_db)
                        ring_doorbell(oct, iq);
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
@@ -618,8 +583,8 @@ octeon_prepare_soft_command(struct octeon_device *oct,
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;
 
-       BUG_ON(opcode > 15);
-       BUG_ON(subcode > 127);
+       WARN_ON(opcode > 15);
+       WARN_ON(subcode > 127);
 
        oct_cfg = octeon_get_conf(oct);
 
@@ -661,7 +626,6 @@ int octeon_send_soft_command(struct octeon_device *oct,
 {
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_irh *irh;
-       struct octeon_instr_rdp *rdp;
        u32 len;
 
        ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
@@ -671,12 +635,10 @@ int octeon_send_soft_command(struct octeon_device *oct,
        }
        irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
        if (irh->rflag) {
-               BUG_ON(!sc->dmarptr);
-               BUG_ON(!sc->status_word);
+               WARN_ON(!sc->dmarptr);
+               WARN_ON(!sc->status_word);
                *sc->status_word = COMPLETION_WORD_INIT;
 
-               rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
-
                sc->cmd.cmd2.rptr = sc->dmarptr;
        }
        len = (u32)ih2->dlengsz;
@@ -720,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
        struct list_head *tmp, *tmp2;
        struct octeon_soft_command *sc;
 
-       spin_lock(&oct->sc_buf_pool.lock);
+       spin_lock_bh(&oct->sc_buf_pool.lock);
 
        list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
                list_del(tmp);
@@ -732,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 
        INIT_LIST_HEAD(&oct->sc_buf_pool.head);
 
-       spin_unlock(&oct->sc_buf_pool.lock);
+       spin_unlock_bh(&oct->sc_buf_pool.lock);
 
        return 0;
 }
@@ -748,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
        struct octeon_soft_command *sc = NULL;
        struct list_head *tmp;
 
-       BUG_ON((offset + datasize + rdatasize + ctxsize) >
+       WARN_ON((offset + datasize + rdatasize + ctxsize) >
               SOFT_COMMAND_BUFFER_SIZE);
 
-       spin_lock(&oct->sc_buf_pool.lock);
+       spin_lock_bh(&oct->sc_buf_pool.lock);
 
        if (list_empty(&oct->sc_buf_pool.head)) {
-               spin_unlock(&oct->sc_buf_pool.lock);
+               spin_unlock_bh(&oct->sc_buf_pool.lock);
                return NULL;
        }
 
@@ -765,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 
        atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
 
-       spin_unlock(&oct->sc_buf_pool.lock);
+       spin_unlock_bh(&oct->sc_buf_pool.lock);
 
        sc = (struct octeon_soft_command *)tmp;
 
@@ -795,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
        offset = (offset + datasize + 127) & 0xffffff80;
 
        if (rdatasize) {
-               BUG_ON(rdatasize < 16);
+               WARN_ON(rdatasize < 16);
                sc->virtrptr = (u8 *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
@@ -808,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 void octeon_free_soft_command(struct octeon_device *oct,
                              struct octeon_soft_command *sc)
 {
-       spin_lock(&oct->sc_buf_pool.lock);
+       spin_lock_bh(&oct->sc_buf_pool.lock);
 
        list_add_tail(&sc->node, &oct->sc_buf_pool.head);
 
        atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
 
-       spin_unlock(&oct->sc_buf_pool.lock);
+       spin_unlock_bh(&oct->sc_buf_pool.lock);
 }