iwl3945: kill iwl3945_rx_queue_restock
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 5a8b75d94d7d99c6d4d98ff6771b436151de2441..24d818d1b06bcfdb5f562a5875bf7e8df81dd0b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -38,6 +38,7 @@
 #include <asm/unaligned.h>
 #include <net/mac80211.h>
 
+#include "iwl-fh.h"
 #include "iwl-3945-fh.h"
 #include "iwl-commands.h"
 #include "iwl-3945.h"
@@ -305,9 +306,9 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
 static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
                                     int txq_id, int index)
 {
-       struct iwl3945_tx_queue *txq = &priv->txq39[txq_id];
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
-       struct iwl3945_tx_info *tx_info;
+       struct iwl_tx_info *tx_info;
 
        BUG_ON(txq_id == IWL_CMD_QUEUE_NUM);
 
@@ -336,7 +337,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
-       struct iwl3945_tx_queue *txq = &priv->txq39[txq_id];
+       struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct ieee80211_tx_info *info;
        struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
        u32  status = le32_to_cpu(tx_resp->status);
@@ -728,7 +729,7 @@ int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
 {
        int count;
        u32 pad;
-       struct iwl3945_tfd_frame *tfd = (struct iwl3945_tfd_frame *)ptr;
+       struct iwl3945_tfd *tfd = (struct iwl3945_tfd *)ptr;
 
        count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
        pad = TFD_CTL_PAD_GET(le32_to_cpu(tfd->control_flags));
@@ -739,8 +740,8 @@ int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
                return -EINVAL;
        }
 
-       tfd->pa[count].addr = cpu_to_le32(addr);
-       tfd->pa[count].len = cpu_to_le32(len);
+       tfd->tbs[count].addr = cpu_to_le32(addr);
+       tfd->tbs[count].len = cpu_to_le32(len);
 
        count++;
 
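For context, the hunk above appends one DMA buffer to a 3945-style transmit frame descriptor (TFD): the chunk count is kept in control_flags, and each chunk's address/length pair goes into tfd->tbs[count]. The standalone sketch below is not part of the patch; it just mirrors that bookkeeping in plain user-space C. The struct layout, the 24-bit shift for the count field, and the helper names are simplified assumptions, and the kernel's cpu_to_le32()/DMA-mapping steps are omitted.

#include <stdint.h>
#include <stdio.h>

#define NUM_TFD_CHUNKS 6                               /* assumed chunk limit */
#define TFD_CTL_COUNT_SET(n) ((uint32_t)(n) << 24)     /* assumed bit layout */
#define TFD_CTL_COUNT_GET(v) (((v) >> 24) & 0x1f)

struct tfd_tb {                 /* one transmit-buffer chunk descriptor */
	uint32_t addr;          /* (bus) address of the chunk */
	uint32_t len;           /* chunk length in bytes */
};

struct tfd {                    /* simplified 3945-style TFD */
	uint32_t control_flags; /* holds the chunk count (and padding) */
	struct tfd_tb tbs[NUM_TFD_CHUNKS];
};

/* Append one buffer to the TFD; fails when all chunk slots are in use. */
static int tfd_attach_buf(struct tfd *tfd, uint32_t addr, uint32_t len)
{
	uint32_t count = TFD_CTL_COUNT_GET(tfd->control_flags);

	if (count >= NUM_TFD_CHUNKS)
		return -1;

	tfd->tbs[count].addr = addr;
	tfd->tbs[count].len  = len;
	tfd->control_flags   = TFD_CTL_COUNT_SET(count + 1);
	return 0;
}

int main(void)
{
	struct tfd t = { 0 };

	tfd_attach_buf(&t, 0x1000, 256);
	tfd_attach_buf(&t, 0x2000, 128);
	printf("chunks attached: %u\n",
	       (unsigned)TFD_CTL_COUNT_GET(t.control_flags));
	return 0;
}
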
@@ -755,10 +756,10 @@ int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
  *
  * Does NOT advance any indexes
  */
-int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl3945_tx_queue *txq)
+int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
-       struct iwl3945_tfd_frame *bd_tmp = (struct iwl3945_tfd_frame *)&txq->bd[0];
-       struct iwl3945_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
+       struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)&txq->tfds39[0];
+       struct iwl3945_tfd *tfd = &tfd_tmp[txq->q.read_ptr];
        struct pci_dev *dev = priv->pci_dev;
        int i;
        int counter;
@@ -769,7 +770,7 @@ int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl3945_tx_queue *txq)
                return 0;
 
        /* sanity check */
-       counter = TFD_CTL_COUNT_GET(le32_to_cpu(bd->control_flags));
+       counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
        if (counter > NUM_TFD_CHUNKS) {
                IWL_ERR(priv, "Too many chunks: %i\n", counter);
                /* @todo issue fatal error, it is quite serious situation */
@@ -779,8 +780,8 @@ int iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl3945_tx_queue *txq)
        /* unmap chunks if any */
 
        for (i = 1; i < counter; i++) {
-               pci_unmap_single(dev, le32_to_cpu(bd->pa[i].addr),
-                                le32_to_cpu(bd->pa[i].len), PCI_DMA_TODEVICE);
+               pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+                        le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
                if (txq->txb[txq->q.read_ptr].skb[0]) {
                        struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
                        if (txq->txb[txq->q.read_ptr].skb[0]) {
@@ -974,9 +975,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
        }
 
        iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
-       iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
-                            priv->shared_phys +
-                            offsetof(struct iwl3945_shared, rx_read_ptr[0]));
+       iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
        iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
        iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
                FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
@@ -1062,7 +1061,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
        for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++) {
                slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
                                TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-               rc = iwl3945_tx_queue_init(priv, &priv->txq39[txq_id], slots_num,
+               rc = iwl3945_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
                                txq_id);
                if (rc) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
@@ -1203,13 +1202,13 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 
        /* Allocate the RX queue, or reset if it is already allocated */
        if (!rxq->bd) {
-               rc = iwl3945_rx_queue_alloc(priv);
+               rc = iwl_rx_queue_alloc(priv);
                if (rc) {
                        IWL_ERR(priv, "Unable to initialize Rx queue\n");
                        return -ENOMEM;
                }
        } else
-               iwl3945_rx_queue_reset(priv, rxq);
+               iwl_rx_queue_reset(priv, rxq);
 
        iwl3945_rx_replenish(priv);
 
@@ -1219,7 +1218,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 
        /* Look at using this instead:
        rxq->need_update = 1;
-       iwl3945_rx_queue_update_write_ptr(priv, rxq);
+       iwl_rx_queue_update_write_ptr(priv, rxq);
        */
 
        rc = iwl_grab_nic_access(priv);
@@ -1252,7 +1251,7 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
 
        /* Tx queues */
        for (txq_id = 0; txq_id < TFD_QUEUE_MAX; txq_id++)
-               iwl3945_tx_queue_free(priv, &priv->txq39[txq_id]);
+               iwl3945_tx_queue_free(priv, &priv->txq[txq_id]);
 }
 
 void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1323,7 +1322,7 @@ static void iwl3945_apm_stop(struct iwl_priv *priv)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-int iwl3945_apm_reset(struct iwl_priv *priv)
+static int iwl3945_apm_reset(struct iwl_priv *priv)
 {
        int rc;
        unsigned long flags;
@@ -2343,7 +2342,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
        return 0;
 }
 
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl3945_tx_queue *txq)
+int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
        int rc;
        unsigned long flags;
@@ -2377,11 +2376,17 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl3945_tx_queue *txq
        return 0;
 }
 
-int iwl3945_hw_get_rx_read(struct iwl_priv *priv)
+/*
+ * HCMD utils
+ */
+static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
 {
-       struct iwl3945_shared *shared_data = priv->shared_virt;
-
-       return le32_to_cpu(shared_data->rx_read_ptr[0]);
+       switch (cmd_id) {
+       case REPLY_RXON:
+               return (u16) sizeof(struct iwl3945_rxon_cmd);
+       default:
+               return len;
+       }
 }
 
 /**
@@ -2487,7 +2492,8 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
        priv->hw_params.max_stations = IWL3945_STATION_COUNT;
        priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID;
 
-       priv->hw_params.tx_ant_num = 2;
+       priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+
        return 0;
 }
 
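The hw_params change above is part of what lets the shared RX code take over from the 3945-private restock path: rather than each driver poking its own write-pointer register, device setup records the register offset (here FH39_RSCSR_CHNL0_WPTR) in hw_params.rx_wrt_ptr_reg and the generic queue code writes through it. A minimal sketch of that parameterization follows; the structure names, the placeholder register value, and the fake register write are assumptions for illustration, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

#define FH39_RSCSR_CHNL0_WPTR 0x0BC0u   /* placeholder offset for the sketch */

struct hw_params {
	uint32_t rx_wrt_ptr_reg;        /* device-specific RX write-pointer register */
};

struct rx_queue {
	uint16_t write;                 /* next slot to hand to the DMA engine */
};

/* Stand-in for iwl_write_direct32(): prints instead of touching MMIO. */
static void write_direct32(uint32_t reg, uint32_t val)
{
	printf("reg 0x%04x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

/* Generic update path: the device only tells us *which* register to poke. */
static void rx_queue_update_write_ptr(const struct hw_params *hw,
				      const struct rx_queue *rxq)
{
	/* 8-entry alignment of the pointer is an assumption of this sketch */
	write_direct32(hw->rx_wrt_ptr_reg, rxq->write & ~0x7u);
}

int main(void)
{
	struct hw_params hw = { .rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR };
	struct rx_queue rxq = { .write = 40 };

	rx_queue_update_write_ptr(&hw, &rxq);
	return 0;
}
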
@@ -2701,8 +2707,13 @@ static struct iwl_lib_ops iwl3945_lib = {
        },
 };
 
+static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
+       .get_hcmd_size = iwl3945_get_hcmd_size,
+};
+
 static struct iwl_ops iwl3945_ops = {
        .lib = &iwl3945_lib,
+       .utils = &iwl3945_hcmd_utils,
 };
 
 static struct iwl_cfg iwl3945_bg_cfg = {
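The last two hunks register the new iwl3945_get_hcmd_size() through a per-device ops table, so the shared host-command path can ask each hardware generation how large a given command should be on the wire (the 3945 RXON layout is smaller than the one later devices use). The sketch below shows only that dispatch pattern; the command id value, the struct sizes, and the function names are made up for illustration and are not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define REPLY_RXON 0x10                          /* command id, value assumed */

struct rxon_cmd_3945 { uint8_t raw[72]; };       /* sizes are illustrative only */
struct rxon_cmd_later { uint8_t raw[92]; };

struct hcmd_utils_ops {
	/* Returns how many bytes of 'len' this device family actually sends. */
	uint16_t (*get_hcmd_size)(uint8_t cmd_id, uint16_t len);
};

static uint16_t get_hcmd_size_3945(uint8_t cmd_id, uint16_t len)
{
	switch (cmd_id) {
	case REPLY_RXON:
		return (uint16_t)sizeof(struct rxon_cmd_3945); /* trim to 3945 layout */
	default:
		return len;                                    /* most commands pass through */
	}
}

static const struct hcmd_utils_ops ops_3945 = {
	.get_hcmd_size = get_hcmd_size_3945,
};

int main(void)
{
	/* Generic code builds the larger command, then asks the ops table to size it. */
	uint16_t built = (uint16_t)sizeof(struct rxon_cmd_later);
	uint16_t sent  = ops_3945.get_hcmd_size(REPLY_RXON, built);

	printf("built %u bytes, sending %u bytes\n", (unsigned)built, (unsigned)sent);
	return 0;
}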