iwlwifi: update copyright year to 2010
[deliverable/linux.git] drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec53102f7d5d00c15cc5a7045588dd..eb45f8be0746bc66eec6476e0102464b65b007a9 100644
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -140,6 +140,8 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+                                     reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        goto exit_unlock;
@@ -200,7 +202,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
                list_del(element);
 
                /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+               rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
@@ -239,8 +241,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct list_head *element;
        struct iwl_rx_mem_buffer *rxb;
-       struct sk_buff *skb;
+       struct page *page;
        unsigned long flags;
+       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +254,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
                spin_unlock_irqrestore(&rxq->lock, flags);
 
                if (rxq->free_count > RX_LOW_WATERMARK)
-                       priority |= __GFP_NOWARN;
-               /* Alloc a new receive buffer */
-               skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-                                               priority);
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (priv->hw_params.rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
 
-               if (!skb) {
+               /* Alloc a new receive buffer */
+               page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+               if (!page) {
                        if (net_ratelimit())
-                               IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+                               IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+                                              "order: %d\n",
+                                              priv->hw_params.rx_page_order);
+
                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
-                               IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+                               IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
-                       break;
+                       return;
                }
 
                spin_lock_irqsave(&rxq->lock, flags);
 
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
-                       dev_kfree_skb_any(skb);
+                       __free_pages(page, priv->hw_params.rx_page_order);
                        return;
                }
                element = rxq->rx_used.next;
@@ -283,24 +291,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 
                spin_unlock_irqrestore(&rxq->lock, flags);
 
-               rxb->skb = skb;
-               /* Get physical address of RB/SKB */
-               rxb->real_dma_addr = pci_map_single(
-                                       priv->pci_dev,
-                                       rxb->skb->data,
-                                       priv->hw_params.rx_buf_size + 256,
-                                       PCI_DMA_FROMDEVICE);
+               rxb->page = page;
+               /* Get physical address of the RB */
+               rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
                /* dma address must be no more than 36 bits */
-               BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
-               rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-               skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
                spin_lock_irqsave(&rxq->lock, flags);
 
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
-               priv->alloc_rxb_skb++;
+               priv->alloc_rxb_page++;
 
                spin_unlock_irqrestore(&rxq->lock, flags);
        }
@@ -336,12 +341,12 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
        int i;
        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-               if (rxq->pool[i].skb != NULL) {
-                       pci_unmap_single(priv->pci_dev,
-                                        rxq->pool[i].real_dma_addr,
-                                        priv->hw_params.rx_buf_size + 256,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb(rxq->pool[i].skb);
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
                }
        }
 
@@ -405,14 +410,12 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
-               if (rxq->pool[i].skb != NULL) {
-                       pci_unmap_single(priv->pci_dev,
-                                        rxq->pool[i].real_dma_addr,
-                                        priv->hw_params.rx_buf_size + 256,
-                                        PCI_DMA_FROMDEVICE);
-                       priv->alloc_rxb_skb--;
-                       dev_kfree_skb(rxq->pool[i].skb);
-                       rxq->pool[i].skb = NULL;
+               if (rxq->pool[i].page != NULL) {
+                       pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+                               PAGE_SIZE << priv->hw_params.rx_page_order,
+                               PCI_DMA_FROMDEVICE);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
+                       rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
@@ -470,7 +473,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                           (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
 
-       iwl_write32(priv, CSR_INT_COALESCING, 0x40);
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 
        return 0;
 }
@@ -491,7 +495,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
 
 {
-       struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_missed_beacon_notif *missed_beacon;
 
        missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +552,60 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
                        priv->last_rx_noise);
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+/*
+ *  Based on the assumption that all statistics counters are in DWORDs.
+ *  FIXME: This function is for debugging only and does not handle
+ *  the case of counter roll-over.
+ */
+static void iwl_accumulative_statistics(struct iwl_priv *priv,
+                                       __le32 *stats)
+{
+       int i;
+       __le32 *prev_stats;
+       u32 *accum_stats;
+       u32 *delta, *max_delta;
+
+       prev_stats = (__le32 *)&priv->statistics;
+       accum_stats = (u32 *)&priv->accum_statistics;
+       delta = (u32 *)&priv->delta_statistics;
+       max_delta = (u32 *)&priv->max_delta;
+
+       for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
+            i += sizeof(__le32), stats++, prev_stats++, delta++,
+            max_delta++, accum_stats++) {
+               if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+                       *delta = (le32_to_cpu(*stats) -
+                               le32_to_cpu(*prev_stats));
+                       *accum_stats += *delta;
+                       if (*delta > *max_delta)
+                               *max_delta = *delta;
+               }
+       }
+
+       /* reset accumulative statistics for "no-counter" type statistics */
+       priv->accum_statistics.general.temperature =
+               priv->statistics.general.temperature;
+       priv->accum_statistics.general.temperature_m =
+               priv->statistics.general.temperature_m;
+       priv->accum_statistics.general.ttl_timestamp =
+               priv->statistics.general.ttl_timestamp;
+       priv->accum_statistics.tx.tx_power.ant_a =
+               priv->statistics.tx.tx_power.ant_a;
+       priv->accum_statistics.tx.tx_power.ant_b =
+               priv->statistics.tx.tx_power.ant_b;
+       priv->accum_statistics.tx.tx_power.ant_c =
+               priv->statistics.tx.tx_power.ant_c;
+}
+#endif
+
 #define REG_RECALIB_PERIOD (60)
 
 void iwl_rx_statistics(struct iwl_priv *priv,
                              struct iwl_rx_mem_buffer *rxb)
 {
        int change;
-       struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
        IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
                     (int)sizeof(priv->statistics),
@@ -566,6 +617,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
                    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
                   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
 
+#ifdef CONFIG_IWLWIFI_DEBUG
+       iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
+#endif
        memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
 
        set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,54 +636,30 @@ void iwl_rx_statistics(struct iwl_priv *priv,
                iwl_rx_calc_noise(priv);
                queue_work(priv->workqueue, &priv->run_time_calib_work);
        }
-
-       iwl_leds_background(priv);
-
        if (priv->cfg->ops->lib->temp_ops.temperature && change)
                priv->cfg->ops->lib->temp_ops.temperature(priv);
 }
 EXPORT_SYMBOL(iwl_rx_statistics);
 
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95)   /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- *   about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
+void iwl_reply_statistics(struct iwl_priv *priv,
+                             struct iwl_rx_mem_buffer *rxb)
 {
-       int sig_qual;
-       int degradation = PERFECT_RSSI - rssi_dbm;
-
-       /* If we get a noise measurement, use signal-to-noise ratio (SNR)
-        * as indicator; formula is (signal dbm - noise dbm).
-        * SNR at or above 40 is a great signal (100%).
-        * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
-        * Weakest usable signal is usually 10 - 15 dB SNR. */
-       if (noise_dbm) {
-               if (rssi_dbm - noise_dbm >= 40)
-                       return 100;
-               else if (rssi_dbm < noise_dbm)
-                       return 0;
-               sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
-       /* Else use just the signal level.
-        * This formula is a least squares fit of data points collected and
-        *   compared with a reference system that had a percentage (%) display
-        *   for signal quality. */
-       } else
-               sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
-                           (15 * RSSI_RANGE + 62 * degradation)) /
-                          (RSSI_RANGE * RSSI_RANGE);
-
-       if (sig_qual > 100)
-               sig_qual = 100;
-       else if (sig_qual < 1)
-               sig_qual = 0;
-
-       return sig_qual;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+       if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+               memset(&priv->accum_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->delta_statistics, 0,
+                       sizeof(struct iwl_notif_statistics));
+               memset(&priv->max_delta, 0,
+                       sizeof(struct iwl_notif_statistics));
+#endif
+               IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+       }
+       iwl_rx_statistics(priv, rxb);
 }
+EXPORT_SYMBOL(iwl_reply_statistics);
 
 /* Calc max signal level (dBm) among 3 possible receivers */
 static inline int iwl_calc_rssi(struct iwl_priv *priv,
@@ -878,6 +908,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
                                        struct iwl_rx_mem_buffer *rxb,
                                        struct ieee80211_rx_status *stats)
 {
+       struct sk_buff *skb;
+       int ret = 0;
+       __le16 fc = hdr->frame_control;
+
        /* We only process data packets if the interface is open */
        if (unlikely(!priv->is_open)) {
                IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +924,44 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
            iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
                return;
 
-       /* Resize SKB from mac header to end of packet */
-       skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-       skb_put(rxb->skb, len);
+       skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(priv, "alloc_skb failed\n");
+               return;
+       }
+
+       skb_reserve(skb, IWL_LINK_HDR_MAX);
+       skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+       /* mac80211 currently doesn't support paged SKBs. Convert to a
+        * linear SKB for management frames and for data frames that
+        * require software decryption or software defragmentation. */
+       if (ieee80211_is_mgmt(fc) ||
+           ieee80211_has_protected(fc) ||
+           ieee80211_has_morefrags(fc) ||
+           le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+               ret = skb_linearize(skb);
+       else
+               ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+                        0 : -ENOMEM;
+
+       if (ret) {
+               kfree_skb(skb);
+               goto out;
+       }
+
+       /*
+        * XXX: We cannot touch the page and its virtual memory (hdr) after
+        * here. It might have already been freed by the above skb change.
+        */
 
-       iwl_update_stats(priv, false, hdr->frame_control, len);
-       memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-       ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-       priv->alloc_rxb_skb--;
-       rxb->skb = NULL;
+       iwl_update_stats(priv, false, fc, len);
+       memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+       ieee80211_rx(priv->hw, skb);
+ out:
+       priv->alloc_rxb_page--;
+       rxb->page = NULL;
 }
 
 /* This is necessary only for a number of statistics, see the caller. */
@@ -926,13 +989,12 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 {
        struct ieee80211_hdr *header;
        struct ieee80211_rx_status rx_status;
-       struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_phy_res *phy_res;
        __le32 rx_pkt_status;
        struct iwl4965_rx_mpdu_res_start *amsdu;
        u32 len;
        u32 ampdu_status;
-       u16 fc;
        u32 rate_n_flags;
 
        /**
@@ -1009,11 +1071,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
        if (iwl_is_associated(priv) &&
            !test_bit(STATUS_SCANNING, &priv->status)) {
                rx_status.noise = priv->last_rx_noise;
-               rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
-                                                        rx_status.noise);
        } else {
                rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-               rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
        }
 
        /* Reset beacon noise level if not associated. */
@@ -1026,8 +1085,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
                iwl_dbg_report_frame(priv, phy_res, len, header, 1);
 #endif
        iwl_dbg_log_rx_data_frame(priv, len, header);
-       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
-               rx_status.signal, rx_status.noise, rx_status.qual,
+       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+               rx_status.signal, rx_status.noise,
                (unsigned long long)rx_status.mactime);
 
        /*
@@ -1065,20 +1124,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
                priv->last_tsf = le64_to_cpu(phy_res->timestamp);
        }
 
-       fc = le16_to_cpu(header->frame_control);
-       switch (fc & IEEE80211_FCTL_FTYPE) {
-       case IEEE80211_FTYPE_MGMT:
-       case IEEE80211_FTYPE_DATA:
-               if (priv->iw_mode == NL80211_IFTYPE_AP)
-                       iwl_update_ps_mode(priv, fc  & IEEE80211_FCTL_PM,
-                                               header->addr2);
-               /* fall through */
-       default:
-               iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
-                               rxb, &rx_status);
-               break;
-
-       }
+       iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+                                   rxb, &rx_status);
 }
 EXPORT_SYMBOL(iwl_rx_reply_rx);
 
@@ -1087,7 +1134,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
                                    struct iwl_rx_mem_buffer *rxb)
 {
-       struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
        priv->last_phy_res[0] = 1;
        memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
               sizeof(struct iwl_rx_phy_res));