list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
+ rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
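
The restock path now feeds the RBD the aligned address rather than the raw mapping. In the iwlwifi sources iwl_dma_addr2rbd_ptr() packs the buffer address into the descriptor as dma_addr >> 8, i.e. in 256-byte units, so the low bits of an unaligned address are silently dropped and the device would DMA in front of the buffer the CPU thinks it owns. A minimal userspace sketch of that truncation (only the >> 8 packing is taken from the driver; everything else is illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for iwl_dma_addr2rbd_ptr(): the RBD holds
 * the DMA address in 256-byte units, i.e. with the low 8 bits dropped. */
static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t unaligned = 0x12345678ab;           /* not 256-byte aligned */
	uint64_t aligned   = (unaligned + 255) & ~255ULL;

	/* The device reconstructs the buffer address as rbd << 8. */
	uint64_t seen_unaligned = (uint64_t)dma_addr2rbd_ptr(unaligned) << 8;
	uint64_t seen_aligned   = (uint64_t)dma_addr2rbd_ptr(aligned) << 8;

	printf("cpu: %#llx  device: %#llx (low bits lost)\n",
	       (unsigned long long)unaligned, (unsigned long long)seen_unaligned);
	assert(seen_unaligned != unaligned);  /* device would DMA below the buffer */
	assert(seen_aligned == aligned);      /* aligned address survives the packing */
	return 0;
}

The next hunk shows where that aligned address comes from.
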
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
/* Alloc a new receive buffer */
- rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
+ rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
__GFP_NOWARN | GFP_ATOMIC);
if (!rxb->skb) {
if (net_ratelimit())
list_del(element);
/* Get physical address of RB/SKB */
- rxb->dma_addr =
- pci_map_single(priv->pci_dev, rxb->skb->data,
- priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
+ rxb->real_dma_addr = pci_map_single(
+ priv->pci_dev,
+ rxb->skb->data,
+ priv->hw_params.rx_buf_size + 256,
+ PCI_DMA_FROMDEVICE);
+ /* The DMA address must fit in 36 bits... */
+ BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+ /* ...and must also be 256-byte aligned */
+ rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
+ skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
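
The allocation path makes that aligned address always reachable: both the skb and the DMA mapping are extended by 256 bytes, ALIGN(real_dma_addr, 256) then lands at most 255 bytes into the mapping, and skb_reserve() advances skb->data by the same offset so the CPU view and the device view of the frame start stay in step. A standalone sketch of the arithmetic, using the same rounding the kernel's ALIGN() performs for a power-of-two alignment (the values are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same rounding the kernel's ALIGN(x, a) does for power-of-two 'a'. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t rx_buf_size = 3000;	/* stand-in for priv->hw_params.rx_buf_size */
	/* Pretend pci_map_single() returned an arbitrary, unaligned bus address. */
	uint64_t real_dma_addr = 0x0000000fedcba987ULL;

	uint64_t aligned_dma_addr = ALIGN_UP(real_dma_addr, 256);
	uint64_t offset = aligned_dma_addr - real_dma_addr;	/* what skb_reserve() eats */

	printf("real %#llx -> aligned %#llx, offset %llu\n",
	       (unsigned long long)real_dma_addr,
	       (unsigned long long)aligned_dma_addr,
	       (unsigned long long)offset);

	assert((aligned_dma_addr & 255) == 0);	/* satisfies the RBD's >> 8 packing */
	assert(offset < 256);			/* always fits in the extra 256 bytes */
	/* So [aligned, aligned + rx_buf_size) stays inside the
	 * rx_buf_size + 256 bytes that were actually mapped. */
	assert(aligned_dma_addr + rx_buf_size <= real_dma_addr + rx_buf_size + 256);
	return 0;
}

The remaining hunks propagate the new field names and the padded length through teardown and setup.
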
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
+ pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
+ rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);
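
For reference, these hunks rely on fields added elsewhere in the patch to the Rx bookkeeping structures. A rough sketch, reconstructed from how the fields are used in this diff rather than copied from the real header, so the ordering and the omitted members are not authoritative:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>

struct iwl_rb_status;

/* Sketch only: field set inferred from the hunks in this patch. */
struct iwl_rx_mem_buffer {
	dma_addr_t real_dma_addr;	/* exactly what pci_map_single() returned */
	dma_addr_t aligned_dma_addr;	/* real_dma_addr rounded up to 256 bytes */
	struct sk_buff *skb;
	struct list_head list;
};

struct iwl_rx_queue {
	__le32 *bd;			/* RBD circular buffer, 4 * RX_QUEUE_SIZE bytes */
	dma_addr_t dma_addr;		/* bus address of bd */
	struct iwl_rb_status *rb_stts;	/* device-written Rx status area */
	dma_addr_t rb_stts_dma;		/* bus address of rb_stts */
	/* read/write indices, free/used lists, pool[], lock, ... */
};

Allocation of rb_stts and the matching error unwinding follow.
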
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
if (!rxq->bd)
- return -ENOMEM;
+ goto err_bd;
+
+ rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma);
+ if (!rxq->rb_stts)
+ goto err_rb;
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
rxq->free_count = 0;
rxq->need_update = 0;
return 0;
+
+err_rb:
+ pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+err_bd:
+ return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
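
iwl_rx_queue_alloc() also moves from a bare return -ENOMEM to the usual goto-unwind pattern: once the rb_stts allocation can fail after the RBD ring has already succeeded, the error path has to free the ring before returning. A self-contained sketch of the same two-stage unwind (names are invented for the example):

#include <stdio.h>
#include <stdlib.h>

/* Two-stage allocation with goto-based unwinding, mirroring
 * err_rb/err_bd above: a failure at stage N frees stages N-1..1. */
static int alloc_two(void **ring, void **status)
{
	*ring = malloc(1024);		/* stands in for the RBD ring */
	if (!*ring)
		goto err_ring;

	*status = malloc(16);		/* stands in for rb_stts */
	if (!*status)
		goto err_status;

	return 0;

err_status:
	free(*ring);			/* undo the allocation that did succeed */
	*ring = NULL;
err_ring:
	return -1;			/* -ENOMEM in the driver */
}

int main(void)
{
	void *ring, *status;

	if (alloc_two(&ring, &status))
		return 1;
	free(status);
	free(ring);
	return 0;
}

The next hunk applies the same padded unmap length in the queue-reset path.
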
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);
/* Tell device where in DRAM to update its Rx status */
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- (priv->shared_phys + priv->rb_closed_offset) >> 4);
+ rxq->rb_stts_dma >> 4);
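
Finally, the status write-pointer register is pointed at the dedicated rb_stts allocation instead of an offset into the old shared block. The >> 4 implies the hardware takes this address in 16-byte units, so rb_stts must be at least 16-byte aligned, which pci_alloc_consistent() guarantees. A tiny sketch of that encoding (the alignment requirement is inferred from the shift, not from a datasheet):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Pretend this is the coherent-DMA address returned for rb_stts. */
	uint64_t rb_stts_dma = 0x3f75a000ULL;

	/* Register value as written above; the device rebuilds the address
	 * as reg << 4, so any low 4 bits would be silently dropped. */
	uint32_t reg = (uint32_t)(rb_stts_dma >> 4);
	uint64_t seen = (uint64_t)reg << 4;

	assert((rb_stts_dma & 0xf) == 0);	/* must be 16-byte aligned */
	assert(seen == rb_stts_dma);		/* round-trips only when aligned */
	return 0;
}
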
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in