net/mlx4_en: num cores tx rings for every UP
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 17968244c399509b169540388b8b1d971739eef1..019d856b1334fc4e7b389eb00d16da872d63ee4a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
        inline_thold = min(inline_thold, MAX_INLINE);
 
-       spin_lock_init(&ring->comp_lock);
-
        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = vmalloc(tmp);
        if (!ring->tx_info)
@@ -156,7 +154,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             struct mlx4_en_tx_ring *ring,
-                            int cq)
+                            int cq, int user_prio)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
        int err;
@@ -174,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
        ring->doorbell_qpn = ring->qp.qpn << 8;
 
        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-                               ring->cqn, &ring->context);
+                               ring->cqn, user_prio, &ring->context);
        if (ring->bf_enabled)
                ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
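
The new user_prio argument is threaded from ring activation into the QP context. This diff only shows the call site; a hedged sketch of what the companion change to mlx4_en_fill_qp_context() (in en_resources.c, not shown on this page) presumably does is fold the 3-bit priority into the QP's schedule-queue field so the hardware arbiter services each ring at its user priority. Field name and shift below are assumptions based on the mlx4 QP layout, not confirmed by this diff:

        /* sketch only: assumed body of the companion en_resources.c change */
        if (user_prio >= 0)
                context->pri_path.sched_queue |= user_prio << 3;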
 
@@ -317,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        int size = cq->size;
        u32 size_mask = ring->size_mask;
        struct mlx4_cqe *buf = cq->buf;
+       u32 packets = 0;
+       u32 bytes = 0;
 
        if (!priv->port_up)
                return;
@@ -345,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                        priv, ring, ring_index,
                                        !!((ring->cons + txbbs_skipped) &
                                                        ring->size));
+                       packets++;
+                       bytes += ring->tx_info[ring_index].nr_bytes;
                } while (ring_index != new_index);
 
                ++cons_index;
@@ -361,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        mlx4_cq_set_ci(mcq);
        wmb();
        ring->cons += txbbs_skipped;
+       netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
        /* Wakeup Tx queue if this ring stopped it */
        if (unlikely(ring->blocked)) {
                if ((u32) (ring->prod - ring->cons) <=
                     ring->size - HEADROOM - MAX_DESC_TXBBS) {
                        ring->blocked = 0;
-                       netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+                       netif_tx_wake_queue(ring->tx_queue);
                        priv->port_stats.wake_queue++;
                }
        }
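
The counters added above wire TX completions into byte queue limits (BQL): netdev_tx_completed_queue() reports how many packets and bytes actually left the hardware, matching the netdev_tx_sent_queue() call added in mlx4_en_xmit() further down. Note that for TSO frames nr_bytes is skb->len plus (segments - 1) replicated headers, i.e. the true on-wire byte count. A minimal, driver-agnostic sketch of the BQL pattern (everything except the netdev_tx_* and netif helpers is illustrative):

        /* BQL in miniature: report bytes handed to HW at xmit time and
         * packets/bytes reclaimed at completion time, so the core can cap
         * the in-flight queue depth and keep latency down. */
        static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct netdev_queue *txq =
                        netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                /* ... post skb to the hardware ring ... */
                netdev_tx_sent_queue(txq, skb->len);
                return NETDEV_TX_OK;
        }

        static void sketch_tx_complete(struct netdev_queue *txq,
                                       unsigned int pkts, unsigned int bytes)
        {
                /* ... free the reclaimed skbs ... */
                netdev_tx_completed_queue(txq, pkts, bytes);
        }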
@@ -377,41 +380,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 {
        struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
        struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-       if (!spin_trylock(&ring->comp_lock))
-               return;
        mlx4_en_process_tx_cq(cq->dev, cq);
-       mod_timer(&cq->timer, jiffies + 1);
-       spin_unlock(&ring->comp_lock);
+       mlx4_en_arm_cq(priv, cq);
 }
 
 
-void mlx4_en_poll_tx_cq(unsigned long data)
-{
-       struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
-       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-       u32 inflight;
-
-       INC_PERF_COUNTER(priv->pstats.tx_poll);
-
-       if (!spin_trylock_irq(&ring->comp_lock)) {
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-               return;
-       }
-       mlx4_en_process_tx_cq(cq->dev, cq);
-       inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
-
-       /* If there are still packets in flight and the timer has not already
-        * been scheduled by the Tx routine then schedule it here to guarantee
-        * completion processing of these packets */
-       if (inflight && priv->port_up)
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-       spin_unlock_irq(&ring->comp_lock);
-}
-
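
With every completion now signalled through an armed CQ, the function removed above becomes dead code: mlx4_en_poll_tx_cq() was a timer that periodically drained the CQ under comp_lock in case both the interrupt path and the xmit-time polling missed work, which is also why the spin_lock_init(&ring->comp_lock) disappears in the first hunk. The replacement model is purely interrupt-driven, as the new mlx4_en_tx_irq() above shows; in sketch form (type and helper names hypothetical):

        /* sketch: drain-then-rearm, no timer and no per-ring lock needed */
        static void sketch_tx_irq(struct sketch_cq *cq)
        {
                drain_tx_completions(cq);  /* hypothetical: reclaim finished skbs */
                rearm_cq_interrupt(cq);    /* hypothetical: ask HW for the next event */
        }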
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
                                                      struct mlx4_en_tx_ring *ring,
                                                      u32 index,
@@ -440,25 +414,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
        return ring->buf + index * TXBB_SIZE;
 }
 
-static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
-{
-       struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
-       unsigned long flags;
-
-       /* If we don't have a pending timer, set one up to catch our recent
-          post in case the interface becomes idle */
-       if (!timer_pending(&cq->timer))
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-       /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
-       if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-               if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
-                       mlx4_en_process_tx_cq(priv->dev, cq);
-                       spin_unlock_irqrestore(&ring->comp_lock, flags);
-               }
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
        void *ptr;
@@ -571,17 +526,16 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       u16 vlan_tag = 0;
+       u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
+       u8 up = 0;
 
-       /* If we support per priority flow control and the packet contains
-        * a vlan tag, send the packet to the TX ring assigned to that priority
-        */
-       if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
-               vlan_tag = vlan_tx_tag_get(skb);
-               return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
-       }
+       if (dev->num_tc)
+               return skb_tx_hash(dev, skb);
 
-       return skb_tx_hash(dev, skb);
+       if (vlan_tx_tag_present(skb))
+               up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
+
+       return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
 }
 
 static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
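
The rewritten mlx4_en_select_queue() gives every user priority (UP) its own block of rings_p_up rings, instead of the old layout of MLX4_EN_NUM_TX_RINGS hashed rings plus a single dedicated ring per VLAN priority. VLAN_PRIO_SHIFT is 13, so the new code extracts the same 3-bit PCP field the old '>> 13' did. When DCB/mqprio traffic classes are configured (dev->num_tc), the stack's own skb_tx_hash() already confines the hash to the queue range of the skb's traffic class, hence the early return. A worked example of the mapping (ring counts assumed for illustration):

        /* With rings_p_up = 4 (say, 4 cores), the ring layout becomes:
         *   up 0 -> rings  0..3   (also all untagged traffic)
         *   up 1 -> rings  4..7
         *   ...
         *   up 5 -> rings 20..23
         * __skb_tx_hash(dev, skb, rings_p_up) picks the slot in the block. */
        static u16 sketch_ring_index(u16 hash_in_block, u8 up, u16 rings_p_up)
        {
                return hash_in_block + up * rings_p_up; /* hash_in_block < rings_p_up */
        }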
@@ -594,7 +548,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_tx_ring *ring;
-       struct mlx4_en_cq *cq;
        struct mlx4_en_tx_desc *tx_desc;
        struct mlx4_wqe_data_seg *data;
        struct skb_frag_struct *frag;
@@ -638,13 +591,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(((int)(ring->prod - ring->cons)) >
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue */
-               netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+               netif_tx_stop_queue(ring->tx_queue);
                ring->blocked = 1;
                priv->port_stats.queue_stopped++;
 
-               /* Use interrupts to find out when queue opened */
-               cq = &priv->tx_cq[tx_ind];
-               mlx4_en_arm_cq(priv, cq);
                return NETDEV_TX_BUSY;
        }
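
A design note on the hunk above: the xmit path used to arm the CQ here (per the removed "Use interrupts to find out when queue opened" comment) because TX interrupts were otherwise left unarmed and the driver had to request one to learn when the full ring drained. Since mlx4_en_tx_irq() now re-arms the CQ after every completion pass, the wake-up in mlx4_en_process_tx_cq() arrives on its own and the explicit arm (and the cq local) can go.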
 
@@ -707,7 +657,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                priv->port_stats.tso_packets++;
                i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
                        !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-               ring->bytes += skb->len + (i - 1) * lso_header_size;
+               tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
                ring->packets += i;
        } else {
                /* Normal (Non LSO) packet */
@@ -715,10 +665,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                        ((ring->prod & ring->size) ?
                         cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                data = &tx_desc->data;
-               ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+               tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                ring->packets++;
 
        }
+       ring->bytes += tx_info->nr_bytes;
+       netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
 
@@ -792,9 +744,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
        }
 
-       /* Poll CQ here */
-       mlx4_en_xmit_poll(priv, tx_ind);
-
        return NETDEV_TX_OK;
 
 tx_drop: