Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 3 May 2016 22:07:50 +0000 (15:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 3 May 2016 22:07:50 +0000 (15:07 -0700)
Pull networking fixes from David Miller:
 "Some straggler bug fixes:

   1) Batman-adv DAT must consider VLAN IDs when choosing candidate
      nodes, from Antonio Quartulli.

   2) Fix botched reference counting of vlan objects and neigh nodes in
      batman-adv, from Sven Eckelmann.

   3) netem can crash when it sees GSO packets; the fix is to segment
      them upon ->enqueue.  Fix from Neil Horman with help from Eric
      Dumazet.

   4) Fix VXLAN dependencies in mlx5 driver Kconfig, from Matthew
      Finlay.

   5) Handle VXLAN ops outside of the RCU lock, via a workqueue, in
      mlx5, since they can sleep.  Fix also from Matthew Finlay (a
      sketch of the pattern follows the shortlog below).

   6) Check mdiobus_scan() return values properly in pxa168_eth and macb
      drivers.  From Sergei Shtylyov.

   7) If the netdevice doesn't support checksumming, disable
      segmentation.  From Alexander Duyck.

   8) Fix races between RDS tcp accept and sending, from Sowmini
      Varadhan.

   9) In macb driver, probe MDIO bus before we register the netdev,
      otherwise we can try to open the device before it is really ready
      for that.  Fix from Florian Fainelli.

  10) Netlink attribute size for ILA "tunnels" not calculated properly,
      fix from Nicolas Dichtel"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  ipv6/ila: fix nlsize calculation for lwtunnel
  net: macb: Probe MDIO bus before registering netdev
  RDS: TCP: Synchronize accept() and connect() paths on t_conn_lock.
  RDS:TCP: Synchronize rds_tcp_accept_one with rds_send_xmit when resetting t_sock
  vxlan: Add checksum check to the features check function
  net: Disable segmentation if checksumming is not supported
  net: mvneta: Remove superfluous SMP function call
  macb: fix mdiobus_scan() error check
  pxa168_eth: fix mdiobus_scan() error check
  net/mlx5e: Use workqueue for vxlan ops
  net/mlx5e: Implement a mlx5e workqueue
  net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue
  net/mlx5: Unmap only the relevant IO memory mapping
  netem: Segment GSO packets on enqueue
  batman-adv: Fix reference counting of hardif_neigh_node object for neigh_node
  batman-adv: Fix reference counting of vlan object for tt_local_entry
  batman-adv: B.A.T.M.A.N V - make sure iface is reactivated upon NETDEV_UP event
  batman-adv: fix DAT candidate selection (must use vid)

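The mlx5 fixes in (4) and (5) lean on a standard kernel pattern: the ndo VXLAN port callbacks run in atomic (RCU read-side) context, so the sleeping firmware commands are packaged into a work item allocated with GFP_ATOMIC and punted to a driver-private workqueue. A minimal sketch of that pattern, with hypothetical names rather than the actual mlx5 code:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct port_work {
	struct work_struct work;
	u16 port;
};

/* Runs later in process context on the workqueue, where sleeping is fine. */
static void port_work_fn(struct work_struct *work)
{
	struct port_work *pw = container_of(work, struct port_work, work);

	/* ... issue the sleeping firmware command for pw->port here ... */
	kfree(pw);
}

/* Callable from atomic context: only a GFP_ATOMIC allocation and a queue op. */
static void queue_port_work(struct workqueue_struct *wq, u16 port)
{
	struct port_work *pw = kmalloc(sizeof(*pw), GFP_ATOMIC);

	if (!pw)
		return;		/* best effort; the port op is simply dropped */

	INIT_WORK(&pw->work, port_work_fn);
	pw->port = port;
	queue_work(wq, &pw->work);
}

The owner of the workqueue then flushes and destroys it on teardown, as mlx5e_destroy_netdev() does in the diff below with flush_workqueue()/destroy_workqueue().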
24 files changed:
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
include/linux/if_ether.h
include/net/vxlan.h
net/batman-adv/bat_v.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/core/dev.c
net/ipv6/ila/ila_lwt.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/sched/sch_netem.c

diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 48a7d7dee8461117b3f47bcbdebe4c0745385bf7..a63551d0a18a4b63a5ea385a46fabef39b0d36a8 100644
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->dev->dev;
+       bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
                                struct phy_device *phydev;
 
                                phydev = mdiobus_scan(bp->mii_bus, i);
-                               if (IS_ERR(phydev)) {
+                               if (IS_ERR(phydev) &&
+                                   PTR_ERR(phydev) != -ENODEV) {
                                        err = PTR_ERR(phydev);
                                        break;
                                }
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
        if (err)
                goto err_out_free_netdev;
 
+       err = macb_mii_init(bp);
+       if (err)
+               goto err_out_free_netdev;
+
+       phydev = bp->phy_dev;
+
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_unregister_mdio;
        }
 
-       err = macb_mii_init(bp);
-       if (err)
-               goto err_out_unregister_netdev;
-
-       netif_carrier_off(dev);
+       phy_attached_info(phydev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);
 
-       phydev = bp->phy_dev;
-       phy_attached_info(phydev);
-
        return 0;
 
-err_out_unregister_netdev:
-       unregister_netdev(dev);
+err_out_unregister_mdio:
+       phy_disconnect(bp->phy_dev);
+       mdiobus_unregister(bp->mii_bus);
+       mdiobus_free(bp->mii_bus);
+
+       /* Shutdown the PHY if there is a GPIO reset */
+       if (bp->reset_gpio)
+               gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
        free_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7fc490225da507de93c66c1b55eba3f0e5295e58..a6d26d351dfc47c777b39c04a44c2f17bca0feab 100644
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Enable per-CPU interrupts on the CPU that is
                 * brought up.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_enable,
-                                        pp, true);
+               mvneta_percpu_enable(pp);
 
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Disable per-CPU interrupts on the CPU that is
                 * brought down.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_disable,
-                                        pp, true);
+               mvneta_percpu_disable(pp);
 
                break;
        case CPU_DEAD:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 7ace07dad6a31d4b18ab5aa1e5335c0727cff596..c442f6ad15fff806af5f747e70d085766dd37ae9 100644
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
                return 0;
 
        pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+       if (IS_ERR(pep->phy))
+               return PTR_ERR(pep->phy);
        if (!pep->phy)
                return -ENODEV;
 
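The two mdiobus_scan() fixes (here and in macb above) both come down to the kernel's ERR_PTR() convention: the error is encoded in the returned pointer itself, so a bare NULL test lets it through and the caller later treats a small negative value as a valid phy_device. A hedged caller-side sketch of the decode step (macb additionally tolerates -ENODEV because it probes every address and empty slots are expected there):

#include <linux/err.h>
#include <linux/phy.h>

static int check_scan_result(struct phy_device *phydev)
{
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);	/* encoded error, e.g. -ENODEV or -EIO */
	if (!phydev)
		return -ENODEV;		/* defensive NULL check, as kept above */
	return 0;
}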
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 1cf722eba607d82e8e34ff498e1b154f8fc8cc5d..559d11a443bc4973aae435e611f45ee014cf41fb 100644
@@ -14,6 +14,7 @@ config MLX5_CORE_EN
        bool "Mellanox Technologies ConnectX-4 Ethernet support"
        depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
        select PTP_1588_CLOCK
+       select VXLAN if MLX5_CORE=y
        default n
        ---help---
          Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e80ce94b5dcf0e139b629732da3658892ca8590d..3881dce0cc30b6829c8a850ce9ff887024d665a9 100644
@@ -567,6 +567,7 @@ struct mlx5e_priv {
        struct mlx5e_vxlan_db      vxlan;
 
        struct mlx5e_params        params;
+       struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 67d548b70e142a376d0fc802a4576ee965f92520..d4dfc5ce516a41daf2f668aa3015c37ff4b73853 100644
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -1505,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
@@ -1961,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1976,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
@@ -2158,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2169,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2498,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2580,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                vxlan_get_rx_port(netdev);
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
@@ -2617,6 +2620,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
@@ -2630,9 +2636,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
+       flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mutex_lock(&priv->state_lock);
@@ -2655,6 +2661,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
 
        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8ba080e441a1863e63d4188820afbd2f1c39bd6e..5ff8af472bf5221e736e5369b4de73bfe996b0e9 100644
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-       iounmap(uar->map);
-       iounmap(uar->bf_map);
+       if (uar->map)
+               iounmap(uar->map);
+       else
+               iounmap(uar->bf_map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
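For context on this hunk: on the 4.6-era allocation path, mlx5_alloc_map_uar() ends up ioremapping exactly one of the two cookies -- the write-combining bf_map when requested (falling back to a regular mapping if that fails), the plain map otherwise. A simplified sketch of that alloc-side invariant, paraphrased from memory rather than quoted from the driver, assuming the mlx5 struct mlx5_uar:

#include <linux/io.h>
#include <linux/types.h>

/* Simplified: struct mlx5_uar starts zeroed and each branch fills in only
 * one of the two cookies -- which is why the free path above can pick the
 * right one to iounmap() by testing uar->map. */
static void sketch_map_uar(struct mlx5_uar *uar, phys_addr_t pfn, bool map_wc)
{
	if (map_wc) {
		uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!uar->bf_map)	/* fall back to an uncached mapping */
			uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	} else {
		uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	}
}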
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 9f10df25f3cd52d9f631fd1e0963483eb1e7073a..f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44 100644
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-       if (err)
-               return err;
+       if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+               goto free_work;
 
        vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-       if (!vxlan) {
-               err = -ENOMEM;
+       if (!vxlan)
                goto err_delete_port;
-       }
 
        vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
        if (err)
                goto err_free;
 
-       return 0;
+       goto free_work;
 
 err_free:
        kfree(vxlan);
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-       return err;
+free_work:
+       kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
        kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-       if (!mlx5e_vxlan_lookup_port(priv, port))
-               return;
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
+       u16 port = vxlan_work->port;
 
        __mlx5e_vxlan_core_del_port(priv, port);
+
+       kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add)
+{
+       struct mlx5e_vxlan_work *vxlan_work;
+
+       vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+       if (!vxlan_work)
+               return;
+
+       if (add)
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+       else
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+       vxlan_work->priv = priv;
+       vxlan_work->port = port;
+       vxlan_work->sa_family = sa_family;
+       queue_work(priv->wq, &vxlan_work->work);
 }
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index a01685056ab156201c1945607e34bb7b53771ee3..129f3527aa147ca8cf02b7df2128b7cfeaadfecf 100644
@@ -39,6 +39,13 @@ struct mlx5e_vxlan {
        u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+       struct work_struct      work;
+       struct mlx5e_priv       *priv;
+       sa_family_t             sa_family;
+       u16                     port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
        return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 }
 
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add);
 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
 
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d5569734f6724d6194497fe9842e9aac56ef0b09..548fd535fd02399634bace0607471d07b973d8dc 100644
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 73ed2e951c020d28c21457d907c55764ea7b3d88..35437c779da8d6d6d113ed0deedbd39428852238 100644
@@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
            (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
             skb->inner_protocol != htons(ETH_P_TEB) ||
             (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+            (skb->ip_summed != CHECKSUM_NONE &&
+             !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
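This extends fix (7) to the encapsulated case: an offloadable-looking VXLAN frame still cannot keep its checksum/GSO features unless the device can checksum the inner protocol. For reference, roughly what can_checksum_protocol() tests -- paraphrased, not the verbatim net/core/dev.c helper:

#include <linux/if_ether.h>
#include <linux/netdev_features.h>

static bool can_csum_sketch(netdev_features_t features, __be16 protocol)
{
	if (features & NETIF_F_HW_CSUM)
		return true;		/* protocol-agnostic hardware checksums */
	if (protocol == htons(ETH_P_IP))
		return !!(features & NETIF_F_IP_CSUM);
	if (protocol == htons(ETH_P_IPV6))
		return !!(features & NETIF_F_IPV6_CSUM);
	return false;
}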
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 3315b9a598af0f71a95a080d450cf1faf3a70a1d..4026f198a7345c91a34ef1c8aed6f750e7b27d3e 100644
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+       /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can
+        * set the interface as ACTIVE right away, without any risk of race
+        * condition
+        */
+       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
        int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
        .name = "BATMAN_V",
+       .bat_iface_activate = batadv_v_iface_activate,
        .bat_iface_enable = batadv_v_iface_enable,
        .bat_iface_disable = batadv_v_iface_disable,
        .bat_iface_update_mac = batadv_v_iface_update_mac,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index e96d7c745b4a1de6444284f38207ed599925ac2e..3e6b2624f9809365ac29da7f88a3895af3df8728 100644
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+                            unsigned short vid)
 {
        int select;
        batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
                return NULL;
 
        dat.ip = ip_dst;
-       dat.vid = 0;
+       dat.vid = vid;
        ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
                                                    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
+ * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
  * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                                 struct sk_buff *skb, __be32 ip,
-                                int packet_subtype)
+                                unsigned short vid, int packet_subtype)
 {
        int i;
        bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
        struct sk_buff *tmp_skb;
        struct batadv_dat_candidate *cand;
 
-       cand = batadv_dat_select_candidates(bat_priv, ip);
+       cand = batadv_dat_select_candidates(bat_priv, ip, vid);
        if (!cand)
                goto out;
 
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                ret = true;
        } else {
                /* Send the request to the DHT */
-               ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+               ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
                                           BATADV_P_DAT_DHT_GET);
        }
 out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
         */
-       batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-       batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c61d5b0b24d27be7d9d962cb4b15de890fed1803..0a7deaf2670a981078d0c5c3de1805f69c265796 100644
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
        batadv_update_min_mtu(hard_iface->soft_iface);
 
+       if (bat_priv->bat_algo_ops->bat_iface_activate)
+               bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index d52f67a0c057230b4b1dfa66d43594d7f30625ee..c355a824713cd9224e2c705e222c2061cb504b5a 100644
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
 {
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
-       struct batadv_hardif_neigh_node *hardif_neigh;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
                batadv_neigh_ifinfo_put(neigh_ifinfo);
        }
 
-       hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-                                              neigh_node->addr);
-       if (hardif_neigh) {
-               /* batadv_hardif_neigh_get() increases refcount too */
-               batadv_hardif_neigh_put(hardif_neigh);
-               batadv_hardif_neigh_put(hardif_neigh);
-       }
+       batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
        if (bao->bat_neigh_free)
                bao->bat_neigh_free(neigh_node);
@@ -665,6 +658,10 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        neigh_node->orig_node = orig_node;
        neigh_node->last_seen = jiffies;
 
+       /* increment unique neighbor refcount */
+       kref_get(&hardif_neigh->refcount);
+       neigh_node->hardif_neigh = hardif_neigh;
+
        /* extra reference for return */
        kref_init(&neigh_node->refcount);
        kref_get(&neigh_node->refcount);
@@ -673,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
-       /* increment unique neighbor refcount */
-       kref_get(&hardif_neigh->refcount);
-
        batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
                   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
                   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0b43e86328a59bb42f7200380b840b0209fd26a1..9b4551a86535c6fb0264948cfa5aad63b8ee6dbb 100644
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
        tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
                                      common.refcount);
 
+       batadv_softif_vlan_put(tt_local_entry->vlan);
+
        kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        kref_get(&tt_local->common.refcount);
        tt_local->last_seen = jiffies;
        tt_local->common.added_at = tt_local->last_seen;
+       tt_local->vlan = vlan;
 
        /* the batman interface mac and multicast addresses should never be
         * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
-       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
        unsigned short vid;
        u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 1000;
 
                        no_purge = tt_common_entry->flags & np_flag;
-
-                       vlan = batadv_softif_vlan_get(bat_priv, vid);
-                       if (!vlan) {
-                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
-                                          BATADV_PRINT_VID(vid));
-                               continue;
-                       }
-
                        seq_printf(seq,
                                   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
                                   no_purge ? 0 : last_seen_secs,
                                   no_purge ? 0 : last_seen_msecs,
-                                  vlan->tt.crc);
-
-                       batadv_softif_vlan_put(vlan);
+                                  tt_local->vlan->tt.crc);
                }
                rcu_read_unlock();
        }
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       struct batadv_softif_vlan *vlan;
        void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
        /* extra call to free the local tt entry */
        batadv_tt_local_entry_put(tt_local_entry);
 
-       /* decrease the reference held for this vlan */
-       vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (!vlan)
-               goto out;
-
-       batadv_softif_vlan_put(vlan);
-       batadv_softif_vlan_put(vlan);
-
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv,
-                                                     tt_common_entry->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 9abfb3e73c3448a9612c9c282166fe5e18b44327..1e47fbe8bb7b2e9b3ec79c64ef508d7305b5d1a7 100644
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
        spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
        struct batadv_hard_iface *if_incoming;
        unsigned long last_seen;
+       struct batadv_hardif_neigh_node *hardif_neigh;
        struct kref refcount;
        struct rcu_head rcu;
 };
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
        struct batadv_tt_common_entry common;
        unsigned long last_seen;
+       struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
 struct batadv_algo_ops {
        struct hlist_node list;
        char *name;
+       void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
        int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
diff --git a/net/core/dev.c b/net/core/dev.c
index 77a71cd68535fc02ae939168934fa4fb4f419644..5c925ac50b9572755e35be168cde8c858fd4ba95 100644
@@ -2802,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
-               features &= ~NETIF_F_CSUM_MASK;
+               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 2ae3c4fd8aabc65a7206a73480b3dca4b975f7e3..41f18de5dcc2cda4686af5fe89c8ab4760966d91 100644
@@ -120,8 +120,7 @@ nla_put_failure:
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       /* No encapsulation overhead */
-       return 0;
+       return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
 }
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
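The corrected value follows standard netlink size accounting: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload), i.e. 4 + 8 = 12 bytes for the u64 locator, where the old code reserved 0 and left the dump message short of space. A small worked example:

#include <net/netlink.h>

/* nla_total_size(sizeof(u64)) == NLA_ALIGN(NLA_HDRLEN + 8)
 *                             == NLA_ALIGN(4 + 8) == 12 bytes,
 * the room that putting ILA_ATTR_LOCATOR into the dump will consume. */
static int ila_nlsize_example(void)
{
	return nla_total_size(sizeof(u64));
}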
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 61ed2a8764ba4c7a66788d7843cef18abb9e904a..86187dad14403100ff6199ee79beed5262de818f 100644
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 
 /*
  * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set.  The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
        if (!tc)
                return -ENOMEM;
 
+       mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 64f873c0c6b6d15574ec9771506d04e617a6aaac..41c228300525c029df02472b609ada1a71cea98e 100644
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
 
        struct list_head        t_tcp_node;
        struct rds_connection   *conn;
+       /* t_conn_lock synchronizes the connection establishment between
+        * rds_tcp_accept_one and rds_tcp_conn_connect
+        */
+       struct mutex            t_conn_lock;
        struct socket           *t_sock;
        void                    *t_orig_write_space;
        void                    *t_orig_data_ready;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 5cb16875c4603dba71c733de6600ada40e39cffc..49a3fcfed360edfb146416976c700b24e4520864 100644
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        struct socket *sock = NULL;
        struct sockaddr_in src, dest;
        int ret;
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+
+       mutex_lock(&tc->t_conn_lock);
 
+       if (rds_conn_up(conn)) {
+               mutex_unlock(&tc->t_conn_lock);
+               return 0;
+       }
        ret = sock_create_kern(rds_conn_net(conn), PF_INET,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        }
 
 out:
+       mutex_unlock(&tc->t_conn_lock);
        if (sock)
                sock_release(sock);
        return ret;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a32b476fdde5c7208fc465ec3324bbcf09..be263cdf268bae5b59a4746d4011d1042ede58cd 100644
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
-       struct rds_tcp_connection *rs_tcp;
+       struct rds_tcp_connection *rs_tcp = NULL;
+       int conn_state;
+       struct sock *nsk;
 
        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
         * rds_tcp_state_change() will do that cleanup
         */
        rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-       if (rs_tcp->t_sock &&
-           ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-               struct sock *nsk = new_sock->sk;
-
-               nsk->sk_user_data = NULL;
-               nsk->sk_prot->disconnect(nsk, 0);
-               tcp_done(nsk);
-               new_sock = NULL;
-               ret = 0;
-               goto out;
-       } else if (rs_tcp->t_sock) {
-               rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-               conn->c_outgoing = 0;
-       }
-
        rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+       mutex_lock(&rs_tcp->t_conn_lock);
+       conn_state = rds_conn_state(conn);
+       if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+               goto rst_nsk;
+       if (rs_tcp->t_sock) {
+               /* Need to resolve a duelling SYN between peers.
+                * We have an outstanding SYN to this peer, which may
+                * potentially have transitioned to the RDS_CONN_UP state,
+                * so we must quiesce any send threads before resetting
+                * c_transport_data.
+                */
+               wait_event(conn->c_waitq,
+                          !test_bit(RDS_IN_XMIT, &conn->c_flags));
+               if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+                       goto rst_nsk;
+               } else if (rs_tcp->t_sock) {
+                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       conn->c_outgoing = 0;
+               }
+       }
        rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn);
+       rds_connect_complete(conn); /* marks RDS_CONN_UP */
+       new_sock = NULL;
+       ret = 0;
+       goto out;
+rst_nsk:
+       /* reset the newly returned accept sock and bail */
+       nsk = new_sock->sk;
+       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+       nsk->sk_user_data = NULL;
+       nsk->sk_prot->disconnect(nsk, 0);
+       tcp_done(nsk);
        new_sock = NULL;
        ret = 0;
-
 out:
+       if (rs_tcp)
+               mutex_unlock(&rs_tcp->t_conn_lock);
        if (new_sock)
                sock_release(new_sock);
        return ret;
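The rework above resolves a dueling-SYN race: both peers can connect() to each other at once, and without a deterministic tie-break each side may keep resetting the other's socket. The rule the hunk encodes, as a hedged sketch with a hypothetical helper name:

#include <asm/byteorder.h>
#include <linux/types.h>

/* The accept()ing side resets the incoming socket only when its own
 * address is numerically smaller than the peer's, so both ends agree on
 * which one of the two crossing connections survives. */
static bool reset_incoming_conn(__be32 my_addr, __be32 peer_addr)
{
	return ntohl(my_addr) < ntohl(peer_addr);
}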
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 9640bb39a5d293d55a96edc5164d369c1cded127..4befe97a90349832030e139125369c048cb1d67c 100644
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
        sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
+ * when we statistically choose to corrupt one, we instead segment it, returning
+ * the first packet to be corrupted, and re-enqueue the remaining frames
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct sk_buff *segs;
+       netdev_features_t features = netif_skb_features(skb);
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs)) {
+               qdisc_reshape_fail(skb, sch);
+               return NULL;
+       }
+       consume_skb(skb);
+       return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
+       struct sk_buff *segs = NULL;
+       unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+       int nb = 0;
        int count = 1;
+       int rc = NET_XMIT_SUCCESS;
 
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         * do it now in software before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch);
+                       if (!segs)
+                               return NET_XMIT_DROP;
+               } else {
+                       segs = skb;
+               }
+
+               skb = segs;
+               segs = segs->next;
+
                if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
                    (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(skb)))
-                       return qdisc_drop(skb, sch);
+                    skb_checksum_help(skb))) {
+                       rc = qdisc_drop(skb, sch);
+                       goto finish_segs;
+               }
 
                skb->data[prandom_u32() % skb_headlen(skb)] ^=
                        1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->qstats.requeues++;
        }
 
+finish_segs:
+       if (segs) {
+               while (segs) {
+                       skb2 = segs->next;
+                       segs->next = NULL;
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       last_len = segs->len;
+                       rc = qdisc_enqueue(segs, sch);
+                       if (rc != NET_XMIT_SUCCESS) {
+                               if (net_xmit_drop_count(rc))
+                                       qdisc_qstats_drop(sch);
+                       } else {
+                               nb++;
+                               len += last_len;
+                       }
+                       segs = skb2;
+               }
+               sch->q.qlen += nb;
+               if (nb > 1)
+                       qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+       }
        return NET_XMIT_SUCCESS;
 }
 