Merge gregkh@master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author Greg Kroah-Hartman <gregkh@suse.de>
Fri, 18 Aug 2006 18:02:52 +0000 (11:02 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Fri, 18 Aug 2006 18:02:52 +0000 (11:02 -0700)
19 files changed:
Documentation/networking/ip-sysctl.txt
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/ppp_generic.c
include/linux/if_vlan.h
include/linux/netdevice.h
net/atm/proc.c
net/bridge/br_if.c
net/core/dev.c
net/core/utils.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/netfilter/ip_conntrack_netlink.c
net/ipv4/netfilter/ip_tables.c
net/ipv6/icmp.c
net/ipv6/mcast.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_physdev.c
net/sched/cls_u32.c

diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d46338af6002997b0593d4875a6aee825c77126f..3e0c017e78772aba2857aca4d78ac24364a1e15d 100644
@@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
        Default: 87380*2 bytes.
 
 tcp_mem - vector of 3 INTEGERs: min, pressure, max
-       low: below this number of pages TCP is not bothered about its
+       min: below this number of pages TCP is not bothered about its
        memory appetite.
 
        pressure: when amount of memory allocated by TCP exceeds this number
        of pages, TCP moderates its memory consumption and enters memory
        pressure mode, which is exited when memory consumption falls
-       under "low".
+       under "min".
 
-       high: number of pages allowed for queueing by all TCP sockets.
+       max: number of pages allowed for queueing by all TCP sockets.
 
        Defaults are calculated at boot time from amount of available
        memory.
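
(Annotation, not part of the patch.) The renamed fields describe a hysteresis: TCP enters memory-pressure mode when its page count climbs above "pressure", leaves it only when the count drops back under "min", and treats "max" as a hard limit on queued pages. A minimal C sketch of that logic, using made-up names (tcp_mem_account, tcp_under_pressure) rather than the kernel's real accounting code:

/* Hypothetical illustration of the documented min/pressure/max thresholds;
 * the real logic lives in the kernel's socket memory accounting. */
static int tcp_under_pressure;	/* sticky pressure-mode flag */

static int tcp_mem_account(unsigned long pages_allocated,
			   const unsigned long tcp_mem[3])
{
	if (pages_allocated > tcp_mem[2])	/* over "max": refuse to queue more */
		return -1;
	if (pages_allocated > tcp_mem[1])	/* over "pressure": moderate consumption */
		tcp_under_pressure = 1;
	else if (pages_allocated < tcp_mem[0])	/* back under "min": stop worrying */
		tcp_under_pressure = 0;
	return 0;
}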
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d25117339ede7074548003cec7986bddf..652eb05a6c2df6c7e406eef8ecbfa82cfcb03564 100644
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME                "bnx2"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.4.43"
-#define DRV_MODULE_RELDATE     "June 28, 2006"
+#define DRV_MODULE_VERSION     "1.4.44"
+#define DRV_MODULE_RELDATE     "August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-       u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+       u32 diff;
 
+       smp_mb();
+       diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
        if (diff > MAX_TX_DESC_CNT)
                diff = (diff & MAX_TX_DESC_CNT) - 1;
        return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;
 
-       skb = dev_alloc_skb(bp->rx_buf_size);
+       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
                skb_reserve(skb, 8 - align);
        }
 
-       skb->dev = bp->dev;
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
 
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
        }
 
        bp->tx_cons = sw_cons;
+       /* Need to make the tx_cons update visible to bnx2_start_xmit()
+        * before checking for netif_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that bnx2_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
 
-       if (unlikely(netif_queue_stopped(bp->dev))) {
-               spin_lock(&bp->tx_lock);
+       if (unlikely(netif_queue_stopped(bp->dev)) &&
+                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+               netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
-                   (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+                   (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
-               }
-               spin_unlock(&bp->tx_lock);
+               netif_tx_unlock(bp->dev);
        }
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;
 
-                       new_skb = dev_alloc_skb(len + 2);
+                       new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;
 
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);
-                       new_skb->dev = bp->dev;
 
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
        struct tx_bd *txbd;
        u32 val;
 
+       bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
                
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
                return -EINVAL;
 
        pkt_size = 1514;
-       skb = dev_alloc_skb(pkt_size);
+       skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->trans_start = jiffies;
 
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-               spin_lock(&bp->tx_lock);
                netif_stop_queue(dev);
-               
-               if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+               if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
-               spin_unlock(&bp->tx_lock);
        }
 
        return NETDEV_TX_OK;
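
(Annotation, not part of the patch.) The smp_mb() calls added to bnx2_tx_avail() and bnx2_tx_int() pair with the re-check that bnx2_start_xmit() performs after netif_stop_queue(), so a tx_cons update from the completion path can never be missed and the queue can never stay stopped forever. A self-contained sketch of that stop/wake pattern, assuming a 2.6-era kernel build environment; the names ring, ring_space, sketch_xmit and sketch_tx_complete are placeholders, not bnx2 code:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <asm/system.h>			/* smp_mb() on 2.6-era kernels */

struct ring {
	unsigned int prod, cons, size;
};

static unsigned int ring_space(struct ring *r)
{
	return r->size - (r->prod - r->cons);
}

/* transmit path, called with netif_tx_lock held */
static void sketch_xmit(struct ring *r, struct net_device *dev)
{
	r->prod++;			/* queue one descriptor */
	if (ring_space(r) <= MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		/* corresponds to the smp_mb() at the top of bnx2_tx_avail():
		 * order the stop against re-reading cons so a concurrent
		 * completion cannot be missed */
		smp_mb();
		if (ring_space(r) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
	}
}

/* completion path, runs from the interrupt handler without netif_tx_lock */
static void sketch_tx_complete(struct ring *r, struct net_device *dev,
			       unsigned int hw_cons)
{
	r->cons = hw_cons;
	/* mirrors the smp_mb() added in bnx2_tx_int(): publish cons before
	 * testing the stopped flag */
	smp_mb();
	if (netif_queue_stopped(dev) && ring_space(r) > MAX_SKB_FRAGS) {
		netif_tx_lock(dev);
		if (netif_queue_stopped(dev) &&
		    ring_space(r) > MAX_SKB_FRAGS)
			netif_wake_queue(dev);
		netif_tx_unlock(dev);
	}
}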
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->pdev = pdev;
 
        spin_lock_init(&bp->phy_lock);
-       spin_lock_init(&bp->tx_lock);
        INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        bp->mac_addr[5] = (u8) reg;
 
        bp->tx_ring_size = MAX_TX_DESC_CNT;
-       bnx2_set_rx_ring_size(bp, 100);
+       bnx2_set_rx_ring_size(bp, 255);
 
        bp->rx_csum = 1;
 
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73ecdea4bd4c34c74e68e2b82b54bb..fe804763c60738b0d8c442175fa05d7816e1882c 100644
@@ -3890,10 +3890,6 @@ struct bnx2 {
        u32             tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
        u16             tx_prod;
 
-       struct tx_bd    *tx_desc_ring;
-       struct sw_bd    *tx_buf_ring;
-       int             tx_ring_size;
-
        u16             tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
        u16             hw_tx_cons;
 
@@ -3916,9 +3912,11 @@ struct bnx2 {
        struct sw_bd            *rx_buf_ring;
        struct rx_bd            *rx_desc_ring[MAX_RX_RINGS];
 
-       /* Only used to synchronize netif_stop_queue/wake_queue when tx */
-       /* ring is full */
-       spinlock_t              tx_lock;
+       /* TX constants */
+       struct tx_bd    *tx_desc_ring;
+       struct sw_bd    *tx_buf_ring;
+       int             tx_ring_size;
+       u32             tx_wake_thresh;
 
        /* End of fields used in the performance code paths. */
 
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b9499d438c9cf1838a212238e261a09..c872f7c6cce39392e7490cb2437ea62bd8fcc53f 100644
@@ -192,7 +192,7 @@ struct cardmap {
        void *ptr[CARDMAP_WIDTH];
 };
 static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
 static unsigned int cardmap_find_first_free(struct cardmap *map);
 static void cardmap_destroy(struct cardmap **map);
 
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
 {
        struct channel *pch;
 
-       pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+       pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
        if (pch == 0)
                return -ENOMEM;
-       memset(pch, 0, sizeof(struct channel));
        pch->ppp = NULL;
        pch->chan = chan;
        chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
        int ret = -ENOMEM;
        int i;
 
-       ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+       ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
        if (!ppp)
                goto out;
        dev = alloc_netdev(0, "", ppp_setup);
        if (!dev)
                goto out1;
-       memset(ppp, 0, sizeof(struct ppp));
 
        ppp->mru = PPP_MRU;
        init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
        }
 
        atomic_inc(&ppp_unit_count);
-       cardmap_set(&all_ppp_units, unit, ppp);
+       ret = cardmap_set(&all_ppp_units, unit, ppp);
+       if (ret != 0)
+               goto out3;
+
        mutex_unlock(&all_ppp_mutex);
        *retp = 0;
        return ppp;
 
+out3:
+       atomic_dec(&ppp_unit_count);
 out2:
        mutex_unlock(&all_ppp_mutex);
        free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
        return NULL;
 }
 
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
 {
        struct cardmap *p;
        int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
        if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
                do {
                        /* need a new top level */
-                       struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-                       memset(np, 0, sizeof(*np));
+                       struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+                       if (!np)
+                               goto enomem;
                        np->ptr[0] = p;
                        if (p != NULL) {
                                np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
        while (p->shift > 0) {
                i = (nr >> p->shift) & CARDMAP_MASK;
                if (p->ptr[i] == NULL) {
-                       struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
-                       memset(np, 0, sizeof(*np));
+                       struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+                       if (!np)
+                               goto enomem;
                        np->shift = p->shift - CARDMAP_ORDER;
                        np->parent = p;
                        p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
                set_bit(i, &p->inuse);
        else
                clear_bit(i, &p->inuse);
+       return 0;
+ enomem:
+       return -ENOMEM;
 }
 
 static unsigned int cardmap_find_first_free(struct cardmap *map)
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 383627ad328f383bfe07f78ddc77e09e37066d95..ab2740832742e85c6a6870384aa47bbe70c8a201 100644
@@ -155,6 +155,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
 {
        struct net_device_stats *stats;
 
+       if (skb_bond_should_drop(skb)) {
+               dev_kfree_skb_any(skb);
+               return NET_RX_DROP;
+       }
+
        skb->dev = grp->vlan_devices[vlan_tag & VLAN_VID_MASK];
        if (skb->dev == NULL) {
                dev_kfree_skb_any(skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 75f02d8c6ed376b192858faff4de312c3b71e2ab..50a4719512ede141d2fbf045031ff2096227f98b 100644
@@ -320,6 +320,9 @@ struct net_device
 #define NETIF_F_TSO_ECN                (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
 #define NETIF_F_TSO6           (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 
+       /* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE   (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+
 #define NETIF_F_GEN_CSUM       (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 #define NETIF_F_ALL_CSUM       (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
@@ -1012,6 +1015,30 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
                unlikely(skb->ip_summed != CHECKSUM_HW));
 }
 
+/* On bonding slaves other than the currently active slave, suppress
+ * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
+ */
+static inline int skb_bond_should_drop(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct net_device *master = dev->master;
+
+       if (master &&
+           (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+               if (master->priv_flags & IFF_MASTER_ALB) {
+                       if (skb->pkt_type != PACKET_BROADCAST &&
+                           skb->pkt_type != PACKET_MULTICAST)
+                               return 0;
+               }
+               if (master->priv_flags & IFF_MASTER_8023AD &&
+                   skb->protocol == __constant_htons(ETH_P_SLOW))
+                       return 0;
+
+               return 1;
+       }
+       return 0;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_DEV_H */
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 3f95b0886a6a659afd540ea8cb9f12011a03a3c5..91fe5f53ff112377755054d16ff82cd599dd54a5 100644
@@ -507,7 +507,7 @@ err_out:
        goto out;
 }
 
-void __exit atm_proc_exit(void)
+void atm_proc_exit(void)
 {
        atm_proc_dirs_remove();
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f55ef682ef846e979b7e1b24569399a7c2fbd7da..b1211d5342f6cac5f43926a2c6a39f1651cb6761 100644
@@ -386,12 +386,17 @@ void br_features_recompute(struct net_bridge *br)
                        checksum = 0;
 
                if (feature & NETIF_F_GSO)
-                       feature |= NETIF_F_TSO;
+                       feature |= NETIF_F_GSO_SOFTWARE;
                feature |= NETIF_F_GSO;
 
                features &= feature;
        }
 
+       if (!(checksum & NETIF_F_ALL_CSUM))
+               features &= ~NETIF_F_SG;
+       if (!(features & NETIF_F_SG))
+               features &= ~NETIF_F_GSO_MASK;
+
        br->dev->features = features | checksum | NETIF_F_LLTX |
                            NETIF_F_GSO_ROBUST;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index d95e2626d944ea754731a3e78382c7f8156bb461..d4a1ec3bded5f6afa92c0cbb860101b509f0f338 100644
 #include <linux/audit.h>
 #include <linux/dmaengine.h>
 #include <linux/err.h>
+#include <linux/ctype.h>
 
 /*
  *     The list of packet types we will receive (as opposed to discard)
@@ -632,14 +633,22 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  *     @name: name string
  *
  *     Network device names need to be valid file names to
- *     to allow sysfs to work
+ *     to allow sysfs to work.  We also disallow any kind of
+ *     whitespace.
  */
 int dev_valid_name(const char *name)
 {
-       return !(*name == '\0' 
-                || !strcmp(name, ".")
-                || !strcmp(name, "..")
-                || strchr(name, '/'));
+       if (*name == '\0')
+               return 0;
+       if (!strcmp(name, ".") || !strcmp(name, ".."))
+               return 0;
+
+       while (*name) {
+               if (*name == '/' || isspace(*name))
+                       return 0;
+               name++;
+       }
+       return 1;
 }
 
 /**
@@ -1619,26 +1628,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        if (dev->master) {
-               /*
-                * On bonding slaves other than the currently active
-                * slave, suppress duplicates except for 802.3ad
-                * ETH_P_SLOW and alb non-mcast/bcast.
-                */
-               if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-                       if (dev->master->priv_flags & IFF_MASTER_ALB) {
-                               if (skb->pkt_type != PACKET_BROADCAST &&
-                                   skb->pkt_type != PACKET_MULTICAST)
-                                       goto keep;
-                       }
-
-                       if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-                           skb->protocol == __constant_htons(ETH_P_SLOW))
-                               goto keep;
-               
+               if (skb_bond_should_drop(skb)) {
                        kfree_skb(skb);
                        return NULL;
                }
-keep:
                skb->dev = dev->master;
        }
 
diff --git a/net/core/utils.c b/net/core/utils.c
index 4f96f389243d7d6322288609f6477c74eab5ac09..e31c90e055941e73f020e07adcd92360e81960da 100644
@@ -130,12 +130,13 @@ void __init net_random_init(void)
 static int net_random_reseed(void)
 {
        int i;
-       unsigned long seed[NR_CPUS];
+       unsigned long seed;
 
-       get_random_bytes(seed, sizeof(seed));
        for_each_possible_cpu(i) {
                struct nrnd_state *state = &per_cpu(net_rand_state,i);
-               __net_srandom(state, seed[i]);
+
+               get_random_bytes(&seed, sizeof(seed));
+               __net_srandom(state, seed);
        }
        return 0;
 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9be53a8e72c338d7865dadcc747d6003cd1c8b20..51738000f3dc4c12c5db72063548b8535f7517e6 100644
@@ -159,7 +159,7 @@ void free_fib_info(struct fib_info *fi)
 
 void fib_release_info(struct fib_info *fi)
 {
-       write_lock(&fib_info_lock);
+       write_lock_bh(&fib_info_lock);
        if (fi && --fi->fib_treeref == 0) {
                hlist_del(&fi->fib_hash);
                if (fi->fib_prefsrc)
@@ -172,7 +172,7 @@ void fib_release_info(struct fib_info *fi)
                fi->fib_dead = 1;
                fib_info_put(fi);
        }
-       write_unlock(&fib_info_lock);
+       write_unlock_bh(&fib_info_lock);
 }
 
 static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
@@ -598,7 +598,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
        unsigned int old_size = fib_hash_size;
        unsigned int i, bytes;
 
-       write_lock(&fib_info_lock);
+       write_lock_bh(&fib_info_lock);
        old_info_hash = fib_info_hash;
        old_laddrhash = fib_info_laddrhash;
        fib_hash_size = new_size;
@@ -639,7 +639,7 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
        }
        fib_info_laddrhash = new_laddrhash;
 
-       write_unlock(&fib_info_lock);
+       write_unlock_bh(&fib_info_lock);
 
        bytes = old_size * sizeof(struct hlist_head *);
        fib_hash_free(old_info_hash, bytes);
@@ -820,7 +820,7 @@ link_it:
 
        fi->fib_treeref++;
        atomic_inc(&fi->fib_clntref);
-       write_lock(&fib_info_lock);
+       write_lock_bh(&fib_info_lock);
        hlist_add_head(&fi->fib_hash,
                       &fib_info_hash[fib_info_hashfn(fi)]);
        if (fi->fib_prefsrc) {
@@ -839,7 +839,7 @@ link_it:
                head = &fib_info_devhash[hash];
                hlist_add_head(&nh->nh_hash, head);
        } endfor_nexthops(fi)
-       write_unlock(&fib_info_lock);
+       write_unlock_bh(&fib_info_lock);
        return fi;
 
 err_inval:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9f4b752f5a337e7dfae09970d5112dc4bc6dc54b..8e8117c19e4db24ad74d55cfe540fb0d4c9816f3 100644
@@ -1793,29 +1793,35 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
        struct in_device *in_dev;
        u32 group = imr->imr_multiaddr.s_addr;
        u32 ifindex;
+       int ret = -EADDRNOTAVAIL;
 
        rtnl_lock();
        in_dev = ip_mc_find_dev(imr);
-       if (!in_dev) {
-               rtnl_unlock();
-               return -ENODEV;
-       }
        ifindex = imr->imr_ifindex;
        for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-               if (iml->multi.imr_multiaddr.s_addr == group &&
-                   iml->multi.imr_ifindex == ifindex) {
-                       (void) ip_mc_leave_src(sk, iml, in_dev);
+               if (iml->multi.imr_multiaddr.s_addr != group)
+                       continue;
+               if (ifindex) {
+                       if (iml->multi.imr_ifindex != ifindex)
+                               continue;
+               } else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
+                               iml->multi.imr_address.s_addr)
+                       continue;
+
+               (void) ip_mc_leave_src(sk, iml, in_dev);
 
-                       *imlp = iml->next;
+               *imlp = iml->next;
 
+               if (in_dev)
                        ip_mc_dec_group(in_dev, group);
-                       rtnl_unlock();
-                       sock_kfree_s(sk, iml, sizeof(*iml));
-                       return 0;
-               }
+               rtnl_unlock();
+               sock_kfree_s(sk, iml, sizeof(*iml));
+               return 0;
        }
+       if (!in_dev)
+               ret = -ENODEV;
        rtnl_unlock();
-       return -EADDRNOTAVAIL;
+       return ret;
 }
 
 int ip_mc_source(int add, int omode, struct sock *sk, struct
@@ -2199,13 +2205,13 @@ void ip_mc_drop_socket(struct sock *sk)
                struct in_device *in_dev;
                inet->mc_list = iml->next;
 
-               if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
-                       (void) ip_mc_leave_src(sk, iml, in_dev);
+               in_dev = inetdev_by_index(iml->multi.imr_ifindex);
+               (void) ip_mc_leave_src(sk, iml, in_dev);
+               if (in_dev != NULL) {
                        ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                        in_dev_put(in_dev);
                }
                sock_kfree_s(sk, iml, sizeof(*iml));
-
        }
        rtnl_unlock();
 }
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 33891bb1fde438a97c5d85516b3c25bb826d1ba4..0d4cc92391fa54a23d26e64564c7b58b7e250d2f 100644
@@ -415,21 +415,18 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                        cb->args[0], *id);
 
        read_lock_bh(&ip_conntrack_lock);
+       last = (struct ip_conntrack *)cb->args[1];
        for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
 restart:
-               last = (struct ip_conntrack *)cb->args[1];
                list_for_each_prev(i, &ip_conntrack_hash[cb->args[0]]) {
                        h = (struct ip_conntrack_tuple_hash *) i;
                        if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                continue;
                        ct = tuplehash_to_ctrack(h);
-                       if (last != NULL) {
-                               if (ct == last) {
-                                       ip_conntrack_put(last);
-                                       cb->args[1] = 0;
-                                       last = NULL;
-                               } else
+                       if (cb->args[1]) {
+                               if (ct != last)
                                        continue;
+                               cb->args[1] = 0;
                        }
                        if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                cb->nlh->nlmsg_seq,
@@ -440,17 +437,17 @@ restart:
                                goto out;
                        }
                }
-               if (last != NULL) {
-                       ip_conntrack_put(last);
+               if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
                }
        }
 out:
        read_unlock_bh(&ip_conntrack_lock);
+       if (last)
+               ip_conntrack_put(last);
 
        DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
-
        return skb->len;
 }
 
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index f316ff5fd8a64ff7cf4096e583760977c1d5403f..048514f15f2ffe116dcbebc0aa1672eae54045f4 100644
@@ -230,7 +230,7 @@ ipt_do_table(struct sk_buff **pskb,
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
-       struct xt_table_info *private = table->private;
+       struct xt_table_info *private;
 
        /* Initialization */
        ip = (*pskb)->nh.iph;
@@ -247,6 +247,7 @@ ipt_do_table(struct sk_buff **pskb,
 
        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+       private = table->private;
        table_base = (void *)private->entries[smp_processor_id()];
        e = get_entry(table_base, private->hook_entry[hook]);
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1044b6fce0d5d472b11b0eef3a2e261650869e5e..3d6e9a351150294376557616ab29c72a783ed2c4 100644
@@ -712,6 +712,11 @@ discard_it:
        return 0;
 }
 
+/*
+ * Special lock-class for __icmpv6_socket:
+ */
+static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
+
 int __init icmpv6_init(struct net_proto_family *ops)
 {
        struct sock *sk;
@@ -730,6 +735,14 @@ int __init icmpv6_init(struct net_proto_family *ops)
 
                sk = per_cpu(__icmpv6_socket, i)->sk;
                sk->sk_allocation = GFP_ATOMIC;
+               /*
+                * Split off their lock-class, because sk->sk_dst_lock
+                * gets used from softirqs, which is safe for
+                * __icmpv6_socket (because those never get directly used
+                * via userspace syscalls), but unsafe for normal sockets.
+                */
+               lockdep_set_class(&sk->sk_dst_lock,
+                                 &icmpv6_socket_sk_dst_lock_key);
 
                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff struct overhead.
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9d697d4dcffccde33651b99a5e9b7fe9b37ddf48..639eb20c9f1fd08181c104593f3f39bc8bad3e0d 100644
@@ -268,13 +268,14 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
                        if ((dev = dev_get_by_index(mc_lst->ifindex)) != NULL) {
                                struct inet6_dev *idev = in6_dev_get(dev);
 
+                               (void) ip6_mc_leave_src(sk, mc_lst, idev);
                                if (idev) {
-                                       (void) ip6_mc_leave_src(sk,mc_lst,idev);
                                        __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                                        in6_dev_put(idev);
                                }
                                dev_put(dev);
-                       }
+                       } else
+                               (void) ip6_mc_leave_src(sk, mc_lst, NULL);
                        sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
                        return 0;
                }
@@ -334,13 +335,14 @@ void ipv6_sock_mc_close(struct sock *sk)
                if (dev) {
                        struct inet6_dev *idev = in6_dev_get(dev);
 
+                       (void) ip6_mc_leave_src(sk, mc_lst, idev);
                        if (idev) {
-                               (void) ip6_mc_leave_src(sk, mc_lst, idev);
                                __ipv6_dev_mc_dec(idev, &mc_lst->addr);
                                in6_dev_put(idev);
                        }
                        dev_put(dev);
-               }
+               } else
+                       (void) ip6_mc_leave_src(sk, mc_lst, NULL);
 
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index af4845971f70606d568e731093118b74c0fbf104..6527d4e048d81395f1e5263eb3103f6d65ed2114 100644
@@ -429,9 +429,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
                        cb->args[0], *id);
 
        read_lock_bh(&nf_conntrack_lock);
+       last = (struct nf_conn *)cb->args[1];
        for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
 restart:
-               last = (struct nf_conn *)cb->args[1];
                list_for_each_prev(i, &nf_conntrack_hash[cb->args[0]]) {
                        h = (struct nf_conntrack_tuple_hash *) i;
                        if (DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -442,13 +442,10 @@ restart:
                         * then dump everything. */
                        if (l3proto && L3PROTO(ct) != l3proto)
                                continue;
-                       if (last != NULL) {
-                               if (ct == last) {
-                                       nf_ct_put(last);
-                                       cb->args[1] = 0;
-                                       last = NULL;
-                               } else
+                       if (cb->args[1]) {
+                               if (ct != last)
                                        continue;
+                               cb->args[1] = 0;
                        }
                        if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                                cb->nlh->nlmsg_seq,
@@ -459,17 +456,17 @@ restart:
                                goto out;
                        }
                }
-               if (last != NULL) {
-                       nf_ct_put(last);
+               if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
                }
        }
 out:
        read_unlock_bh(&nf_conntrack_lock);
+       if (last)
+               nf_ct_put(last);
 
        DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
-
        return skb->len;
 }
 
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index a9f4f6f3c628225e9720d0cefad672a598fa00c9..63a96546746575979e2200b87d9a557c774e6970 100644
@@ -10,6 +10,7 @@
 
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/netfilter_bridge.h>
 #include <linux/netfilter/xt_physdev.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_bridge.h>
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index eea3669667400868ccde06397175a6bf48fe50a5..0a6cfa0005bed31627f6ec8de3c7ffc3763abe36 100644
@@ -796,7 +796,7 @@ static int __init init_u32(void)
 {
        printk("u32 classifier\n");
 #ifdef CONFIG_CLS_U32_PERF
-       printk("    Perfomance counters on\n");
+       printk("    Performance counters on\n");
 #endif
 #ifdef CONFIG_NET_CLS_POLICE
        printk("    OLD policer on \n");