drivers/net: Move && and || to end of previous line
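The change is a pure style cleanup: logical operators that used to open a continuation line now close the line before it. As a minimal sketch (not taken from sundance.c; the helper names, their arguments and the queue limit of 31 are made up for illustration), the same test reads either way:

	/* Old style: '&&' begins the continuation line. */
	static int queue_has_room_old(unsigned int cur_tx, unsigned int dirty_tx,
				      int stopped)
	{
		if (cur_tx - dirty_tx < 31
		    && !stopped)
			return 1;
		return 0;
	}

	/*
	 * Preferred style: '&&' ends the previous line, which is the
	 * placement scripts/checkpatch.pl prefers for logical continuations.
	 */
	static int queue_has_room(unsigned int cur_tx, unsigned int dirty_tx,
				  int stopped)
	{
		if (cur_tx - dirty_tx < 31 &&
		    !stopped)
			return 1;
		return 0;
	}

Both forms compile to the same code; only the wrapping of the condition changes.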
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 5c396c2e6e769f8bfa0f761976247878dc86ac6a..d58e1891ca604577bb5bd1f0f83c4e09cf9c5ee5 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -603,8 +603,8 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
                            strcmp (media[card_idx], "4") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 1;
-                       } else if (strcmp (media[card_idx], "100mbps_hd") == 0
-                                  || strcmp (media[card_idx], "3") == 0) {
+                       } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+                                  strcmp (media[card_idx], "3") == 0) {
                                np->speed = 100;
                                np->mii_if.full_duplex = 0;
                        } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
@@ -1079,8 +1079,8 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
        tasklet_schedule(&np->tx_tasklet);
 
        /* On some architectures: explicitly flush cache lines here. */
-       if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
-                       && !netif_queue_stopped(dev)) {
+       if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
+           !netif_queue_stopped(dev)) {
                /* do nothing */
        } else {
                netif_stop_queue (dev);
@@ -1336,8 +1336,8 @@ static void rx_poll(unsigned long data)
 #endif
                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
-                       if (pkt_len < rx_copybreak
-                               && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+                       if (pkt_len < rx_copybreak &&
+                           (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(np->pci_dev,
                                                            le32_to_cpu(desc->frag[0].addr),
@@ -1517,8 +1517,8 @@ static void set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
-       } else if ((dev->mc_count > multicast_filter_limit)
-                          ||  (dev->flags & IFF_ALLMULTI)) {
+       } else if ((dev->mc_count > multicast_filter_limit) ||
+                  (dev->flags & IFF_ALLMULTI)) {
                /* Too many to match, or accept all multicasts. */
                memset(mc_filter, 0xff, sizeof(mc_filter));
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;