ipv4: coding style: comparison for inequality with NULL
authorIan Morris <ipm@chirality.org.uk>
Fri, 3 Apr 2015 08:17:27 +0000 (09:17 +0100)
committerDavid S. Miller <davem@davemloft.net>
Fri, 3 Apr 2015 16:11:15 +0000 (12:11 -0400)
The ipv4 code uses a mixture of coding styles. In some instances a check
for a non-NULL pointer is written as "x != NULL" and in others simply as
"x". The latter form is preferred according to checkpatch, and this patch
makes the code consistent by adopting it throughout.

No changes detected by objdiff.

Signed-off-by: Ian Morris <ipm@chirality.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
30 files changed:
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/fib_trie.c
net/ipv4/geneve.c
net/ipv4/gre_offload.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipmr.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c

index 7d3b00c01bc8aef3db551f63480629c1bfa20cb2..8b47a4d79d040e39e592d3583affb7fec2d19f3d 100644 (file)
@@ -1269,7 +1269,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (udpfrag) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
-                       if (skb->next != NULL)
+                       if (skb->next)
                                iph->frag_off |= htons(IP_MF);
                        offset += skb->len - nhoff - ihl;
                } else {
index ffe84226a2c8647637dca338d40f42dc23f2f2b9..c6e67aa46c32aa78eb7fe9172d90628112e61739 100644 (file)
@@ -569,7 +569,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
                break;
 #endif
        default:
-               if (target_hw != NULL)
+               if (target_hw)
                        memcpy(arp_ptr, target_hw, dev->addr_len);
                else
                        memset(arp_ptr, 0, dev->addr_len);
index 1b28e1183c1b95a2c0279b2d4b8edb0d68f6eb43..bdb2a07ec363b709197435ac602b74377a600780 100644 (file)
@@ -502,7 +502,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
        atomic_set(&doi_def->refcount, 1);
 
        spin_lock(&cipso_v4_doi_list_lock);
-       if (cipso_v4_doi_search(doi_def->doi) != NULL) {
+       if (cipso_v4_doi_search(doi_def->doi)) {
                spin_unlock(&cipso_v4_doi_list_lock);
                ret_val = -EEXIST;
                goto doi_add_return;
@@ -513,7 +513,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
 
 doi_add_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                const char *type_str;
                switch (doi_type) {
                case CIPSO_V4_MAP_TRANS:
@@ -617,7 +617,7 @@ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
 
 doi_remove_return:
        audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
-       if (audit_buf != NULL) {
+       if (audit_buf) {
                audit_log_format(audit_buf,
                                 " cipso_doi=%u res=%u",
                                 doi, ret_val == 0 ? 1 : 0);
index 0ee21689d37eef247ab87bc17842b21665b487a4..419d23c53ec756327178f9101ea8287d671c9a47 100644 (file)
@@ -1290,7 +1290,7 @@ __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
        __be32 addr = 0;
        struct net_device *dev;
 
-       if (in_dev != NULL)
+       if (in_dev)
                return confirm_addr_indev(in_dev, dst, local, scope);
 
        rcu_read_lock();
index 9e4a3e3423b4c7e3b360a75702c44123b46f05b1..e13fcc602da20ee44dfd505ab1115bbcc0e13375 100644 (file)
@@ -391,9 +391,9 @@ static void put_child(struct key_vector *tn, unsigned long i,
        BUG_ON(i >= child_length(tn));
 
        /* update emptyChildren, overflow into fullChildren */
-       if (!n && chi != NULL)
+       if (!n && chi)
                empty_child_inc(tn);
-       if (n != NULL && !chi)
+       if (n && !chi)
                empty_child_dec(tn);
 
        /* update fullChildren */
index a7d8be3dd3deff4a78c0ff87b846f2f27f304419..e64f8e9785d184cd033d23682691b397adc65f1e 100644 (file)
@@ -230,7 +230,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
index 9358f11aae404157b305ea8ec231607a75e3c6f6..5aa46d4b44efb99702ccd89005528f20ae422a0e 100644 (file)
@@ -243,7 +243,7 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
 
        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
-       if (ptype != NULL)
+       if (ptype)
                err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
 
        rcu_read_unlock();
index 27d204b834f99b66445fcd8f833166c58ccb0453..a3a697f5ffbaba1b30db8341ea9b51b229ac29df 100644 (file)
@@ -2370,7 +2370,7 @@ void ip_mc_drop_socket(struct sock *sk)
                inet->mc_list = iml->next_rcu;
                in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
                (void) ip_mc_leave_src(sk, iml, in_dev);
-               if (in_dev != NULL)
+               if (in_dev)
                        ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
@@ -2590,10 +2590,10 @@ static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
                if (unlikely(!idev))
                        continue;
                im = rcu_dereference(idev->mc_list);
-               if (likely(im != NULL)) {
+               if (likely(im)) {
                        spin_lock_bh(&im->lock);
                        psf = im->sources;
-                       if (likely(psf != NULL)) {
+                       if (likely(psf)) {
                                state->im = im;
                                state->idev = idev;
                                break;
@@ -2663,7 +2663,7 @@ static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
 {
        struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
-       if (likely(state->im != NULL)) {
+       if (likely(state->im)) {
                spin_unlock_bh(&state->im->lock);
                state->im = NULL;
        }
index 79c0c9439fdc7dd0b68421a6b229c869f37f7a01..5c3dd6267ed3557f2f139f83002fd7b1feaab237 100644 (file)
@@ -673,7 +673,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 {
        struct sock *newsk = sk_clone_lock(sk, priority);
 
-       if (newsk != NULL) {
+       if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
                newsk->sk_state = TCP_SYN_RECV;
@@ -843,7 +843,7 @@ void inet_csk_listen_stop(struct sock *sk)
                sk_acceptq_removed(sk);
                reqsk_put(req);
        }
-       if (queue->fastopenq != NULL) {
+       if (queue->fastopenq) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq->lock);
                acc_req = queue->fastopenq->rskq_rst_head;
@@ -875,7 +875,7 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_getsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
@@ -888,7 +888,7 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
-       if (icsk->icsk_af_ops->compat_setsockopt != NULL)
+       if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
index 0fb841b9d83409c133d20d2144cc2edcef5a31c5..d4630bf2d9aad1fd9070a11323b1cd0f7c0b9949 100644 (file)
@@ -64,7 +64,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
 {
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
 
-       if (tb != NULL) {
+       if (tb) {
                write_pnet(&tb->ib_net, net);
                tb->port      = snum;
                tb->fastreuse = 0;
index f38e387448fb5596c64cdcb07e7cb06d3e624451..118f0f195820fa98554bafa5e1ddbd0da7c002c7 100644 (file)
@@ -173,7 +173,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
-       if (tw != NULL) {
+       if (tw) {
                const struct inet_sock *inet = inet_sk(sk);
 
                kmemcheck_annotate_bitfield(tw, flags);
index 5a6cf8667a9d54c9532dd7270877d7a4524ff79e..cc1da6d9cb351de56c7f357faebe32cdbb6f7c27 100644 (file)
@@ -639,7 +639,8 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
        /* Lookup (or create) queue header */
-       if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
+       qp = ip_find(net, ip_hdr(skb), user);
+       if (qp) {
                int ret;
 
                spin_lock(&qp->q.lock);
index 00bed6fe3b663347742f8b4db274d5ebcc81d0c2..2e0410ed8f16f0d41189a6846633a7598a04b504 100644 (file)
@@ -203,7 +203,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
                raw = raw_local_deliver(skb, protocol);
 
                ipprot = rcu_dereference(inet_protos[protocol]);
-               if (ipprot != NULL) {
+               if (ipprot) {
                        int ret;
 
                        if (!ipprot->no_policy) {
index 5b3d91be2db0c8f1a78606727c703475dd61b598..bd246792360b4b8dcda2c13328ea5f01bb603e06 100644 (file)
@@ -264,7 +264,7 @@ int ip_options_compile(struct net *net,
        unsigned char *iph;
        int optlen, l;
 
-       if (skb != NULL) {
+       if (skb) {
                rt = skb_rtable(skb);
                optptr = (unsigned char *)&(ip_hdr(skb)[1]);
        } else
index 561d67b2ac7407c2578c72f6f7a386d16915f77d..26f6f7956168a795e1c465a65e89939c5b7431d6 100644 (file)
@@ -257,7 +257,7 @@ static int ip_finish_output(struct sk_buff *skb)
 {
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
-       if (skb_dst(skb)->xfrm != NULL) {
+       if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
@@ -376,7 +376,7 @@ int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
-       if (rt != NULL)
+       if (rt)
                goto packet_routed;
 
        /* Make sure we can route this packet. */
@@ -587,7 +587,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
-                               if (frag->next != NULL)
+                               if (frag->next)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
index f64b1b24c64fb46da859826921ef07851e2bcaae..7cfb0893f2636bcc87537da3014643362f72b10f 100644 (file)
@@ -387,7 +387,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                                   skb_network_header(skb);
        serr->port = port;
 
-       if (skb_pull(skb, payload - skb->data) != NULL) {
+       if (skb_pull(skb, payload - skb->data)) {
                skb_reset_transport_header(skb);
                if (sock_queue_err_skb(sk, skb) == 0)
                        return;
index 31eaa9ba1803a81b079d62e839ccf0e37225dc85..6d364ab8e14eec5ff24c49357fbea45b02b92b39 100644 (file)
@@ -876,7 +876,7 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                        break;
                }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
-                       if (t != NULL) {
+                       if (t) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
index c4f93c0d11047b0b96bf9fbcb17cadfa21cda52e..9f7269f3c54af2ecbc74db4ec2c0f71d5184dc1c 100644 (file)
@@ -60,7 +60,7 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
 
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
-       if (tunnel != NULL) {
+       if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;
 
index a170e4bc9006953181f2abc4fc69e8ba63cd307e..c204b728bbc14f3abbc6964cb42dbf7ef226bda1 100644 (file)
@@ -316,7 +316,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        unsigned int i;
 
        mrt = ipmr_get_table(net, id);
-       if (mrt != NULL)
+       if (mrt)
                return mrt;
 
        mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
index 2dcd2e60df643690db364e4a7fc674ceb2a18956..a93f260cf24ca0a9d60346dc085eb51afdb43927 100644 (file)
@@ -971,7 +971,7 @@ bool ping_rcv(struct sk_buff *skb)
        skb_push(skb, skb->data - (u8 *)icmph);
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
-       if (sk != NULL) {
+       if (sk) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                pr_debug("rcv on socket %p\n", sk);
index 46a78204189d2566c551e5b4faa4de8baaddf4ea..6d0fa8fb8af0afd652d99de84faa9ab1eb4e99f4 100644 (file)
@@ -293,7 +293,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 
        read_lock(&raw_v4_hashinfo.lock);
        raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
-       if (raw_sk != NULL) {
+       if (raw_sk) {
                iph = (const struct iphdr *)skb->data;
                net = dev_net(skb->dev);
 
@@ -872,7 +872,7 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
-               if (skb != NULL)
+               if (skb)
                        amount = skb->len;
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
index 26a1cb348b3daa42df611ba597c498dba234a4ff..a78540f28276771e4c8f35024d3ee133c31317ab 100644 (file)
@@ -1591,7 +1591,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe != NULL)
+               if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                else
                        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
index 5bd809bfd0aaa454b207e45806018f650bd8ab24..094a6822c71d8cc69b1be28a9c6bb511f8f8b87b 100644 (file)
@@ -496,7 +496,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Connected or passive Fast Open socket? */
        if (sk->sk_state != TCP_SYN_SENT &&
-           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
+           (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
                if (tp->urg_seq == tp->copied_seq &&
@@ -1028,7 +1028,7 @@ static inline int select_size(const struct sock *sk, bool sg)
 
 void tcp_free_fastopen_req(struct tcp_sock *tp)
 {
-       if (tp->fastopen_req != NULL) {
+       if (tp->fastopen_req) {
                kfree(tp->fastopen_req);
                tp->fastopen_req = NULL;
        }
@@ -1042,7 +1042,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
                return -EOPNOTSUPP;
-       if (tp->fastopen_req != NULL)
+       if (tp->fastopen_req)
                return -EALREADY; /* Another Fast Open is in progress */
 
        tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
@@ -2138,7 +2138,7 @@ adjudge_to_death:
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
                 */
-               if (req != NULL)
+               if (req)
                        reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
        }
@@ -2776,7 +2776,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                break;
 
        case TCP_FASTOPEN:
-               if (icsk->icsk_accept_queue.fastopenq != NULL)
+               if (icsk->icsk_accept_queue.fastopenq)
                        val = icsk->icsk_accept_queue.fastopenq->max_qlen;
                else
                        val = 0;
@@ -2960,7 +2960,7 @@ void tcp_done(struct sock *sk)
 
        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);
-       if (req != NULL)
+       if (req)
                reqsk_fastopen_remove(sk, req, false);
 
        sk->sk_shutdown = SHUTDOWN_MASK;
index 86dc119a38156b794c10ef5084ebb92b4ccde152..79b34a0f4a4ae519c3f66c511c989b92fca02b09 100644 (file)
@@ -29,7 +29,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
                r->idiag_wqueue = tp->write_seq - tp->snd_una;
        }
-       if (info != NULL)
+       if (info)
                tcp_get_info(sk, info);
 }
 
index 1fd28368430356cb2cf210470319888ed6c9072f..df7e7fa1273333a963eddf799eebd2b5bf71e9ad 100644 (file)
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                fack_count += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+               if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
                    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                        tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (!before(TCP_SKB_CB(skb)->seq, end_seq))
                        break;
 
-               if ((next_dup != NULL) &&
+               if (next_dup  &&
                    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
                        in_sack = tcp_match_skb_to_sack(sk, skb,
                                                        next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                if (in_sack <= 0) {
                        tmp = tcp_shift_skb_data(sk, skb, state,
                                                 start_seq, end_seq, dup_sack);
-                       if (tmp != NULL) {
+                       if (tmp) {
                                if (tmp != skb) {
                                        skb = tmp;
                                        continue;
@@ -5321,7 +5321,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL) {
+       if (skb) {
                icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
                security_inet_conn_established(sk, skb);
        }
@@ -5690,7 +5690,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        req = tp->fastopen_rsk;
-       if (req != NULL) {
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                    sk->sk_state != TCP_FIN_WAIT1);
 
@@ -5780,7 +5780,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                 * ACK we have received, this would have acknowledged
                 * our SYNACK so stop the SYNACK timer.
                 */
-               if (req != NULL) {
+               if (req) {
                        /* Return RST if ack_seq is invalid.
                         * Note that RFC793 only says to generate a
                         * DUPACK for it but for TCP Fast Open it seems
index 9ff311cf00f3cf83bb2d4a711ff70997f56f5984..560f9571f7c43957b5996957ebed5f093fff6d75 100644 (file)
@@ -1305,7 +1305,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        /* Copy over the MD5 key from the original socket */
        key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
                                AF_INET);
-       if (key != NULL) {
+       if (key) {
                /*
                 * We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
@@ -1797,7 +1797,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk != NULL);
+       BUG_ON(tp->fastopen_rsk);
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
index f0db1599a09cf28dca243fc3d7b58b24f8628fb1..d7003911c894075c209756a0ce26950a6a31aba4 100644 (file)
@@ -294,7 +294,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);
 
-       if (tw != NULL) {
+       if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);
@@ -332,7 +332,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
-                       if (key != NULL) {
+                       if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
                                        BUG();
@@ -454,7 +454,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 {
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
-       if (newsk != NULL) {
+       if (newsk) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
index bdc80734cd2c79a2bdbb07f9c0e4eea077077417..7404e5238e004395ce0a55a073c806076b2394c8 100644 (file)
@@ -641,7 +641,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
                if (unlikely(!ireq->tstamp_ok))
                        remaining -= TCPOLEN_SACKPERM_ALIGNED;
        }
-       if (foc != NULL && foc->len >= 0) {
+       if (foc && foc->len >= 0) {
                u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
                need = (need + 3) & ~3U;  /* Align to 32 bits */
                if (remaining >= need) {
@@ -2224,7 +2224,7 @@ void tcp_send_loss_probe(struct sock *sk)
        int mss = tcp_current_mss(sk);
        int err = -1;
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
                goto rearm_timer;
        }
@@ -2758,7 +2758,7 @@ begin_fwd:
                        if (!tcp_can_forward_retransmit(sk))
                                break;
                        /* Backtrack if necessary to non-L'ed skb */
-                       if (hole != NULL) {
+                       if (hole) {
                                skb = hole;
                                hole = NULL;
                        }
@@ -2811,7 +2811,7 @@ void tcp_send_fin(struct sock *sk)
         */
        mss_now = tcp_current_mss(sk);
 
-       if (tcp_send_head(sk) != NULL) {
+       if (tcp_send_head(sk)) {
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(skb)->end_seq++;
                tp->write_seq++;
@@ -3015,7 +3015,7 @@ static void tcp_connect_init(struct sock *sk)
                (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
 
 #ifdef CONFIG_TCP_MD5SIG
-       if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+       if (tp->af_specific->md5_lookup(sk, sk))
                tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 
@@ -3376,8 +3376,8 @@ int tcp_write_wakeup(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return -1;
 
-       if ((skb = tcp_send_head(sk)) != NULL &&
-           before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
+       skb = tcp_send_head(sk);
+       if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
                int err;
                unsigned int mss = tcp_current_mss(sk);
                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
index 9f525a2a68df1e032a6dde7c159dfd7637aa7f4a..2162fc6ce1c1e779e0bbe4c63aa126ab1db08540 100644 (file)
@@ -1522,7 +1522,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
-               if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
+               if (skb->len > sizeof(struct udphdr) && encap_rcv) {
                        int ret;
 
                        /* Verify checksum before giving to encap */
@@ -1802,7 +1802,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk != NULL) {
+       if (sk) {
                int ret;
 
                if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
index 4915d8284a86f9ec6e5536804b24398d205e2a2b..f9386160cbee0288e294ea2cd8ba3b5be65cdbf6 100644 (file)
@@ -285,7 +285,7 @@ void udp_del_offload(struct udp_offload *uo)
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
 unlock:
        spin_unlock(&udp_offload_lock);
-       if (uo_priv != NULL)
+       if (uo_priv)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
 }
 EXPORT_SYMBOL(udp_del_offload);
@@ -394,7 +394,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
                        break;
        }
 
-       if (uo_priv != NULL) {
+       if (uo_priv) {
                NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb,
                                nhoff + sizeof(struct udphdr),
This page took 0.044173 seconds and 5 git commands to generate.