From: Andy Grover Date: Tue, 12 Jan 2010 19:56:44 +0000 (-0800) Subject: RDS: cleanup: remove "== NULL"s and "!= NULL"s in ptr comparisons X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=8690bfa17aea4c42da1bcf90a7af93d161eca624;p=openwrt%2Fstaging%2Fblogic.git RDS: cleanup: remove "== NULL"s and "!= NULL"s in ptr comparisons Favor "if (foo)" style over "if (foo != NULL)". Signed-off-by: Andy Grover --- diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index aebfecbdb841..63474e1f4dd8 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -62,7 +62,7 @@ static int rds_release(struct socket *sock) struct rds_sock *rs; unsigned long flags; - if (sk == NULL) + if (!sk) goto out; rs = rds_sk_to_rs(sk); diff --git a/net/rds/bind.c b/net/rds/bind.c index 5d95fc007f1a..65de5cbdb576 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -121,7 +121,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) do { if (rover == 0) rover++; - if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) { + if (!rds_bind_tree_walk(addr, cpu_to_be16(rover), rs)) { *port = cpu_to_be16(rover); ret = 0; break; @@ -184,7 +184,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; trans = rds_trans_get_preferred(sin->sin_addr.s_addr); - if (trans == NULL) { + if (!trans) { ret = -EADDRNOTAVAIL; rds_remove_bound(rs); if (printk_ratelimit()) diff --git a/net/rds/cong.c b/net/rds/cong.c index 0871a29f0780..c741e906d49f 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c @@ -141,7 +141,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr) unsigned long flags; map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL); - if (map == NULL) + if (!map) return NULL; map->m_addr = addr; @@ -159,7 +159,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr) ret = rds_cong_tree_walk(addr, map); spin_unlock_irqrestore(&rds_cong_lock, flags); - if (ret == NULL) { + if (!ret) { ret = map; map = NULL; } @@ -205,7 +205,7 @@ int rds_cong_get_maps(struct rds_connection *conn) conn->c_lcong = rds_cong_from_addr(conn->c_laddr); conn->c_fcong = rds_cong_from_addr(conn->c_faddr); - if (conn->c_lcong == NULL || conn->c_fcong == NULL) + if (!(conn->c_lcong && conn->c_fcong)) return -ENOMEM; return 0; diff --git a/net/rds/connection.c b/net/rds/connection.c index 895e39cdc6a6..9c9afb58a143 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -148,7 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, goto out; conn = kmem_cache_zalloc(rds_conn_slab, gfp); - if (conn == NULL) { + if (!conn) { conn = ERR_PTR(-ENOMEM); goto out; } @@ -502,7 +502,7 @@ int __init rds_conn_init(void) rds_conn_slab = kmem_cache_create("rds_connection", sizeof(struct rds_connection), 0, 0, NULL); - if (rds_conn_slab == NULL) + if (!rds_conn_slab) return -ENOMEM; rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index f68832798db2..b46bc2f22ab6 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -230,7 +230,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) * the rds_ibdev at all. 
*/ rds_ibdev = ib_get_client_data(dev, &rds_ib_client); - if (rds_ibdev == NULL) { + if (!rds_ibdev) { if (printk_ratelimit()) printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n", dev->name); @@ -306,7 +306,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_send_ring.w_nr * sizeof(struct rds_header), &ic->i_send_hdrs_dma, GFP_KERNEL); - if (ic->i_send_hdrs == NULL) { + if (!ic->i_send_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent send failed\n"); goto out; @@ -316,7 +316,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_recv_ring.w_nr * sizeof(struct rds_header), &ic->i_recv_hdrs_dma, GFP_KERNEL); - if (ic->i_recv_hdrs == NULL) { + if (!ic->i_recv_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent recv failed\n"); goto out; @@ -324,14 +324,14 @@ static int rds_ib_setup_qp(struct rds_connection *conn) ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), &ic->i_ack_dma, GFP_KERNEL); - if (ic->i_ack == NULL) { + if (!ic->i_ack) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent ack failed\n"); goto out; } ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); - if (ic->i_sends == NULL) { + if (!ic->i_sends) { ret = -ENOMEM; rdsdebug("send allocation failed\n"); goto out; @@ -339,7 +339,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn) memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work)); - if (ic->i_recvs == NULL) { + if (!ic->i_recvs) { ret = -ENOMEM; rdsdebug("recv allocation failed\n"); goto out; @@ -693,7 +693,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp) /* XXX too lazy? */ ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL); - if (ic == NULL) + if (!ic) return -ENOMEM; INIT_LIST_HEAD(&ic->ib_node); diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index c74e9904a6b2..d0ee9c114c6c 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -53,7 +53,7 @@ static void rds_ib_frag_drop_page(struct rds_page_frag *frag) static void rds_ib_frag_free(struct rds_page_frag *frag) { rdsdebug("frag %p page %p\n", frag, frag->f_page); - BUG_ON(frag->f_page != NULL); + BUG_ON(frag->f_page); kmem_cache_free(rds_ib_frag_slab, frag); } @@ -143,14 +143,14 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn, struct ib_sge *sge; int ret = -ENOMEM; - if (recv->r_ibinc == NULL) { + if (!recv->r_ibinc) { if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) { rds_ib_stats_inc(s_ib_rx_alloc_limit); goto out; } recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, kptr_gfp); - if (recv->r_ibinc == NULL) { + if (!recv->r_ibinc) { atomic_dec(&rds_ib_allocation); goto out; } @@ -158,17 +158,17 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn, rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr); } - if (recv->r_frag == NULL) { + if (!recv->r_frag) { recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp); - if (recv->r_frag == NULL) + if (!recv->r_frag) goto out; INIT_LIST_HEAD(&recv->r_frag->f_item); recv->r_frag->f_page = NULL; } - if (ic->i_frag.f_page == NULL) { + if (!ic->i_frag.f_page) { ic->i_frag.f_page = alloc_page(page_gfp); - if (ic->i_frag.f_page == NULL) + if (!ic->i_frag.f_page) goto out; ic->i_frag.f_offset = 0; } @@ -757,7 +757,7 @@ static void rds_ib_process_recv(struct rds_connection *conn, * into the inc and save the inc so we can hang upcoming fragments * off its list. 
*/ - if (ibinc == NULL) { + if (!ibinc) { ibinc = recv->r_ibinc; recv->r_ibinc = NULL; ic->i_ibinc = ibinc; @@ -940,13 +940,13 @@ int __init rds_ib_recv_init(void) rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming", sizeof(struct rds_ib_incoming), 0, 0, NULL); - if (rds_ib_incoming_slab == NULL) + if (!rds_ib_incoming_slab) goto out; rds_ib_frag_slab = kmem_cache_create("rds_ib_frag", sizeof(struct rds_page_frag), 0, 0, NULL); - if (rds_ib_frag_slab == NULL) + if (!rds_ib_frag_slab) kmem_cache_destroy(rds_ib_incoming_slab); else ret = 0; diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 17fa80803ab0..0b0090d2ee01 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c @@ -86,7 +86,7 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic, rm->m_sg, rm->m_nents, DMA_TO_DEVICE); - if (rm->m_rdma_op != NULL) { + if (rm->m_rdma_op) { rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); /* If the user asked for a completion notification on this @@ -525,7 +525,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, } /* map the message the first time we see it */ - if (ic->i_rm == NULL) { + if (!ic->i_rm) { /* printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n", be16_to_cpu(rm->m_inc.i_hdr.h_dport), diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index 03f01cb4e0fe..c070524c4d95 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c @@ -135,7 +135,7 @@ void rds_ib_sysctl_exit(void) int __init rds_ib_sysctl_init(void) { rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); - if (rds_ib_sysctl_hdr == NULL) + if (!rds_ib_sysctl_hdr) return -ENOMEM; return 0; } diff --git a/net/rds/info.c b/net/rds/info.c index c45c4173a44d..4fdf1b6e84ff 100644 --- a/net/rds/info.c +++ b/net/rds/info.c @@ -76,7 +76,7 @@ void rds_info_register_func(int optname, rds_info_func func) BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST); spin_lock(&rds_info_lock); - BUG_ON(rds_info_funcs[offset] != NULL); + BUG_ON(rds_info_funcs[offset]); rds_info_funcs[offset] = func; spin_unlock(&rds_info_lock); } @@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func); */ void rds_info_iter_unmap(struct rds_info_iterator *iter) { - if (iter->addr != NULL) { + if (iter->addr) { kunmap_atomic(iter->addr, KM_USER0); iter->addr = NULL; } @@ -117,7 +117,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data, unsigned long this; while (bytes) { - if (iter->addr == NULL) + if (!iter->addr) iter->addr = kmap_atomic(*iter->pages, KM_USER0); this = min(bytes, PAGE_SIZE - iter->offset); @@ -188,7 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, >> PAGE_SHIFT; pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { ret = -ENOMEM; goto out; } @@ -206,7 +206,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, call_func: func = rds_info_funcs[optname - RDS_INFO_FIRST]; - if (func == NULL) { + if (!func) { ret = -ENOPROTOOPT; goto out; } @@ -234,7 +234,7 @@ call_func: ret = -EFAULT; out: - for (i = 0; pages != NULL && i < nr_pages; i++) + for (i = 0; pages && i < nr_pages; i++) put_page(pages[i]); kfree(pages); diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index b5dd6ac39be8..712cf2d1f28e 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c @@ -257,7 +257,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn) * the rds_iwdev at all. 
*/ rds_iwdev = ib_get_client_data(dev, &rds_iw_client); - if (rds_iwdev == NULL) { + if (!rds_iwdev) { if (printk_ratelimit()) printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n", dev->name); @@ -292,7 +292,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn) ic->i_send_ring.w_nr * sizeof(struct rds_header), &ic->i_send_hdrs_dma, GFP_KERNEL); - if (ic->i_send_hdrs == NULL) { + if (!ic->i_send_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent send failed\n"); goto out; @@ -302,7 +302,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn) ic->i_recv_ring.w_nr * sizeof(struct rds_header), &ic->i_recv_hdrs_dma, GFP_KERNEL); - if (ic->i_recv_hdrs == NULL) { + if (!ic->i_recv_hdrs) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent recv failed\n"); goto out; @@ -310,14 +310,14 @@ static int rds_iw_setup_qp(struct rds_connection *conn) ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), &ic->i_ack_dma, GFP_KERNEL); - if (ic->i_ack == NULL) { + if (!ic->i_ack) { ret = -ENOMEM; rdsdebug("ib_dma_alloc_coherent ack failed\n"); goto out; } ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work)); - if (ic->i_sends == NULL) { + if (!ic->i_sends) { ret = -ENOMEM; rdsdebug("send allocation failed\n"); goto out; @@ -325,7 +325,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn) rds_iw_send_init_ring(ic); ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work)); - if (ic->i_recvs == NULL) { + if (!ic->i_recvs) { ret = -ENOMEM; rdsdebug("recv allocation failed\n"); goto out; @@ -696,7 +696,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp) /* XXX too lazy? */ ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL); - if (ic == NULL) + if (!ic) return -ENOMEM; INIT_LIST_HEAD(&ic->iw_node); diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c index 3d479067d54d..48bcf4f2bf3c 100644 --- a/net/rds/iw_recv.c +++ b/net/rds/iw_recv.c @@ -53,7 +53,7 @@ static void rds_iw_frag_drop_page(struct rds_page_frag *frag) static void rds_iw_frag_free(struct rds_page_frag *frag) { rdsdebug("frag %p page %p\n", frag, frag->f_page); - BUG_ON(frag->f_page != NULL); + BUG_ON(frag->f_page); kmem_cache_free(rds_iw_frag_slab, frag); } @@ -143,14 +143,14 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn, struct ib_sge *sge; int ret = -ENOMEM; - if (recv->r_iwinc == NULL) { + if (!recv->r_iwinc) { if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) { rds_iw_stats_inc(s_iw_rx_alloc_limit); goto out; } recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, kptr_gfp); - if (recv->r_iwinc == NULL) { + if (!recv->r_iwinc) { atomic_dec(&rds_iw_allocation); goto out; } @@ -158,17 +158,17 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn, rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr); } - if (recv->r_frag == NULL) { + if (!recv->r_frag) { recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp); - if (recv->r_frag == NULL) + if (!recv->r_frag) goto out; INIT_LIST_HEAD(&recv->r_frag->f_item); recv->r_frag->f_page = NULL; } - if (ic->i_frag.f_page == NULL) { + if (!ic->i_frag.f_page) { ic->i_frag.f_page = alloc_page(page_gfp); - if (ic->i_frag.f_page == NULL) + if (!ic->i_frag.f_page) goto out; ic->i_frag.f_offset = 0; } @@ -716,7 +716,7 @@ static void rds_iw_process_recv(struct rds_connection *conn, * into the inc and save the inc so we can hang upcoming fragments * off its list. 
*/ - if (iwinc == NULL) { + if (!iwinc) { iwinc = recv->r_iwinc; recv->r_iwinc = NULL; ic->i_iwinc = iwinc; @@ -899,13 +899,13 @@ int __init rds_iw_recv_init(void) rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming", sizeof(struct rds_iw_incoming), 0, 0, NULL); - if (rds_iw_incoming_slab == NULL) + if (!rds_iw_incoming_slab) goto out; rds_iw_frag_slab = kmem_cache_create("rds_iw_frag", sizeof(struct rds_page_frag), 0, 0, NULL); - if (rds_iw_frag_slab == NULL) + if (!rds_iw_frag_slab) kmem_cache_destroy(rds_iw_incoming_slab); else ret = 0; diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c index 52182ff7519e..dced532f9cfb 100644 --- a/net/rds/iw_send.c +++ b/net/rds/iw_send.c @@ -86,7 +86,7 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic, rm->m_sg, rm->m_nents, DMA_TO_DEVICE); - if (rm->m_rdma_op != NULL) { + if (rm->m_rdma_op) { rds_iw_send_unmap_rdma(ic, rm->m_rdma_op); /* If the user asked for a completion notification on this @@ -556,7 +556,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm, } /* map the message the first time we see it */ - if (ic->i_rm == NULL) { + if (!ic->i_rm) { /* printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n", be16_to_cpu(rm->m_inc.i_hdr.h_dport), diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c index 1c4428a61a02..3cb0587d6f50 100644 --- a/net/rds/iw_sysctl.c +++ b/net/rds/iw_sysctl.c @@ -125,7 +125,7 @@ void rds_iw_sysctl_exit(void) int __init rds_iw_sysctl_init(void) { rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); - if (rds_iw_sysctl_hdr == NULL) + if (!rds_iw_sysctl_hdr) return -ENOMEM; return 0; } diff --git a/net/rds/loop.c b/net/rds/loop.c index dd9879379457..a74b469a844a 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -112,7 +112,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) unsigned long flags; lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); - if (lc == NULL) + if (!lc) return -ENOMEM; INIT_LIST_HEAD(&lc->loop_node); diff --git a/net/rds/message.c b/net/rds/message.c index 9a1d67e001ba..809656c2b25c 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -240,7 +240,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in unsigned int i; rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL); - if (rm == NULL) + if (!rm) return ERR_PTR(-ENOMEM); set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); @@ -284,7 +284,7 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov, sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. 
*/ while (total_len) { - if (sg_page(sg) == NULL) { + if (!sg_page(sg)) { ret = rds_page_remainder_alloc(sg, total_len, GFP_HIGHUSER); if (ret) diff --git a/net/rds/page.c b/net/rds/page.c index 595a952d4b17..e5b2527ae257 100644 --- a/net/rds/page.c +++ b/net/rds/page.c @@ -116,7 +116,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, /* jump straight to allocation if we're trying for a huge page */ if (bytes >= PAGE_SIZE) { page = alloc_page(gfp); - if (page == NULL) { + if (!page) { ret = -ENOMEM; } else { sg_set_page(scat, page, PAGE_SIZE, 0); @@ -162,7 +162,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, rem = &per_cpu(rds_page_remainders, get_cpu()); local_irq_save(flags); - if (page == NULL) { + if (!page) { ret = -ENOMEM; break; } diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 463b458ff27e..dee698b979af 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -189,7 +189,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, goto out; } - if (rs->rs_transport->get_mr == NULL) { + if (!rs->rs_transport->get_mr) { ret = -EOPNOTSUPP; goto out; } @@ -205,13 +205,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, /* XXX clamp nr_pages to limit the size of this alloc? */ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { ret = -ENOMEM; goto out; } mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL); - if (mr == NULL) { + if (!mr) { ret = -ENOMEM; goto out; } @@ -244,7 +244,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, nents = ret; sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); - if (sg == NULL) { + if (!sg) { ret = -ENOMEM; goto out; } @@ -425,7 +425,7 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force) /* May have to issue a dma_sync on this memory region. * Note we could avoid this if the operation was a RDMA READ, * but at this point we can't tell. 
*/ - if (mr != NULL) { + if (mr) { if (mr->r_trans->sync_mr) mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE); @@ -511,13 +511,13 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs, } pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL); - if (pages == NULL) { + if (!pages) { ret = -ENOMEM; goto out; } op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL); - if (op == NULL) { + if (!op) { ret = -ENOMEM; goto out; } @@ -643,7 +643,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct rds_rdma_op *op; if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) || - rm->m_rdma_op != NULL) + rm->m_rdma_op) return -EINVAL; op = rds_rdma_prepare(rs, CMSG_DATA(cmsg)); @@ -681,7 +681,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm, spin_lock_irqsave(&rs->rs_rdma_lock, flags); mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL); - if (mr == NULL) + if (!mr) err = -EINVAL; /* invalid r_key */ else atomic_inc(&mr->r_refcount); diff --git a/net/rds/recv.c b/net/rds/recv.c index c93588c2d553..88f1f5aecfa6 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -210,7 +210,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, } rs = rds_find_bound(daddr, inc->i_hdr.h_dport); - if (rs == NULL) { + if (!rs) { rds_stats_inc(s_recv_drop_no_sock); goto out; } @@ -251,7 +251,7 @@ static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc) { unsigned long flags; - if (*inc == NULL) { + if (!*inc) { read_lock_irqsave(&rs->rs_recv_lock, flags); if (!list_empty(&rs->rs_recv_queue)) { *inc = list_entry(rs->rs_recv_queue.next, diff --git a/net/rds/send.c b/net/rds/send.c index 725fb0419797..817997daf785 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -164,7 +164,7 @@ int rds_send_xmit(struct rds_connection *conn) * offset and S/G temporaries. */ rm = conn->c_xmit_rm; - if (rm != NULL && + if (rm && conn->c_xmit_hdr_off == sizeof(struct rds_header) && conn->c_xmit_sg == rm->m_nents) { conn->c_xmit_rm = NULL; @@ -180,8 +180,8 @@ int rds_send_xmit(struct rds_connection *conn) /* If we're asked to send a cong map update, do so. */ - if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) { - if (conn->c_trans->xmit_cong_map != NULL) { + if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { + if (conn->c_trans->xmit_cong_map) { conn->c_map_offset = 0; conn->c_map_bytes = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; @@ -204,7 +204,7 @@ int rds_send_xmit(struct rds_connection *conn) * the connction. We can use this ref while holding the * send_sem.. rds_send_reset() is serialized with it. 
*/ - if (rm == NULL) { + if (!rm) { unsigned int len; spin_lock_irqsave(&conn->c_lock, flags); @@ -224,7 +224,7 @@ int rds_send_xmit(struct rds_connection *conn) spin_unlock_irqrestore(&conn->c_lock, flags); - if (rm == NULL) { + if (!rm) { was_empty = 1; break; } @@ -875,7 +875,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, goto out; if ((rm->m_rdma_cookie || rm->m_rdma_op) && - conn->c_trans->xmit_rdma == NULL) { + !conn->c_trans->xmit_rdma) { if (printk_ratelimit()) printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", rm->m_rdma_op, conn->c_trans->xmit_rdma); @@ -961,7 +961,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) int ret = 0; rm = rds_message_alloc(0, GFP_ATOMIC); - if (rm == NULL) { + if (!rm) { ret = -ENOMEM; goto out; } diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index 7829a20325d3..5a6dd81de188 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -111,7 +111,7 @@ int __init rds_sysctl_init(void) rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); - if (rds_sysctl_reg_table == NULL) + if (!rds_sysctl_reg_table) return -ENOMEM; return 0; } diff --git a/net/rds/tcp.c b/net/rds/tcp.c index babf4577ff7d..aebe10314fdb 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -200,7 +200,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) struct rds_tcp_connection *tc; tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); - if (tc == NULL) + if (!tc) return -ENOMEM; tc->t_sock = NULL; @@ -283,7 +283,7 @@ int __init rds_tcp_init(void) rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", sizeof(struct rds_tcp_connection), 0, 0, NULL); - if (rds_tcp_conn_slab == NULL) { + if (!rds_tcp_conn_slab) { ret = -ENOMEM; goto out; } diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index c397524c039c..a65ee78db0c5 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -45,7 +45,7 @@ void rds_tcp_state_change(struct sock *sk) read_lock(&sk->sk_callback_lock); conn = sk->sk_user_data; - if (conn == NULL) { + if (!conn) { state_change = sk->sk_state_change; goto out; } diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 975183fe6950..50b649eb6933 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -116,7 +116,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes) read_lock(&sk->sk_callback_lock); ready = sk->sk_user_data; - if (ready == NULL) { /* check for teardown race */ + if (!ready) { /* check for teardown race */ ready = sk->sk_data_ready; goto out; } @@ -178,7 +178,7 @@ void rds_tcp_listen_stop(void) struct socket *sock = rds_tcp_listen_sock; struct sock *sk; - if (sock == NULL) + if (!sock) return; sk = sock->sk; diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index 1aba6878fa5d..ea7382908aa5 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -190,10 +190,10 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, * processing. 
*/ while (left) { - if (tinc == NULL) { + if (!tinc) { tinc = kmem_cache_alloc(rds_tcp_incoming_slab, arg->gfp); - if (tinc == NULL) { + if (!tinc) { desc->error = -ENOMEM; goto out; } @@ -229,7 +229,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, if (left && tc->t_tinc_data_rem) { clone = skb_clone(skb, arg->gfp); - if (clone == NULL) { + if (!clone) { desc->error = -ENOMEM; goto out; } @@ -326,7 +326,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes) read_lock(&sk->sk_callback_lock); conn = sk->sk_user_data; - if (conn == NULL) { /* check for teardown race */ + if (!conn) { /* check for teardown race */ ready = sk->sk_data_ready; goto out; } @@ -347,7 +347,7 @@ int __init rds_tcp_recv_init(void) rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming", sizeof(struct rds_tcp_incoming), 0, 0, NULL); - if (rds_tcp_incoming_slab == NULL) + if (!rds_tcp_incoming_slab) return -ENOMEM; return 0; } diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index a28b895ff0d1..e5f6ccef79ef 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -226,7 +226,7 @@ void rds_tcp_write_space(struct sock *sk) read_lock(&sk->sk_callback_lock); conn = sk->sk_user_data; - if (conn == NULL) { + if (!conn) { write_space = sk->sk_write_space; goto out; } diff --git a/net/rds/threads.c b/net/rds/threads.c index 6e2e43d5f576..7a8ca7a1d983 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -215,7 +215,7 @@ void rds_threads_exit(void) int __init rds_threads_init(void) { rds_wq = create_workqueue("krdsd"); - if (rds_wq == NULL) + if (!rds_wq) return -ENOMEM; return 0;
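
Illustration (not part of the patch): the rule applied throughout is to test pointers in boolean context, "if (foo)" / "if (!foo)", rather than comparing against NULL. Below is a minimal, self-contained C sketch of the pattern using made-up names (not RDS code), including the combined form seen in the cong.c hunk:

#include <stdlib.h>

/* Hypothetical example, not RDS code: shows the pointer-test style
 * this cleanup applies. */
struct example_conn {
	void *lcong;
	void *fcong;
};

static int example_get_maps(struct example_conn *conn)
{
	if (!conn)			/* preferred over "conn == NULL" */
		return -1;

	conn->lcong = malloc(16);
	conn->fcong = malloc(16);

	/* Combined check, as in the cong.c hunk: the old
	 * "lcong == NULL || fcong == NULL" becomes
	 * "!(lcong && fcong)" by De Morgan's law. */
	if (!(conn->lcong && conn->fcong)) {
		free(conn->lcong);	/* free(NULL) is a no-op */
		free(conn->fcong);
		return -1;		/* the kernel code returns -ENOMEM here */
	}

	return 0;
}

int main(void)
{
	struct example_conn c = { NULL, NULL };
	int ret = example_get_maps(&c);

	free(c.lcong);
	free(c.fcong);
	return ret ? 1 : 0;
}

Both forms are semantically identical in C; the patch is purely stylistic and changes no behavior.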