From 86357b19bcabd9355937f3fb84f90ba9fe76a5d3 Mon Sep 17 00:00:00 2001
From: Andy Grover
Date: Fri, 30 Oct 2009 08:51:54 +0000
Subject: [PATCH] RDS: Fix potential race around rds_i[bw]_allocation

"At rds_ib_recv_refill_one(), it first executes atomic_read(&rds_ib_allocation)
for the if-condition check, and then executes atomic_inc(&rds_ib_allocation) if
the condition was not satisfied. However, if any other code that updates
rds_ib_allocation runs between these two atomic operations, a race can result
(especially when rds_ib_allocation + 1 == rds_ib_sysctl_max_recv_allocation)."

This patch fixes the race by using atomic_add_unless(), which makes it
impossible to push the count above rds_ib_sysctl_max_recv_allocation, and by
decrementing the count again if the subsequent allocation fails.

It also makes an identical change to the iWARP transport.

Reported-by: Shin Hong
Signed-off-by: Andy Grover
Signed-off-by: David S. Miller
---
 net/rds/ib_recv.c | 7 ++++---
 net/rds/iw_recv.c | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cd7a6cfcab03..2f009d391c29 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -143,15 +143,16 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_ibinc == NULL) {
-		if (atomic_read(&rds_ib_allocation) >= rds_ib_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
 			rds_ib_stats_inc(s_ib_rx_alloc_limit);
 			goto out;
 		}
 
 		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, kptr_gfp);
-		if (recv->r_ibinc == NULL)
+		if (recv->r_ibinc == NULL) {
+			atomic_dec(&rds_ib_allocation);
 			goto out;
-		atomic_inc(&rds_ib_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
 		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
 	}
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 8683f5f66c4b..9f98150af19f 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -143,15 +143,16 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
 	int ret = -ENOMEM;
 
 	if (recv->r_iwinc == NULL) {
-		if (atomic_read(&rds_iw_allocation) >= rds_iw_sysctl_max_recv_allocation) {
+		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
 			rds_iw_stats_inc(s_iw_rx_alloc_limit);
 			goto out;
 		}
 
 		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab, kptr_gfp);
-		if (recv->r_iwinc == NULL)
+		if (recv->r_iwinc == NULL) {
+			atomic_dec(&rds_iw_allocation);
 			goto out;
-		atomic_inc(&rds_iw_allocation);
+		}
 		INIT_LIST_HEAD(&recv->r_iwinc->ii_frags);
 		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
 	}
-- 
2.30.2
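
To make the race and the fix concrete outside the kernel, the sketch below is a
minimal user-space illustration using C11 <stdatomic.h>, not kernel code. The
function names (racy_try_alloc, bounded_inc) and the limit value are made up
for the example; bounded_inc() is a compare-and-swap loop that is assumed to
mirror, for this bounded-counter use, what atomic_add_unless() provides in a
single atomic step, followed by the decrement-on-failure rollback the patch
adds.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int allocation = 0;
static const int max_recv_allocation = 4;	/* stands in for the sysctl limit */

/*
 * Racy pattern the patch removes: another thread can bump the counter
 * between the read and the increment, overshooting the limit.
 */
static bool racy_try_alloc(void)
{
	if (atomic_load(&allocation) >= max_recv_allocation)
		return false;
	atomic_fetch_add(&allocation, 1);	/* the race window is here */
	return true;
}

/*
 * Fixed pattern: reserve the slot with one compare-and-swap loop, so the
 * limit check and the increment happen as a single atomic step.
 */
static bool bounded_inc(atomic_int *counter, int limit)
{
	int old = atomic_load(counter);

	while (old < limit) {
		if (atomic_compare_exchange_weak(counter, &old, old + 1))
			return true;	/* slot reserved atomically */
	}
	return false;			/* already at the limit */
}

int main(void)
{
	/* Racy variant: works single-threaded, can overshoot under concurrency. */
	if (racy_try_alloc())
		atomic_fetch_sub(&allocation, 1);	/* undo for the demo */

	/* Fixed variant: reserve first, roll back if the allocation fails. */
	if (bounded_inc(&allocation, max_recv_allocation)) {
		void *buf = NULL;	/* stand-in for a failed kmem_cache_alloc() */
		if (buf == NULL)
			atomic_fetch_sub(&allocation, 1);	/* rollback, as the patch does */
	}
	printf("allocation = %d\n", atomic_load(&allocation));
	return 0;
}

The point of the change is visible in bounded_inc(): because the check and the
increment are one atomic operation, no other thread can slip in between them
and push the counter past the limit.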