From 1b52fa98edd1c3e663ea4a06519e3d20976084a8 Mon Sep 17 00:00:00 2001
From: Sean Hefty <sean.hefty@intel.com>
Date: Fri, 12 May 2006 14:57:52 -0700
Subject: [PATCH] IB: refcount race fixes

Fix race condition during destruction calls to avoid possibility of
accessing object after it has been freed.  Instead of waking up a wait
queue directly, which is susceptible to a race where the object is
freed between the reference count going to 0 and the wake_up(), use a
completion to wait in the function doing the freeing.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
---
 drivers/infiniband/core/cm.c       | 12 ++++----
 drivers/infiniband/core/mad.c      | 47 ++++++++++++++++--------------
 drivers/infiniband/core/mad_priv.h |  5 ++--
 drivers/infiniband/core/mad_rmpp.c | 20 ++++++-------
 drivers/infiniband/core/ucm.c      | 12 ++++----
 5 files changed, 52 insertions(+), 44 deletions(-)

diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bcd..86fee43502cd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
  *
  * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
  */
+
+#include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
 	struct rb_node service_node;
 	struct rb_node sidr_id_node;
 	spinlock_t lock;	/* Do not acquire inside cm.lock */
-	wait_queue_head_t wait;
+	struct completion comp;
 	atomic_t refcount;

 	struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
 	if (atomic_dec_and_test(&cm_id_priv->refcount))
-		wake_up(&cm_id_priv->wait);
+		complete(&cm_id_priv->comp);
 }

 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 		goto error;

 	spin_lock_init(&cm_id_priv->lock);
-	init_waitqueue_head(&cm_id_priv->wait);
+	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
 	}

 	cm_free_id(cm_id->local_id);
-	atomic_dec(&cm_id_priv->refcount);
-	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+	cm_deref_id(cm_id_priv);
+	wait_for_completion(&cm_id_priv->comp);
 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
 		cm_free_work(work);
 	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 469b6923a2e2..5ad41a64314c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	INIT_WORK(&mad_agent_priv->local_work, local_completions,
 		   mad_agent_priv);
 	atomic_set(&mad_agent_priv->refcount, 1);
-	init_waitqueue_head(&mad_agent_priv->wait);
+	init_completion(&mad_agent_priv->comp);

 	return &mad_agent_priv->agent;

@@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_snoop_priv->agent.port_num = port_num;
 	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
-	init_waitqueue_head(&mad_snoop_priv->wait);
+	init_completion(&mad_snoop_priv->comp);
 	mad_snoop_priv->snoop_index = register_snoop_agent(
 						&port_priv->qp_info[qpn],
 						mad_snoop_priv);
@@ -486,6 +486,18 @@ error1:
 }
 EXPORT_SYMBOL(ib_register_mad_snoop);

+static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+{
+	if (atomic_dec_and_test(&mad_agent_priv->refcount))
+		complete(&mad_agent_priv->comp);
+}
+
+static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
+{
+	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+		complete(&mad_snoop_priv->comp);
+}
+
 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
 	struct ib_mad_port_private *port_priv;
@@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	flush_workqueue(port_priv->wq);
 	ib_cancel_rmpp_recvs(mad_agent_priv);

-	atomic_dec(&mad_agent_priv->refcount);
-	wait_event(mad_agent_priv->wait,
-		   !atomic_read(&mad_agent_priv->refcount));
+	deref_mad_agent(mad_agent_priv);
+	wait_for_completion(&mad_agent_priv->comp);

 	kfree(mad_agent_priv->reg_req);
 	ib_dereg_mr(mad_agent_priv->agent.mr);
@@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 	atomic_dec(&qp_info->snoop_count);
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

-	atomic_dec(&mad_snoop_priv->refcount);
-	wait_event(mad_snoop_priv->wait,
-		   !atomic_read(&mad_snoop_priv->refcount));
+	deref_snoop_agent(mad_snoop_priv);
+	wait_for_completion(&mad_snoop_priv->comp);

 	kfree(mad_snoop_priv);
 }
@@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
 						    send_buf, mad_send_wc);
-		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-			wake_up(&mad_snoop_priv->wait);
+		deref_snoop_agent(mad_snoop_priv);
 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 	}
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
 						   mad_recv_wc);
-		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-			wake_up(&mad_snoop_priv->wait);
+		deref_snoop_agent(mad_snoop_priv);
 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 	}
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)

 	free_send_rmpp_list(mad_send_wr);
 	kfree(send_buf->mad);
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
-		wake_up(&mad_agent_priv->wait);
+	deref_mad_agent(mad_agent_priv);
 }
 EXPORT_SYMBOL(ib_free_send_mad);

@@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
-			if (atomic_dec_and_test(&mad_agent_priv->refcount))
-				wake_up(&mad_agent_priv->wait);
+			deref_mad_agent(mad_agent_priv);
 			return;
 		}
 	}
@@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 			ib_free_recv_mad(mad_recv_wc);
-			if (atomic_dec_and_test(&mad_agent_priv->refcount))
-				wake_up(&mad_agent_priv->wait);
+			deref_mad_agent(mad_agent_priv);
 			return;
 		}
 		ib_mark_mad_done(mad_send_wr);
@@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);
-		if (atomic_dec_and_test(&mad_agent_priv->refcount))
-			wake_up(&mad_agent_priv->wait);
+		deref_mad_agent(mad_agent_priv);
 	}
 }

@@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 						   mad_send_wc);

 	/* Release reference on agent taken when sending */
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
-		wake_up(&mad_agent_priv->wait);
+	deref_mad_agent(mad_agent_priv);
 	return;
 done:
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 6c9c133d71ef..b4fa28d3160f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -37,6 +37,7 @@
 #ifndef __IB_MAD_PRIV_H__
 #define __IB_MAD_PRIV_H__

+#include <linux/completion.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
@@ -108,7 +109,7 @@ struct ib_mad_agent_private {
 	struct list_head rmpp_list;

 	atomic_t refcount;
-	wait_queue_head_t wait;
+	struct completion comp;
 };

 struct ib_mad_snoop_private {
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
 	int snoop_index;
 	int mad_snoop_flags;
 	atomic_t refcount;
-	wait_queue_head_t wait;
+	struct completion comp;
 };

 struct ib_mad_send_wr_private {
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index dfd4e588ce03..d4704e054e30 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -49,7 +49,7 @@ struct mad_rmpp_recv {
 	struct list_head list;
 	struct work_struct timeout_work;
 	struct work_struct cleanup_work;
-	wait_queue_head_t wait;
+	struct completion comp;
 	enum rmpp_state state;
 	spinlock_t lock;
 	atomic_t refcount;
@@ -69,10 +69,16 @@ struct mad_rmpp_recv {
 	u8 method;
 };

+static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+	if (atomic_dec_and_test(&rmpp_recv->refcount))
+		complete(&rmpp_recv->comp);
+}
+
 static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
 {
-	atomic_dec(&rmpp_recv->refcount);
-	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
+	deref_rmpp_recv(rmpp_recv);
+	wait_for_completion(&rmpp_recv->comp);
 	ib_destroy_ah(rmpp_recv->ah);
 	kfree(rmpp_recv);
 }
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 		goto error;

 	rmpp_recv->agent = agent;
-	init_waitqueue_head(&rmpp_recv->wait);
+	init_completion(&rmpp_recv->comp);
 	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
 	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
 	spin_lock_init(&rmpp_recv->lock);
@@ -279,12 +285,6 @@ error:	kfree(rmpp_recv);
 	return NULL;
 }

-static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
-{
-	if (atomic_dec_and_test(&rmpp_recv->refcount))
-		wake_up(&rmpp_recv->wait);
-}
-
 static struct mad_rmpp_recv *
 find_rmpp_recv(struct ib_mad_agent_private *agent,
 	       struct ib_mad_recv_wc *mad_recv_wc)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f6a05965a4e8..9164a09b6ccd 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -32,6 +32,8 @@
  *
  * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
  */
+
+#include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -72,7 +74,7 @@ struct ib_ucm_file {

 struct ib_ucm_context {
 	int id;
-	wait_queue_head_t wait;
+	struct completion comp;
 	atomic_t ref;
 	int events_reported;

@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
 {
 	if (atomic_dec_and_test(&ctx->ref))
-		wake_up(&ctx->wait);
+		complete(&ctx->comp);
 }

 static inline int ib_ucm_new_cm_id(int event)
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 		return NULL;

 	atomic_set(&ctx->ref, 1);
-	init_waitqueue_head(&ctx->wait);
+	init_completion(&ctx->comp);
 	ctx->file = file;
 	INIT_LIST_HEAD(&ctx->events);

@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

-	atomic_dec(&ctx->ref);
-	wait_event(ctx->wait, !atomic_read(&ctx->ref));
+	ib_ucm_ctx_put(ctx);
+	wait_for_completion(&ctx->comp);

 	/* No new events will be generated after destroying the cm_id. */
 	ib_destroy_cm_id(ctx->cm_id);
-- 
2.30.2
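
The conversion applied in all five files is the same idiom, so it can be
summarized outside any one of them. The sketch below is illustrative only
and is not part of the patch; struct foo, foo_create(), foo_put() and
foo_destroy() are invented stand-ins for the roles played in the patch by
cm_id_private/cm_deref_id(), ib_mad_agent_private/deref_mad_agent(), and
so on:

	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/slab.h>

	struct foo {
		atomic_t refcount;
		struct completion comp;	/* was: wait_queue_head_t wait */
	};

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;
		atomic_set(&f->refcount, 1);
		init_completion(&f->comp);	/* was: init_waitqueue_head() */
		return f;
	}

	/*
	 * Racy pattern being removed:
	 *
	 *	if (atomic_dec_and_test(&f->refcount))
	 *		wake_up(&f->wait);
	 *
	 * The destroyer's wait_event(f->wait, !atomic_read(&f->refcount))
	 * can observe refcount == 0 and kfree(f) before this CPU reaches
	 * wake_up(), which then touches freed memory.
	 */

	/* Fixed pattern: the last put signals the completion instead. */
	static void foo_put(struct foo *f)
	{
		if (atomic_dec_and_test(&f->refcount))
			complete(&f->comp);
	}

	/*
	 * The destroyer drops its own reference and then sleeps until the
	 * final complete() has run, after which freeing the object is safe.
	 */
	static void foo_destroy(struct foo *f)
	{
		foo_put(f);
		wait_for_completion(&f->comp);
		kfree(f);
	}

This works because complete() and wait_for_completion() serialize on the
completion's internal lock: the waiter cannot return, and therefore cannot
free the object, until the final complete() has finished touching it,
which is exactly the guarantee the bare wake_up() did not provide.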