RDS/IB: create a work queue for FMR flushing
author     Zach Brown <zach.brown@oracle.com>
           Tue, 6 Jul 2010 22:09:56 +0000 (15:09 -0700)
committer  Andy Grover <andy.grover@oracle.com>
           Thu, 9 Sep 2010 01:16:34 +0000 (18:16 -0700)
This patch moves the FMR flushing work into its own multi-threaded work queue.
This maintains performance in preparation for returning the main krdsd work
queue to a single-threaded work queue, which avoids deep-rooted concurrency
bugs.

It also further separates the FMR code, which might be removed some day, from
the rest of the code base.
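
For reference, a minimal sketch (not part of this patch; the example_* names
are illustrative) of the two workqueue flavors involved: items queued on a
create_workqueue() queue may run concurrently, while a
create_singlethread_workqueue() queue, the model krdsd is being returned to,
serializes everything on one kernel thread.

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_multi_wq;
    static struct workqueue_struct *example_single_wq;

    static int __init example_wq_init(void)
    {
            /* concurrent workers: flush work items can run in parallel */
            example_multi_wq = create_workqueue("example_flushd");
            if (!example_multi_wq)
                    return -ENOMEM;

            /* one kernel thread: work items are strictly serialized */
            example_single_wq = create_singlethread_workqueue("example_krdsd");
            if (!example_single_wq) {
                    destroy_workqueue(example_multi_wq);
                    return -ENOMEM;
            }
            return 0;
    }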

Signed-off-by: Zach Brown <zach.brown@oracle.com>
net/rds/ib.c
net/rds/ib.h
net/rds/ib_rdma.c

diff --git a/net/rds/ib.c b/net/rds/ib.c
index af1ef18b6ff0c833c1df911e6d77889a42257e3e..d2007b9316160e3c6bf9df526af316c4772c86dc 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -351,6 +351,7 @@ void rds_ib_exit(void)
        rds_ib_sysctl_exit();
        rds_ib_recv_exit();
        rds_trans_unregister(&rds_ib_transport);
+       rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -386,10 +387,14 @@ int __init rds_ib_init(void)
 
        INIT_LIST_HEAD(&rds_ib_devices);
 
-       ret = ib_register_client(&rds_ib_client);
+       ret = rds_ib_fmr_init();
        if (ret)
                goto out;
 
+       ret = ib_register_client(&rds_ib_client);
+       if (ret)
+               goto out_fmr_exit;
+
        ret = rds_ib_sysctl_init();
        if (ret)
                goto out_ibreg;
@@ -412,6 +417,8 @@ out_sysctl:
        rds_ib_sysctl_exit();
 out_ibreg:
        rds_ib_unregister_client();
+out_fmr_exit:
+       rds_ib_fmr_exit();
 out:
        return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index e9f9ddf440ca99be35e8f47c7ee55572a7779f81..fd4ea69d24431b1216f3ab86156d0557d0ea86d2 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -308,6 +308,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int __init rds_ib_fmr_init(void);
+void __exit rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int __init rds_ib_recv_init(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 00f3995351c8f21c833be2cd44a593f3b8f67966..0eb597670c5b5a655b94fa5ea033452298428d39 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -691,6 +691,26 @@ out_nolock:
        return ret;
 }
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int __init rds_ib_fmr_init(void)
+{
+       rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+       if (!rds_ib_fmr_wq)
+               return -ENOMEM;
+       return 0;
+}
+
+/*
+ * By the time this is called all the IB devices should have been torn down and
+ * had their pools freed.  As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void __exit rds_ib_fmr_exit(void)
+{
+       destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
@@ -718,7 +738,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-               queue_delayed_work(rds_wq, &pool->flush_worker, 10);
+               queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
        if (invalidate) {
                if (likely(!in_interrupt())) {
@@ -726,7 +746,8 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
                } else {
                        /* We get here if the user created a MR marked
                         * as use_once and invalidate at the same time. */
-                       queue_delayed_work(rds_wq, &pool->flush_worker, 10);
+                       queue_delayed_work(rds_ib_fmr_wq,
+                                          &pool->flush_worker, 10);
                }
        }
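
For reference, the flush_worker that both hunks re-queue is a struct
delayed_work, and rds_ib_fmr_exit() relies on destroy_workqueue() only being
called once the pools, and the work they queued, are gone.  Below is a
minimal, self-contained sketch of that pattern on a dedicated queue, using
illustrative example_* names rather than the driver's own code:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_fmr_wq;
    static struct delayed_work example_flush_work;

    static void example_flush_worker(struct work_struct *work)
    {
            /* the flush itself would run here, off the dedicated queue
             * rather than the shared krdsd queue */
    }

    static int __init example_init(void)
    {
            example_fmr_wq = create_workqueue("example_fmr_flushd");
            if (!example_fmr_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&example_flush_work, example_flush_worker);

            /* ask for a flush roughly 10 jiffies from now; queueing again
             * while this request is still pending does not add a second run */
            queue_delayed_work(example_fmr_wq, &example_flush_work, 10);
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* make sure no flush is pending or running, then tear down the
             * queue -- the same ordering rds_ib_fmr_exit() depends on */
            cancel_delayed_work_sync(&example_flush_work);
            destroy_workqueue(example_fmr_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The 10-jiffy delay presumably lets a burst of rds_ib_free_mr() calls be
batched into a single pool flush instead of triggering one flush per call.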