dm: ensure bio-based DM's bioset and io_pool support targets' maximum IOs
authorMike Snitzer <snitzer@redhat.com>
Fri, 8 Dec 2017 19:40:52 +0000 (14:40 -0500)
committerMike Snitzer <snitzer@redhat.com>
Wed, 13 Dec 2017 17:16:00 +0000 (12:16 -0500)
alloc_multiple_bios() assumes it can allocate the requested number of
bios but until now there was no guarantee that the mempools would be
accommodating.

Suggested-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h

index aaffd0c0ee9a76c71f23f9bb1074ec8057b8c6f7..7b22cc8d30f445526ed83a7730e77b6d3645a4eb 100644 (file)
@@ -1079,7 +1079,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 {
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
-       struct dm_target *tgt;
+       unsigned min_pool_size = 0;
+       struct dm_target *ti;
        unsigned i;
 
        if (unlikely(type == DM_TYPE_NONE)) {
@@ -1089,11 +1090,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
 
        if (__table_type_bio_based(type))
                for (i = 0; i < t->num_targets; i++) {
-                       tgt = t->targets + i;
-                       per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
+                       ti = t->targets + i;
+                       per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+                       min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }
 
-       t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
+       t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
+                                          per_io_data_size, min_pool_size);
        if (!t->mempools)
                return -ENOMEM;
 
index 2e0e10a1c03027d022e24a38df427dc04c08e61c..9d255e5c968892091a41c9f3783bd368f7786efa 100644 (file)
@@ -1810,17 +1810,26 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
        struct dm_md_mempools *p = dm_table_get_md_mempools(t);
 
-       if (md->bs) {
-               /* The md already has necessary mempools. */
-               if (dm_table_bio_based(t)) {
+       if (dm_table_bio_based(t)) {
+               /* The md may already have mempools that need changing. */
+               if (md->bs) {
                        /*
                         * Reload bioset because front_pad may have changed
                         * because a different table was loaded.
                         */
                        bioset_free(md->bs);
-                       md->bs = p->bs;
-                       p->bs = NULL;
+                       md->bs = NULL;
                }
+               if (md->io_pool) {
+                       /*
+                        * Reload io_pool because pool_size may have changed
+                        * because a different table was loaded.
+                        */
+                       mempool_destroy(md->io_pool);
+                       md->io_pool = NULL;
+               }
+
+       } else if (md->bs) {
                /*
                 * There's no need to reload with request-based dm
                 * because the size of front_pad doesn't change.
@@ -1838,7 +1847,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
        p->io_pool = NULL;
        md->bs = p->bs;
        p->bs = NULL;
-
 out:
        /* mempool bind completed, no longer need any mempools in the table */
        dm_table_free_md_mempools(t);
@@ -2727,7 +2735,8 @@ int dm_noflush_suspending(struct dm_target *ti)
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-                                           unsigned integrity, unsigned per_io_data_size)
+                                           unsigned integrity, unsigned per_io_data_size,
+                                           unsigned min_pool_size)
 {
        struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
        unsigned int pool_size = 0;
@@ -2739,16 +2748,15 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
        switch (type) {
        case DM_TYPE_BIO_BASED:
        case DM_TYPE_DAX_BIO_BASED:
-               pool_size = dm_get_reserved_bio_based_ios();
+               pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
                front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-       
                pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
                if (!pools->io_pool)
                        goto out;
                break;
        case DM_TYPE_REQUEST_BASED:
        case DM_TYPE_MQ_REQUEST_BASED:
-               pool_size = dm_get_reserved_rq_based_ios();
+               pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
                /* per_io_data_size is used for blk-mq pdu at queue allocation */
                break;
index 36399bb875ddac7fd0e74981d55fcd4d24b1b030..7c66c316add34b64f787bb4e4d4ed24e3150a2c9 100644 (file)
@@ -206,7 +206,8 @@ void dm_kcopyd_exit(void);
  * Mempool operations
  */
 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-                                           unsigned integrity, unsigned per_bio_data_size);
+                                           unsigned integrity, unsigned per_bio_data_size,
+                                           unsigned min_pool_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*