	kvfree(md);
}

-static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);
+	int ret = 0;

	if (dm_table_bio_based(t)) {
		/*
	BUG_ON(!p ||
	       bioset_initialized(&md->bs) ||
	       bioset_initialized(&md->io_bs));

-	md->bs = p->bs;
-	memset(&p->bs, 0, sizeof(p->bs));
-	md->io_bs = p->io_bs;
-	memset(&p->io_bs, 0, sizeof(p->io_bs));
+	ret = bioset_init_from_src(&md->bs, &p->bs);
+	if (ret)
+		goto out;
+	ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
+	if (ret)
+		bioset_exit(&md->bs);
out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
+	return ret;
}
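The hunk above turns __bind_mempools() from void into an int-returning function: instead of copying the table's bio_set structs into the mapped_device and zeroing the source, it now calls bioset_init_from_src(), which can fail, so the first bio_set is torn down with bioset_exit() when the second initialization fails, and the errno is returned after the common dm_table_free_md_mempools() tail. Below is a minimal, self-contained userspace sketch of that goto-based unwind shape; init_a(), init_b() and exit_a() are hypothetical stand-ins for the two bioset_init_from_src() calls and bioset_exit(), not kernel code.

#include <errno.h>
#include <stdio.h>

/*
 * Standalone illustration of the error-unwind pattern used in
 * __bind_mempools() above.  init_a()/init_b()/exit_a() are hypothetical
 * stand-ins for bioset_init_from_src() and bioset_exit().
 */
static int init_a(void)  { return 0; }        /* first init succeeds */
static int init_b(void)  { return -ENOMEM; }  /* pretend the second init fails */
static void exit_a(void) { puts("unwound first init"); }

static int bind_both(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto out;		/* nothing to unwind yet */
	ret = init_b();
	if (ret)
		exit_a();		/* second init failed: release the first */
out:
	/* common tail runs on success and failure alike, like
	 * dm_table_free_md_mempools() in the patch */
	return ret;
}

int main(void)
{
	printf("bind_both() = %d\n", bind_both());	/* -ENOMEM with these stand-ins */
	return 0;
}

The caller-side half of the patch, in __bind(), then has to propagate that errno to its own caller: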
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	bool request_based = dm_table_request_based(t);
	sector_t size;
+	int ret;

	lockdep_assert_held(&md->suspend_lock);

		md->immutable_target = dm_table_get_immutable_target(t);
	}

-	__bind_mempools(md, t);
+	ret = __bind_mempools(md, t);
+	if (ret) {
+		old_map = ERR_PTR(ret);
+		goto out;
+	}

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, (void *)t);
	if (old_map)
		dm_sync_table(md);

+out:
	return old_map;
}
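Because __bind() returns the old table pointer, the new failure path folds the errno into that pointer with ERR_PTR() and jumps to the new out label, leaving md->map untouched; callers then distinguish a real map from an error with the IS_ERR()/PTR_ERR() helpers from <linux/err.h>. The sketch below re-implements that encoding trick in plain userspace C to show how one pointer-sized return value carries either result; the ERR_PTR/IS_ERR/PTR_ERR definitions here are illustrative approximations, and bind_table() is a hypothetical stand-in for __bind().

#include <errno.h>
#include <stdio.h>

/*
 * Userspace sketch of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom:
 * a small negative errno is folded into the top of the pointer range, so
 * one return value carries either a valid pointer or an error code.
 * Illustrative only, not the kernel implementation.
 */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for __bind(): old map on success, ERR_PTR() on failure. */
static void *bind_table(int fail)
{
	static int dummy_old_map;

	if (fail)
		return ERR_PTR(-ENOMEM);	/* like old_map = ERR_PTR(ret) */
	return &dummy_old_map;
}

int main(void)
{
	void *map = bind_table(1);

	if (IS_ERR(map))
		printf("bind failed: %ld\n", PTR_ERR(map));
	else
		printf("bind ok, old map at %p\n", map);
	return 0;
}

Encoding the error in the returned pointer keeps __bind()'s signature unchanged; dm_swap_table() already returns error pointers on other failure paths, so this new failure flows through the existing handling in its callers.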