* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @frpl: fast reg page list
+ * @fmr_pool: pool of fmrs
+ * @frpl: fast reg page list used by frwrs
+ * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- struct ib_mr *mr;
- struct ib_fast_reg_page_list *frpl;
+ /* Registration handle. A connection uses either fastreg (mr) or an
+  * FMR pool (fmr_pool) — the alloc paths below set one or the other,
+  * never both — so the handles can share storage in an anonymous
+  * union. NOTE(review): assumes the mode never changes mid-connection
+  * — confirm against the pool alloc/free callers.
+  */
+ union {
+ struct ib_mr *mr;
+ struct ib_fmr_pool *fmr_pool;
+ };
+ /* Matching page list: frpl for fastreg work requests,
+  * page_vec for FMR mapping (see iser_reg_page_vec).
+  */
+ union {
+ struct ib_fast_reg_page_list *frpl;
+ struct iser_page_vec *page_vec;
+ };
u8 mr_valid:1;
};
/**
* struct iser_fr_pool: connection fast registration pool
*
+ * @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
- * @union.fmr:
- * @pool: FMR pool for fast registrations
- * @page_vec: fast reg page list to hold mapped commands pages
- * used for registration
- * @union.fastreg:
- * @pool: Fast registration descriptors pool for fast
- * registrations
- * @pool_size: Size of pool
+ * @size: size of the pool
*/
struct iser_fr_pool {
- spinlock_t lock;
- union {
- struct {
- struct ib_fmr_pool *pool;
- struct iser_page_vec *page_vec;
- } fmr;
- struct {
- struct list_head pool;
- int pool_size;
- } fastreg;
- };
+ /* Unified descriptor list replacing the old fmr/fastreg union:
+  * fastreg mode queues cmds_max descriptors here, FMR mode a single
+  * descriptor wrapping the ib_fmr_pool (see iser_alloc_fmr_pool).
+  */
+ struct list_head list;
+ /* Guards list add/remove in the get/put descriptor paths. */
+ spinlock_t lock;
+ /* Number of descriptors created at pool setup; only the fastreg
+  * alloc path increments it (used to warn about leaked regions).
+  */
+ int size;
};
/**
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
- desc = list_first_entry(&fr_pool->fastreg.pool,
+ desc = list_first_entry(&fr_pool->list,
struct iser_fr_desc, list);
list_del(&desc->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
- list_add(&desc->list, &fr_pool->fastreg.pool);
+ list_add(&desc->list, &fr_pool->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
static
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct iser_page_vec *page_vec,
+ struct iser_reg_resources *rsc,
struct iser_mem_reg *mem_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_device *device = ib_conn->device;
+ struct iser_page_vec *page_vec = rsc->page_vec;
+ struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
struct ib_pool_fmr *fmr;
int ret, plen;
return -EINVAL;
}
- fmr = ib_fmr_pool_map_phys(fr_pool->fmr.pool,
+ fmr = ib_fmr_pool_map_phys(fmr_pool,
page_vec->pages,
page_vec->length,
page_vec->pages[0]);
if (mem->dma_nents == 1) {
return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
- err = iser_reg_page_vec(iser_task, mem,
- fr_pool->fmr.page_vec, mem_reg);
+ struct iser_fr_desc *desc;
+
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ err = iser_reg_page_vec(iser_task, mem, &desc->rsc, mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- fr_pool->fmr.page_vec->data_size,
- fr_pool->fmr.page_vec->length,
- fr_pool->fmr.page_vec->offset);
- for (i = 0; i < fr_pool->fmr.page_vec->length; i++)
+ desc->rsc.page_vec->data_size,
+ desc->rsc.page_vec->length,
+ desc->rsc.page_vec->offset);
+ for (i = 0; i < desc->rsc.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long)fr_pool->fmr.page_vec->pages[i]);
+ (unsigned long long)desc->rsc.page_vec->pages[i]);
}
if (err)
return err;
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_page_vec *page_vec;
+ struct iser_fr_desc *desc;
struct ib_fmr_pool *fmr_pool;
struct ib_fmr_pool_param params;
- int ret = -ENOMEM;
+ int ret;
+ INIT_LIST_HEAD(&fr_pool->list);
spin_lock_init(&fr_pool->lock);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
page_vec = kmalloc(sizeof(*page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
- if (!page_vec)
- return ret;
+ if (!page_vec) {
+ ret = -ENOMEM;
+ goto err_frpl;
+ }
page_vec->pages = (u64 *)(page_vec + 1);
if (IS_ERR(fmr_pool)) {
ret = PTR_ERR(fmr_pool);
iser_err("FMR allocation failed, err %d\n", ret);
- goto err;
+ goto err_fmr;
}
- fr_pool->fmr.page_vec = page_vec;
- fr_pool->fmr.pool = fmr_pool;
+ desc->rsc.page_vec = page_vec;
+ desc->rsc.fmr_pool = fmr_pool;
+ list_add(&desc->list, &fr_pool->list);
return 0;
-err:
+err_fmr:
kfree(page_vec);
+err_frpl:
+ kfree(desc);
+
return ret;
}
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
+
+ /* FMR mode keeps exactly one descriptor on the pool list
+  * (added by iser_alloc_fmr_pool); take it off for teardown.
+  * NOTE(review): fr_pool->lock is not taken here — presumably
+  * teardown is single-threaded; confirm no concurrent users.
+  */
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ list_del(&desc->list);
iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, fr_pool->fmr.pool);
+ ib_conn, desc->rsc.fmr_pool);
- ib_destroy_fmr_pool(fr_pool->fmr.pool);
- fr_pool->fmr.pool = NULL;
- kfree(fr_pool->fmr.page_vec);
- fr_pool->fmr.page_vec = NULL;
+ /* Release in reverse order of iser_alloc_fmr_pool:
+  * the FMR pool, the page vector, then the descriptor itself.
+  */
+ ib_destroy_fmr_pool(desc->rsc.fmr_pool);
+ kfree(desc->rsc.page_vec);
+ kfree(desc);
}
static int
struct iser_fr_desc *desc;
int i, ret;
- INIT_LIST_HEAD(&fr_pool->fastreg.pool);
+ INIT_LIST_HEAD(&fr_pool->list);
spin_lock_init(&fr_pool->lock);
- fr_pool->fastreg.pool_size = 0;
+ fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
desc = iser_create_fastreg_desc(device->ib_device, device->pd,
ib_conn->pi_support);
goto err;
}
- list_add_tail(&desc->list, &fr_pool->fastreg.pool);
- fr_pool->fastreg.pool_size++;
+ list_add_tail(&desc->list, &fr_pool->list);
+ fr_pool->size++;
}
return 0;
struct iser_fr_desc *desc, *tmp;
int i = 0;
- if (list_empty(&fr_pool->fastreg.pool))
+ if (list_empty(&fr_pool->list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &fr_pool->fastreg.pool, list) {
+ list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
list_del(&desc->list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
++i;
}
- if (i < fr_pool->fastreg.pool_size)
+ if (i < fr_pool->size)
iser_warn("pool still has %d regions registered\n",
- fr_pool->fastreg.pool_size - i);
+ fr_pool->size - i);
}
/**