lightnvm: pblk: move lba list to partial read context
author     Igor Konopko <igor.j.konopko@intel.com>
           Tue, 11 Dec 2018 19:16:22 +0000 (20:16 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 11 Dec 2018 19:22:34 +0000 (12:22 -0700)
Currently, the DMA-allocated memory backing rqd->ppa_list is
reused on partial reads to hold the intermediate lba_list_mem
and lba_list_media arrays. In preparation for dynamic DMA pool
sizes, move these arrays into the pblk_pr_ctx structure.

Reviewed-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/lightnvm/pblk-read.c
drivers/lightnvm/pblk.h

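For reference, a minimal sketch of the layout change (identifiers
taken from the hunks below; the "before" offsets are the ones the
patch removes):

    /* Before: the intermediate lba lists were carved out of the
     * single DMA allocation backing rqd->ppa_list by fixed offsets,
     * so that allocation had to be at least 3 * pblk_dma_ppa_size
     * bytes:
     */
    lba_list_mem   = ((void *)rqd->ppa_list) + pblk_dma_ppa_size;
    lba_list_media = ((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size;

    /* After: both lists are plain (non-DMA) arrays embedded in the
     * per-request partial read context, so the DMA buffer only has
     * to hold the ppa list itself:
     */
    struct pblk_pr_ctx {
            ...
            __le64 lba_list_mem[NVM_MAX_VLBA];
            __le64 lba_list_media[NVM_MAX_VLBA];
    };
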
index 9fba614adeeb22393aed40778209701a5641c1c8..19917d3c19b3745c779c5188632b91ad34c732a3 100644
@@ -224,7 +224,6 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
        unsigned long *read_bitmap = pr_ctx->bitmap;
        int nr_secs = pr_ctx->orig_nr_secs;
        int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-       __le64 *lba_list_mem, *lba_list_media;
        void *src_p, *dst_p;
        int hole, i;
 
@@ -237,13 +236,9 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
                rqd->ppa_list[0] = ppa;
        }
 
-       /* Re-use allocated memory for intermediate lbas */
-       lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-       lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
        for (i = 0; i < nr_secs; i++) {
-               lba_list_media[i] = meta_list[i].lba;
-               meta_list[i].lba = lba_list_mem[i];
+               pr_ctx->lba_list_media[i] = meta_list[i].lba;
+               meta_list[i].lba = pr_ctx->lba_list_mem[i];
        }
 
        /* Fill the holes in the original bio */
@@ -255,7 +250,7 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
                line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
                kref_put(&line->ref, pblk_line_put);
 
-               meta_list[hole].lba = lba_list_media[i];
+               meta_list[hole].lba = pr_ctx->lba_list_media[i];
 
                src_bv = new_bio->bi_io_vec[i++];
                dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -295,13 +290,9 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
        struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_pr_ctx *pr_ctx;
        struct bio *new_bio, *bio = r_ctx->private;
-       __le64 *lba_list_mem;
        int nr_secs = rqd->nr_ppas;
        int i;
 
-       /* Re-use allocated memory for intermediate lbas */
-       lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-
        new_bio = bio_alloc(GFP_KERNEL, nr_holes);
 
        if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
@@ -312,12 +303,12 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
                goto fail_free_pages;
        }
 
-       pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+       pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
        if (!pr_ctx)
                goto fail_free_pages;
 
        for (i = 0; i < nr_secs; i++)
-               lba_list_mem[i] = meta_list[i].lba;
+               pr_ctx->lba_list_mem[i] = meta_list[i].lba;
 
        new_bio->bi_iter.bi_sector = 0; /* internal bio */
        bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
@@ -325,7 +316,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
 
-       pr_ctx->ppa_ptr = NULL;
        pr_ctx->orig_bio = bio;
        bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
        pr_ctx->bio_init_idx = bio_init_idx;
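
The kmalloc() to kzalloc() switch in the hunk above pairs with
dropping the explicit ppa_ptr initialization: a zeroed context
leaves ppa_ptr NULL without the removed assignment. A minimal
sketch of the equivalence:

    /* New: the whole context, including ppa_ptr, starts zeroed */
    pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);

    /* Old: uninitialized memory plus an explicit NULL assignment */
    pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
    if (pr_ctx)
            pr_ctx->ppa_ptr = NULL;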
index e5b88a25d4d6e2a1ab85a1c92fb37d0519929d60..0e9d3960ac4c88a696b13584cddcc750d8199b91 100644
@@ -132,6 +132,8 @@ struct pblk_pr_ctx {
        unsigned int bio_init_idx;
        void *ppa_ptr;
        dma_addr_t dma_ppa_list;
+       __le64 lba_list_mem[NVM_MAX_VLBA];
+       __le64 lba_list_media[NVM_MAX_VLBA];
 };
 
 /* Pad context */
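
The cost of embedding the lists: assuming NVM_MAX_VLBA is 64 (its
value in include/linux/lightnvm.h at the time), each array is
64 * 8 = 512 bytes, so struct pblk_pr_ctx grows by 1 KiB per
partial read:

    /* Illustrative footprint, assuming NVM_MAX_VLBA == 64 */
    __le64 lba_list_mem[NVM_MAX_VLBA];    /* 64 * 8 = 512 bytes */
    __le64 lba_list_media[NVM_MAX_VLBA];  /* another 512 bytes   */

This moves the per-request bookkeeping out of the DMA pool and into
an ordinary kzalloc() allocation, which is what allows the DMA pool
size to become dynamic later.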