return err;
}
-int erofs_map_blocks_iter(struct inode *inode,
- struct erofs_map_blocks *map,
- struct page **mpage_ret, int flags)
-{
- /* by default, reading raw data never use erofs_map_blocks_iter */
- if (unlikely(!is_inode_layout_compression(inode))) {
- if (*mpage_ret)
- put_page(*mpage_ret);
- *mpage_ret = NULL;
-
- return erofs_map_blocks(inode, map, flags);
- }
-
-#ifdef CONFIG_EROFS_FS_ZIP
- return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
-#else
- /* data compression is not available */
- return -ENOTSUPP;
-#endif
-}
-
int erofs_map_blocks(struct inode *inode,
struct erofs_map_blocks *map, int flags)
{
if (unlikely(is_inode_layout_compression(inode))) {
- struct page *mpage = NULL;
- int err;
+ int err = z_erofs_map_blocks_iter(inode, map, flags);
- err = erofs_map_blocks_iter(inode, map, &mpage, flags);
- if (mpage)
- put_page(mpage);
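+		/* release the cached metadata page; this caller does not reuse it */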
+ if (map->mpage) {
+ put_page(map->mpage);
+ map->mpage = NULL;
+ }
return err;
}
return erofs_map_blocks_flatmode(inode, map, flags);
u64 m_plen, m_llen;
unsigned int m_flags;
+
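+	/* metadata page cached across calls to avoid re-reading it */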
+ struct page *mpage;
};
/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW 0x0001
+#ifdef CONFIG_EROFS_FS_ZIP
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags);
+#else
+static inline int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
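+	/* data compression is not available */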
+ return -ENOTSUPP;
+}
+#endif
+
/* data.c */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
}
extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
-extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
- struct page **, int);
-
-struct erofs_map_blocks_iter {
- struct erofs_map_blocks map;
- struct page *mpage;
-};
-
-#ifdef CONFIG_EROFS_FS_ZIP
-extern int z_erofs_map_blocks_iter(struct inode *,
- struct erofs_map_blocks *,
- struct page **, int);
-#endif
static inline struct page *
erofs_get_inline_page(struct inode *inode,
struct inode *const inode;
struct z_erofs_vle_work_builder builder;
- struct erofs_map_blocks_iter m_iter;
+ struct erofs_map_blocks map;
z_erofs_vle_owned_workgrp_t owned_head;
#define VLE_FRONTEND_INIT(__i) { \
.inode = __i, \
- .m_iter = { \
- { .m_llen = 0, .m_plen = 0 }, \
+ .map = { \
+ .m_llen = 0, \
+ .m_plen = 0, \
.mpage = NULL \
}, \
.builder = VLE_WORK_BUILDER_INIT(), \
{
struct super_block *const sb = fe->inode->i_sb;
struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
- struct erofs_map_blocks_iter *const m = &fe->m_iter;
- struct erofs_map_blocks *const map = &m->map;
+ struct erofs_map_blocks *const map = &fe->map;
struct z_erofs_vle_work_builder *const builder = &fe->builder;
const loff_t offset = page_offset(page);
map->m_la = offset + cur;
map->m_llen = 0;
- err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0);
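+	/* the metadata page is cached in map->mpage across iterations */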
+ err = z_erofs_map_blocks_iter(fe->inode, map, 0);
if (unlikely(err))
goto err_out;
z_erofs_submit_and_unzip(&f, &pagepool, true);
out:
- if (f.m_iter.mpage)
- put_page(f.m_iter.mpage);
+ if (f.map.mpage)
+ put_page(f.map.mpage);
/* clean up the remaining free pages */
put_pages_list(&pagepool);
z_erofs_submit_and_unzip(&f, &pagepool, sync);
- if (f.m_iter.mpage)
- put_page(f.m_iter.mpage);
+ if (f.map.mpage)
+ put_page(f.map.mpage);
/* clean up the remaining free pages */
put_pages_list(&pagepool);
int z_erofs_map_blocks_iter(struct inode *inode,
struct erofs_map_blocks *map,
- struct page **mpage_ret, int flags)
+ int flags)
{
void *kaddr;
const struct vle_map_blocks_iter_ctx ctx = {
.inode = inode,
.sb = inode->i_sb,
.clusterbits = EROFS_I_SB(inode)->clusterbits,
- .mpage_ret = mpage_ret,
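+		/* cache the metadata page within the map itself */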
+ .mpage_ret = &map->mpage,
.kaddr_ret = &kaddr
};
const unsigned int clustersize = 1 << ctx.clusterbits;
/* initialize `pblk' to keep gcc from printing foolish warnings */
erofs_blk_t mblk, pblk = 0;
- struct page *mpage = *mpage_ret;
+ struct page *mpage = map->mpage;
struct z_erofs_vle_decompressed_index *di;
unsigned int cluster_type, logical_cluster_ofs;
int err = 0;
err = PTR_ERR(mpage);
goto out;
}
- *mpage_ret = mpage;
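+		/* cache the newly read metadata page for subsequent calls */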
+ map->mpage = mpage;
} else {
lock_page(mpage);
DBG_BUGON(!PageUptodate(mpage));
	/* get the corresponding first chunk */
err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
&pblk, &map->m_flags);
- mpage = *mpage_ret;
+ mpage = map->mpage;
if (unlikely(err)) {
if (mpage)