z_erofs_vle_owned_workgrp_t owned_head;
- bool initial;
-#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- erofs_off_t cachedzone_la;
-#endif
+ /* used for applying cache strategy on the fly */
+ bool backmost;
+ erofs_off_t headoffset;
};
#define VLE_FRONTEND_INIT(__i) { \
[...]
}, \
.builder = VLE_WORK_BUILDER_INIT(), \
.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
- .initial = true, }
+ .backmost = true, }
static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
struct page *page,
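[...]

A note before the use sites below: backmost and headoffset feed a single
decision, namely whether a compressed extent is worth keeping in the managed
cache. A minimal standalone sketch of that predicate follows; it is not part
of the patch, the names cache_hint and should_cache_compressed are
hypothetical, and unsigned long long stands in for the kernel's erofs_off_t:

#include <stdbool.h>

typedef unsigned long long erofs_off_t;	/* stand-in for the kernel typedef */

/* hypothetical mirror of the two z_erofs_vle_frontend fields renamed above */
struct cache_hint {
	bool backmost;		/* still at the backmost (first) extent? */
	erofs_off_t headoffset;	/* byte offset where the read request started */
};

/*
 * Same logic as the call-site expression further down:
 *	fe->backmost | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
 *		map->m_la < fe->headoffset : 0)
 * written here with short-circuit operators for clarity.
 */
static bool should_cache_compressed(const struct cache_hint *h,
				    erofs_off_t m_la, int cache_lvl)
{
	return h->backmost || (cache_lvl >= 2 && m_la < h->headoffset);
}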
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
if (z_erofs_vle_work_iter_end(builder))
- fe->initial = false;
+ fe->backmost = false;
map->m_la = offset + cur;
map->m_llen = 0;
[...]
erofs_blknr(map->m_pa),
grp->compressed_pages, erofs_blknr(map->m_plen),
/* compressed page caching selection strategy */
- fe->initial | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
- map->m_la < fe->cachedzone_la : 0));
+ fe->backmost | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
+ map->m_la < fe->headoffset : 0));
if (noio_outoforder && builder_is_followed(builder))
builder->role = Z_EROFS_VLE_WORK_PRIMARY;
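To sanity-check the strategy above with concrete numbers, here is a small
exercise of the hypothetical should_cache_compressed() sketch from earlier,
assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <assert.h>

static void strategy_examples(void)
{
	/* readpage at page index 3: headoffset = 3 << 12 = 12288 */
	struct cache_hint h = { .backmost = true, .headoffset = 3ULL << 12 };

	assert(should_cache_compressed(&h, 16384, 2));	/* backmost wins */

	h.backmost = false;	/* as flipped by z_erofs_vle_work_iter_end() above */
	assert(should_cache_compressed(&h, 8192, 2));	/* 8192 < 12288: behind the head */
	assert(!should_cache_compressed(&h, 16384, 2));	/* past the head, not backmost */
	assert(!should_cache_compressed(&h, 8192, 1));	/* cache level below 2 */
}

[...]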
trace_erofs_readpage(page, false);
-#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- f.cachedzone_la = (erofs_off_t)page->index << PAGE_SHIFT;
-#endif
+ f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+
err = z_erofs_do_read_page(&f, page, &pagepool);
(void)z_erofs_vle_work_iter_end(&f.builder);
[...]
trace_erofs_readpages(mapping->host, lru_to_page(pages),
nr_pages, false);
-#if (EROFS_FS_ZIP_CACHE_LVL >= 2)
- f.cachedzone_la = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
-#endif
+ f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
+
for (; nr_pages; --nr_pages) {
struct page *page = lru_to_page(pages);
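Two closing observations on the readpage/readpages hunks above. First, both
now maintain f.headoffset unconditionally: the #if (EROFS_FS_ZIP_CACHE_LVL >= 2)
guard is gone from the assignments, and the level check survives only at the
single use site in the selection expression. Second, both seed headoffset from
the first page of the request (page->index for readpage, the first LRU page
for readpages), so "map->m_la < fe->headoffset" means "logically behind where
this read began". The index-to-offset conversion itself is a plain shift; as a
sketch, reusing the stand-in typedef from earlier and again assuming 4 KiB
pages:

/* hypothetical helper: page-cache index to byte offset (index * PAGE_SIZE) */
static erofs_off_t index_to_offset(unsigned long index)
{
	return (erofs_off_t)index << 12;	/* PAGE_SHIFT == 12 assumed */
}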