*/
#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
+/* how to allocate cached pages for a workgroup */
+enum z_erofs_cache_alloctype {
+ DONTALLOC, /* don't allocate any cached pages */
+ DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
+};
+
+/*
+ * tagged pointer with 1-bit tag for all compressed pages
+ * tag 1 - the page is just found with an extra page reference
+ */
+typedef tagptr1_t compressed_page_t;
+
+#define tag_compressed_page_justfound(page) \
+ tagptr_fold(compressed_page_t, page, 1)
+
static struct workqueue_struct *z_erofs_workqueue __read_mostly;
static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
{ .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
#ifdef EROFS_FS_HAS_MANAGED_CACHE
-
-static bool grab_managed_cache_pages(struct address_space *mapping,
- erofs_blk_t start,
- struct page **compressed_pages,
- int clusterblks,
- bool reserve_allocation)
+static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
+ struct address_space *mc,
+ pgoff_t index,
+ unsigned int clusterpages,
+ enum z_erofs_cache_alloctype type,
+ struct list_head *pagepool,
+ gfp_t gfp)
{
- bool noio = true;
- unsigned int i;
+ struct page **const pages = bl->compressed_pages;
+ const unsigned int remaining = bl->compressed_deficit;
+ bool standalone = true;
+ unsigned int i, j = 0;
+
+ if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
+ return;
+
+ gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
- /* TODO: optimize by introducing find_get_pages_range */
- for (i = 0; i < clusterblks; ++i) {
- struct page *page, *found;
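+ /* make `index + i' correspond to pages[i] in the loop below */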
+ index += clusterpages - remaining;
- if (READ_ONCE(compressed_pages[i]))
+ for (i = 0; i < remaining; ++i) {
+ struct page *page;
+ compressed_page_t t;
+
+ /* the compressed page was loaded before */
+ if (READ_ONCE(pages[i]))
continue;
- page = found = find_get_page(mapping, start + i);
- if (!found) {
- noio = false;
- if (!reserve_allocation)
- continue;
- page = PAGE_UNALLOCATED;
+ page = find_get_page(mc, index + i);
+
+ if (page) {
+ t = tag_compressed_page_justfound(page);
+ } else if (type == DELAYEDALLOC) {
+ t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
+ } else { /* DONTALLOC */
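+ /* remember the first slot which still needs a page from this request */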
+ if (standalone)
+ j = i;
+ standalone = false;
+ continue;
}
- if (!cmpxchg(compressed_pages + i, NULL, page))
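+ /* the slot may be filled concurrently, drop our reference if the cmpxchg loses */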
+ if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
continue;
- if (found)
- put_page(found);
+ if (page)
+ put_page(page);
}
- return noio;
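+ /* slots before `j' no longer need pages from this request */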
+ bl->compressed_pages += j;
+ bl->compressed_deficit = remaining - j;
+
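+ /* no in-place page from this request is needed, allow out-of-order decompression */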
+ if (standalone)
+ bl->role = Z_EROFS_VLE_WORK_PRIMARY;
}
/* called by erofs_shrinker to get rid of all compressed_pages */
}
return ret;
}
+#else
+static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
+ struct address_space *mc,
+ pgoff_t index,
+ unsigned int clusterpages,
+ enum z_erofs_cache_alloctype type,
+ struct list_head *pagepool,
+ gfp_t gfp)
+{
+ /* nowhere to load compressed pages from */
+}
#endif
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
.owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
.backmost = true, }
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
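+/*
+ * the backmost work always gets cached compressed pages; with
+ * EROFS_FS_ZIP_CACHE_LVL >= 2, works starting before the read
+ * head are cached as well
+ */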
+static inline bool
+should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
+{
+ if (fe->backmost)
+ return true;
+
+ if (EROFS_FS_ZIP_CACHE_LVL >= 2)
+ return la < fe->headoffset;
+
+ return false;
+}
+#else
+static inline bool
+should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
+{
+ return false;
+}
+#endif
+
static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
struct page *page,
struct list_head *page_pool)
bool tight = builder_is_followed(builder);
struct z_erofs_vle_work *work = builder->work;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- struct address_space *const mc = MNGD_MAPPING(sbi);
- struct z_erofs_vle_workgroup *grp;
- bool noio_outoforder;
-#endif
-
+ enum z_erofs_cache_alloctype cache_strategy;
enum z_erofs_page_type page_type;
unsigned int cur, end, spiltted, index;
int err = 0;
if (unlikely(err))
goto err_out;
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- grp = fe->builder.grp;
-
- /* let's do out-of-order decompression for noio */
- noio_outoforder = grab_managed_cache_pages(mc,
- erofs_blknr(map->m_pa),
- grp->compressed_pages, erofs_blknr(map->m_plen),
- /* compressed page caching selection strategy */
- fe->backmost | (EROFS_FS_ZIP_CACHE_LVL >= 2 ?
- map->m_la < fe->headoffset : 0));
-
- if (noio_outoforder && builder_is_followed(builder))
- builder->role = Z_EROFS_VLE_WORK_PRIMARY;
-#endif
+ /* preload all compressed pages and downgrade the role if necessary */
+ if (should_alloc_managed_pages(fe, map->m_la))
+ cache_strategy = DELAYEDALLOC;
+ else
+ cache_strategy = DONTALLOC;
+
+ preload_compressed_pages(builder, MNGD_MAPPING(sbi),
+ map->m_pa / PAGE_SIZE,
+ map->m_plen / PAGE_SIZE,
+ cache_strategy, page_pool, GFP_KERNEL);
tight &= builder_is_followed(builder);
work = builder->work;
struct address_space *mapping;
struct page *oldpage, *page;
+ compressed_page_t t;
+ int justfound;
+
repeat:
page = READ_ONCE(grp->compressed_pages[nr]);
oldpage = page;
goto out_allocpage;
}
+ /* unfold the tagged pointer to get the page and its `justfound' tag */
+ t = tagptr_init(compressed_page_t, page);
+ justfound = tagptr_unfold_tags(t);
+ page = tagptr_unfold_ptr(t);
+
mapping = READ_ONCE(page->mapping);
/*
* get such a cached-like page.
*/
if (nocache) {
- /* should be locked, not uptodate, and not truncated */
+ /* `justfound' is impossible if the managed cache is disabled */
+ DBG_BUGON(justfound);
+
+ /* and it should be locked, not uptodate, and not truncated */
DBG_BUGON(!PageLocked(page));
DBG_BUGON(PageUptodate(page));
DBG_BUGON(!mapping);
lock_page(page);
+ /* only true if page reclaim went wrong, which should never happen */
+ DBG_BUGON(justfound && PagePrivate(page));
+
/* the page is still in manage cache */
if (page->mapping == mc) {
WRITE_ONCE(grp->compressed_pages[nr], page);
if (!PagePrivate(page)) {
+ /*
+ * a page already in compressed_pages[] must have
+ * PG_private set under the current constraints,
+ * so !PagePrivate(page) implies it was just found.
+ */
+ DBG_BUGON(!justfound);
+
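+ /* convert the `justfound' reference into the PG_private attachment */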
+ justfound = 0;
set_page_private(page, (unsigned long)grp);
SetPagePrivate(page);
}
* reuse this one, let's allocate a new cache-managed page.
*/
DBG_BUGON(page->mapping);
+ DBG_BUGON(!justfound);
tocache = true;
unlock_page(page);