int zpool_unregister_driver(struct zpool_driver *driver);
+bool zpool_evictable(struct zpool *pool);
+
#endif
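For illustration only (not part of the patch): the intended use of the new predicate is to let a client decide whether per-entry eviction metadata is worth storing at all. A minimal hypothetical caller sketch:

/* Hypothetical client helper: a per-entry writeback header (as zswap
 * uses below) is only needed when the pool can hand entries back via
 * zpool_ops.evict.
 */
static size_t client_header_len(struct zpool *pool)
{
	return zpool_evictable(pool) ? sizeof(struct zswap_header) : 0;
}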
struct zpool_driver *driver;
void *pool;
const struct zpool_ops *ops;
+ bool evictable;
struct list_head list;
};
*
* This creates a new zpool of the specified type. The gfp flags will be
* used when allocating memory, if the implementation supports it. If the
- * ops param is NULL, then the created zpool will not be shrinkable.
+ * ops param is NULL, then the created zpool will not be evictable.
*
* Implementations must guarantee this to be thread-safe.
*
zpool->driver = driver;
zpool->pool = driver->create(name, gfp, ops, zpool);
zpool->ops = ops;
+ zpool->evictable = driver->shrink && ops && ops->evict;
if (!zpool->pool) {
pr_err("couldn't create %s pool\n", type);
int zpool_shrink(struct zpool *zpool, unsigned int pages,
unsigned int *reclaimed)
{
- return zpool->driver->shrink(zpool->pool, pages, reclaimed);
+ return zpool->driver->shrink ?
+ zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
}
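With zsmalloc's stub removed below, a missing shrink callback now surfaces here as -EINVAL instead of inside the driver. A hedged sketch of how a caller might react; the surrounding reclaim logic is illustrative, not from this patch:

/* Illustrative caller: a pool whose driver lacks ->shrink can never
 * reclaim on demand, so there is no point retrying the shrink path.
 */
unsigned int reclaimed;
int err = zpool_shrink(zpool, 1, &reclaimed);

if (err == -EINVAL) {
	/* driver has no shrink callback; fail the store instead */
}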
/**
return zpool->driver->total_size(zpool->pool);
}
+/**
+ * zpool_evictable() - Test if zpool is potentially evictable
+ * @zpool: The zpool to test
+ *
+ * A zpool is only potentially evictable when it is created with struct
+ * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
+ *
+ * However, this does not necessarily mean the driver will use
+ * zpool_ops.evict in its implementation of zpool_driver.shrink; it
+ * could do internal defragmentation instead.
+ *
+ * Returns: true if potentially evictable; false otherwise.
+ */
+bool zpool_evictable(struct zpool *zpool)
+{
+ return zpool->evictable;
+}
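Usage note (illustrative): ->evictable is computed once in zpool_create_pool() and never changes, so a client may either cache the answer at setup time or, as zswap does below, simply call zpool_evictable() on every store; both are correct. A hypothetical cached variant, where use_hdr is an assumed field:

/* Hypothetical: sample the stable answer once at pool setup. */
pool->use_hdr = zpool_evictable(pool->zpool);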
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");
zs_free(pool, handle);
}
-static int zs_zpool_shrink(void *pool, unsigned int pages,
- unsigned int *reclaimed)
-{
- return -EINVAL;
-}
-
static void *zs_zpool_map(void *pool, unsigned long handle,
enum zpool_mapmode mm)
{
.destroy = zs_zpool_destroy,
.malloc = zs_zpool_malloc,
.free = zs_zpool_free,
- .shrink = zs_zpool_shrink,
.map = zs_zpool_map,
.unmap = zs_zpool_unmap,
.total_size = zs_zpool_total_size,
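For contrast, a driver that genuinely supports eviction keeps its callback wired up. A rough sketch modeled on the in-tree zbud driver (member list abbreviated; treat the details as assumptions):

static struct zpool_driver zbud_zpool_driver = {
	.type	= "zbud",
	/* create/destroy/malloc/free/map/unmap elided */
	.shrink	= zbud_zpool_shrink,	/* non-NULL, so pools created
					 * with ops->evict will report
					 * zpool_evictable() == true */
};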
struct zswap_entry *entry, *dupentry;
struct crypto_comp *tfm;
int ret;
- unsigned int dlen = PAGE_SIZE, len;
+ unsigned int hlen, dlen = PAGE_SIZE;
unsigned long handle, value;
char *buf;
u8 *src, *dst;
- struct zswap_header *zhdr;
+ struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
if (!zswap_enabled || !tree) {
ret = -ENODEV;
}
/* store */
- len = dlen + sizeof(struct zswap_header);
- ret = zpool_malloc(entry->pool->zpool, len,
+ hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
+ ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
__GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
&handle);
if (ret == -ENOSPC) {
zswap_reject_alloc_fail++;
goto put_dstmem;
}
- zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
- zhdr->swpentry = swp_entry(type, offset);
- buf = (u8 *)(zhdr + 1);
- memcpy(buf, dst, dlen);
+ buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW);
+ memcpy(buf, &zhdr, hlen);
+ memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle);
put_cpu_var(zswap_dstmem);
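For reference, the object layout the store path above produces (illustrative diagram, not part of the diff):

/*
 * evictable pool:      [ struct zswap_header | compressed data ]
 *                        |<---- hlen ------->|<---- dlen ----->|
 * non-evictable pool:  [ compressed data ]      (hlen == 0)
 */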
/* decompress */
dlen = PAGE_SIZE;
- src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
- ZPOOL_MM_RO) + sizeof(struct zswap_header);
+ src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
+ if (zpool_evictable(entry->pool->zpool))
+ src += sizeof(struct zswap_header);
dst = kmap_atomic(page);
tfm = *get_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
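Why the header can be skipped for non-evictable pools: the only reader of zswap_header.swpentry is the writeback path, which is reached exclusively through zpool_ops.evict, and a non-evictable pool can never invoke it. A simplified sketch of that consumer, loosely modeled on zswap_writeback_entry() (details elided; treat names and flow as assumptions):

static int writeback_sketch(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;

	/* Reached only via zpool_ops.evict, which only evictable pools
	 * ever call, so the header is guaranteed to be present.
	 */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry;
	zpool_unmap_handle(pool, handle);

	/* ... look up the tree entry for swpentry, decompress it and
	 * write the page back, then drop the entry ...
	 */
	return 0;
}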