int order;
+ /* Scan (total_size >> priority) pages at once */
+ int priority;
+
/*
* The memory cgroup that hit its limit and as a result is the
* primary target of this reclaim invocation.
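
The new field replaces the priority argument that was threaded through the reclaim call chain: each pass scans roughly total_size >> priority pages, so DEF_PRIORITY (12) touches 1/4096 of a list and priority 0 touches all of it. A minimal userspace sketch of that scaling, illustration only, with a made-up LRU size:

#include <stdio.h>

#define DEF_PRIORITY	12	/* same value the kernel uses */

int main(void)
{
	unsigned long lru_size = 1UL << 20;	/* hypothetical 1M-page LRU */
	int priority;

	/* scan target per pass: total_size >> priority */
	for (priority = DEF_PRIORITY; priority >= 0; priority--)
		printf("priority %2d -> scan %lu pages\n",
		       priority, lru_size >> priority);
	return 0;
}
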
static unsigned long shrink_page_list(struct list_head *page_list,
struct mem_cgroup_zone *mz,
struct scan_control *sc,
- int priority,
unsigned long *ret_nr_dirty,
unsigned long *ret_nr_writeback)
{
* unless under significant pressure.
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+ (!current_is_kswapd() ||
+ sc->priority >= DEF_PRIORITY - 2)) {
/*
* Immediately reclaim when written back.
* Similar in principle to deactivate_page()
*/
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority, enum lru_list lru)
+ struct scan_control *sc, enum lru_list lru)
{
LIST_HEAD(page_list);
unsigned long nr_scanned;
update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
- nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+ nr_reclaimed = shrink_page_list(&page_list, mz, sc,
&nr_dirty, &nr_writeback);
spin_lock_irq(&zone->lru_lock);
* DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
* isolated page is PageWriteback
*/
- if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+ if (nr_writeback && nr_writeback >=
+ (nr_taken >> (DEF_PRIORITY - sc->priority)))
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
zone_idx(zone),
nr_scanned, nr_reclaimed,
- priority,
+ sc->priority,
trace_shrink_flags(file));
return nr_reclaimed;
}
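
The writeback throttle scales with reclaim pressure: at DEF_PRIORITY every isolated page must be under writeback before waiting, and by DEF_PRIORITY-6 a single one is enough, as the table in the comment above says. A standalone sketch of the threshold arithmetic, illustration only, assuming a full SWAP_CLUSTER_MAX (32 page) batch:

#include <stdio.h>

#define DEF_PRIORITY		12
#define SWAP_CLUSTER_MAX	32

int main(void)
{
	unsigned long nr_taken = SWAP_CLUSTER_MAX;	/* one isolated batch */
	int priority;

	for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--) {
		/* same threshold as the nr_writeback test above */
		unsigned long thresh = nr_taken >> (DEF_PRIORITY - priority);

		if (thresh)
			printf("priority %2d: wait once %2lu of %lu isolated pages are under writeback\n",
			       priority, thresh, nr_taken);
		else
			printf("priority %2d: wait if any isolated page is under writeback\n",
			       priority);
	}
	return 0;
}
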
static void shrink_active_list(unsigned long nr_to_scan,
struct mem_cgroup_zone *mz,
struct scan_control *sc,
- int priority, enum lru_list lru)
+ enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority)
+ struct scan_control *sc)
{
int file = is_file_lru(lru);
if (is_active_lru(lru)) {
if (inactive_list_is_low(mz, file))
- shrink_active_list(nr_to_scan, mz, sc, priority, lru);
+ shrink_active_list(nr_to_scan, mz, sc, lru);
return 0;
}
- return shrink_inactive_list(nr_to_scan, mz, sc, priority, lru);
+ return shrink_inactive_list(nr_to_scan, mz, sc, lru);
}
static int vmscan_swappiness(struct scan_control *sc)
* nr[0] = anon pages to scan; nr[1] = file pages to scan
*/
static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
- unsigned long *nr, int priority)
+ unsigned long *nr)
{
unsigned long anon, file, free;
unsigned long anon_prio, file_prio;
unsigned long scan;
scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap || !vmscan_swappiness(sc)) {
- scan >>= priority;
+ if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= sc->priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
scan = div64_u64(scan * fraction[file], denominator);
}
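
get_scan_count() is where the stored priority sets the scan batch: the per-LRU page count is shifted down by sc->priority and then split by the anon/file fractions. A simplified standalone sketch of that calculation, illustration only, with made-up fraction values (upstream they are derived from swappiness and the recent_scanned/recent_rotated ratios):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define DEF_PRIORITY		12
#define SWAP_CLUSTER_MAX	32

int main(void)
{
	/* made-up inputs: 512K file pages on the LRU, file list gets a
	 * 3/4 share of the scan */
	uint64_t lru_pages   = 512 * 1024;
	uint64_t fraction    = 3;
	uint64_t denominator = 4;
	int priority;

	for (priority = DEF_PRIORITY; priority >= 0; priority -= 3) {
		uint64_t scan = lru_pages >> priority;

		if (!scan)			/* the force_scan fallback */
			scan = SWAP_CLUSTER_MAX;
		scan = scan * fraction / denominator;	/* div64_u64() upstream */
		printf("priority %2d: scan %" PRIu64 " file pages this pass\n",
		       priority, scan);
	}
	return 0;
}
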
/* Use reclaim/compaction for costly allocs or under memory pressure */
-static bool in_reclaim_compaction(int priority, struct scan_control *sc)
+static bool in_reclaim_compaction(struct scan_control *sc)
{
if (COMPACTION_BUILD && sc->order &&
(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
- priority < DEF_PRIORITY - 2))
+ sc->priority < DEF_PRIORITY - 2))
return true;
return false;
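
Reclaim keeps going on behalf of compaction either for costly allocations or once priority has dropped below DEF_PRIORITY - 2, i.e. the zone is under real pressure. A standalone sketch of the same predicate, illustration only, with a few example inputs:

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY		12
#define PAGE_ALLOC_COSTLY_ORDER	3
#define COMPACTION_BUILD	1	/* assume CONFIG_COMPACTION=y */

/* same test as in_reclaim_compaction(), with sc->order and sc->priority
 * passed explicitly for the sketch */
static bool reclaim_for_compaction(int order, int priority)
{
	return COMPACTION_BUILD && order &&
	       (order > PAGE_ALLOC_COSTLY_ORDER || priority < DEF_PRIORITY - 2);
}

int main(void)
{
	printf("order 4, priority 12: %d\n", reclaim_for_compaction(4, 12));
	printf("order 2, priority 12: %d\n", reclaim_for_compaction(2, 12));
	printf("order 2, priority  9: %d\n", reclaim_for_compaction(2, 9));
	printf("order 0, priority  3: %d\n", reclaim_for_compaction(0, 3));
	return 0;
}
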
static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
unsigned long nr_reclaimed,
unsigned long nr_scanned,
- int priority,
struct scan_control *sc)
{
unsigned long pages_for_compaction;
unsigned long inactive_lru_pages;
/* If not in reclaim/compaction mode, stop */
- if (!in_reclaim_compaction(priority, sc))
+ if (!in_reclaim_compaction(sc))
return false;
/* Consider stopping depending on scan and reclaim activity */
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
+static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
- get_scan_count(mz, sc, nr, priority);
+ get_scan_count(mz, sc, nr);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
- mz, sc, priority);
+ mz, sc);
}
}
/*
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+ if (nr_reclaimed >= nr_to_reclaim &&
+ sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
*/
if (inactive_anon_is_low(mz))
shrink_active_list(SWAP_CLUSTER_MAX, mz,
- sc, priority, LRU_ACTIVE_ANON);
+ sc, LRU_ACTIVE_ANON);
/* reclaim/compaction might need reclaim to continue */
if (should_continue_reclaim(mz, nr_reclaimed,
- sc->nr_scanned - nr_scanned,
- priority, sc))
+ sc->nr_scanned - nr_scanned, sc))
goto restart;
throttle_vm_writeout(sc->gfp_mask);
}
-static void shrink_zone(int priority, struct zone *zone,
- struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
- .priority = priority,
+ .priority = sc->priority,
};
struct mem_cgroup *memcg;
.zone = zone,
};
- shrink_mem_cgroup_zone(priority, &mz, sc);
+ shrink_mem_cgroup_zone(&mz, sc);
/*
* Limit reclaim has historically picked one memcg and
* scanned it with decreasing priority levels until
* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
- struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
if (COMPACTION_BUILD) {
/*
/* need some check to avoid more shrink_zone() */
}
- shrink_zone(priority, zone, sc);
+ shrink_zone(zone, sc);
}
return aborted_reclaim;
struct scan_control *sc,
struct shrink_control *shrink)
{
- int priority;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
if (global_reclaim(sc))
count_vm_event(ALLOCSTALL);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
sc->nr_scanned = 0;
- aborted_reclaim = shrink_zones(priority, zonelist, sc);
+ aborted_reclaim = shrink_zones(zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
/* Take a nap, wait for some writeback to complete */
if (!sc->hibernation_mode && sc->nr_scanned &&
- priority < DEF_PRIORITY - 2) {
+ sc->priority < DEF_PRIORITY - 2) {
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
&preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
- }
+ } while (--sc->priority >= 0);
out:
delayacct_freepages_end();
.may_unmap = 1,
.may_swap = 1,
.order = order,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
.may_unmap = 1,
.may_swap = !noswap,
.order = 0,
+ .priority = 0,
.target_mem_cgroup = memcg,
};
struct mem_cgroup_zone mz = {
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
sc.may_writepage,
sc.gfp_mask);
* will pick up pages from other mem cgroups as well. We hack
* the priority and make it zero.
*/
- shrink_mem_cgroup_zone(0, &mz, &sc);
+ shrink_mem_cgroup_zone(&mz, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
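
With the argument gone, the "make it zero" hack lives in the .priority = 0 initializer above; since get_scan_count() shifts by sc->priority, a zero priority leaves the whole LRU eligible in a single pass. A tiny standalone sketch of that effect, illustration only, using a cut-down stand-in for scan_control:

#include <stdio.h>

/* trimmed-down stand-in for struct scan_control, illustration only */
struct sc_sketch {
	int order;
	int priority;	/* scan (total_size >> priority) pages at once */
};

static unsigned long scan_target(unsigned long lru_size,
				 const struct sc_sketch *sc)
{
	return lru_size >> sc->priority;	/* as in get_scan_count() */
}

int main(void)
{
	/* soft-limit reclaim pins priority to 0, so one pass covers the
	 * whole list, same effect as the old explicit 0 argument */
	struct sc_sketch sc = { .order = 0, .priority = 0 };
	unsigned long lru_size = 1UL << 20;	/* hypothetical */

	printf("scan %lu of %lu pages in one pass\n",
	       scan_target(lru_size, &sc), lru_size);
	return 0;
}
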
.may_swap = !noswap,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.order = 0,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = memcg,
.nodemask = NULL, /* we don't care about the placement */
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
}
#endif
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
- int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *memcg;
if (inactive_anon_is_low(&mz))
shrink_active_list(SWAP_CLUSTER_MAX, &mz,
- sc, priority, LRU_ACTIVE_ANON);
+ sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
{
int all_zones_ok;
unsigned long balanced;
- int priority;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
};
loop_again:
total_scanned = 0;
+ sc.priority = DEF_PRIORITY;
sc.nr_reclaimed = 0;
sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
unsigned long lru_pages = 0;
int has_under_min_watermark_zone = 0;
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/*
* Do some background aging of the anon list, to give
* pages a chance to be referenced before reclaiming.
*/
- age_active_anon(zone, &sc, priority);
+ age_active_anon(zone, &sc);
/*
* If the number of buffer_heads in the machine
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
sc.nr_scanned = 0;
!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone) + balance_gap,
end_zone, 0)) {
- shrink_zone(priority, zone, &sc);
+ shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
if (has_under_min_watermark_zone)
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
else
*/
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
- }
+ } while (--sc.priority >= 0);
out:
/*
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/* Would compaction fail due to lack of free memory? */
.nr_to_reclaim = nr_to_reclaim,
.hibernation_mode = 1,
.order = 0,
+ .priority = DEF_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.order = order,
+ .priority = ZONE_RECLAIM_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
* Free memory by calling shrink zone with increasing
* priorities until we have enough memory freed.
*/
- priority = ZONE_RECLAIM_PRIORITY;
do {
- shrink_zone(priority, zone, &sc);
- priority--;
- } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+ shrink_zone(zone, &sc);
+ } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
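
The rewritten loop visits the same priorities as the one it replaces; the starting value now comes from the .priority = ZONE_RECLAIM_PRIORITY initializer and the final decrement is skipped once enough pages have been reclaimed. A standalone comparison of the two loop shapes, illustration only, with a stubbed-out reclaim step:

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY	4	/* same value as mm/vmscan.c */

/* stand-in for shrink_zone(): pretend every pass frees 8 pages */
static unsigned long fake_shrink(int priority)
{
	printf("  pass at priority %d\n", priority);
	return 8;
}

int main(void)
{
	unsigned long nr_pages = 32, reclaimed;
	int priority;

	puts("old loop:");
	reclaimed = 0;
	priority = ZONE_RECLAIM_PRIORITY;
	do {
		reclaimed += fake_shrink(priority);
		priority--;
	} while (priority >= 0 && reclaimed < nr_pages);

	puts("new loop:");
	reclaimed = 0;
	priority = ZONE_RECLAIM_PRIORITY;	/* now set by the .priority initializer */
	do {
		reclaimed += fake_shrink(priority);
	} while (reclaimed < nr_pages && --priority >= 0);

	return 0;
}
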
nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);