From eca3858631e0cbad2ca6e40f788892749428e4cb Mon Sep 17 00:00:00 2001
From: Yu Zhao <yuzhao@google.com>
Date: Wed, 21 Dec 2022 21:19:03 -0700
Subject: [PATCH 05/19] UPSTREAM: mm: multi-gen LRU: shuffle should_run_aging()

Move should_run_aging() next to its only caller left.

Link: https://lkml.kernel.org/r/20221222041905.2431096-6-yuzhao@google.com
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Larabel <Michael@MichaelLarabel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

(cherry picked from commit 77d4459a4a1a472b7309e475f962dda87d950abd)
Signed-off-by: T.J. Mercier <tjmercier@google.com>
Change-Id: I3b0383fe16b93a783b4d8c0b3a0b325160392576
Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
 1 file changed, 62 insertions(+), 62 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5a2e83e673232..0c47952714b26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4454,68 +4454,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
 	return true;
 }
 
-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
-			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
-{
-	int gen, type, zone;
-	unsigned long old = 0;
-	unsigned long young = 0;
-	unsigned long total = 0;
-	struct lru_gen_folio *lrugen = &lruvec->lrugen;
-	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
-	DEFINE_MIN_SEQ(lruvec);
-
-	/* whether this lruvec is completely out of cold folios */
-	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
-		*nr_to_scan = 0;
-		return true;
-	}
-
-	for (type = !can_swap; type < ANON_AND_FILE; type++) {
-		unsigned long seq;
-
-		for (seq = min_seq[type]; seq <= max_seq; seq++) {
-			unsigned long size = 0;
-
-			gen = lru_gen_from_seq(seq);
-
-			for (zone = 0; zone < MAX_NR_ZONES; zone++)
-				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
-
-			total += size;
-			if (seq == max_seq)
-				young += size;
-			else if (seq + MIN_NR_GENS == max_seq)
-				old += size;
-		}
-	}
-
-	/* try to scrape all its memory if this memcg was deleted */
-	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
-
-	/*
-	 * The aging tries to be lazy to reduce the overhead, while the eviction
-	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
-	 * ideal number of generations is MIN_NR_GENS+1.
-	 */
-	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
-		return true;
-
-	/*
-	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
-	 * of the total number of pages for each generation. A reasonable range
-	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
-	 * aging cares about the upper bound of hot pages, while the eviction
-	 * cares about the lower bound of cold pages.
-	 */
-	if (young * MIN_NR_GENS > total)
-		return true;
-	if (old * (MIN_NR_GENS + 2) < total)
-		return true;
-
-	return false;
-}
-
 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
 {
 	int gen, type, zone;
@@ -5099,6 +5037,68 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
 	return scanned;
 }
 
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+			     struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+	int gen, type, zone;
+	unsigned long old = 0;
+	unsigned long young = 0;
+	unsigned long total = 0;
+	struct lru_gen_folio *lrugen = &lruvec->lrugen;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	/* whether this lruvec is completely out of cold folios */
+	if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+		*nr_to_scan = 0;
+		return true;
+	}
+
+	for (type = !can_swap; type < ANON_AND_FILE; type++) {
+		unsigned long seq;
+
+		for (seq = min_seq[type]; seq <= max_seq; seq++) {
+			unsigned long size = 0;
+
+			gen = lru_gen_from_seq(seq);
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			total += size;
+			if (seq == max_seq)
+				young += size;
+			else if (seq + MIN_NR_GENS == max_seq)
+				old += size;
+		}
+	}
+
+	/* try to scrape all its memory if this memcg was deleted */
+	*nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+	/*
+	 * The aging tries to be lazy to reduce the overhead, while the eviction
+	 * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+	 * ideal number of generations is MIN_NR_GENS+1.
+	 */
+	if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+		return true;
+
+	/*
+	 * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+	 * of the total number of pages for each generation. A reasonable range
+	 * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+	 * aging cares about the upper bound of hot pages, while the eviction
+	 * cares about the lower bound of cold pages.
+	 */
+	if (young * MIN_NR_GENS > total)
+		return true;
+	if (old * (MIN_NR_GENS + 2) < total)
+		return true;
+
+	return false;
+}
+
 /*
  * For future optimizations:
  * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg