From e604c3ccb4dfbdde2467fccef9bb36170a392695 Mon Sep 17 00:00:00 2001
From: "T.J. Alumbaugh" <talumbau@google.com>
Date: Wed, 18 Jan 2023 00:18:27 +0000
Subject: [PATCH 18/19] UPSTREAM: mm: multi-gen LRU: simplify
 lru_gen_look_around()

Update the folio generation in place with or without
current->reclaim_state->mm_walk. The LRU lock is held for longer if
mm_walk is NULL and the number of folios to update is more than
PAGEVEC_SIZE.

This causes a measurable regression from the LRU lock contention during
a microbenchmark. But avoiding such a tiny regression is not worth the
complexity.

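In rough terms, the per-folio update inside the look-around loop now
reduces to the following condensed sketch (paraphrased from the hunks
below, not a literal excerpt; the real code uses a continue instead of
an else branch):

	if (walk) {
		/* batch the update via the page table walker's state */
		old_gen = folio_update_gen(folio, new_gen);
		if (old_gen >= 0 && old_gen != new_gen)
			update_batch_size(walk, folio, old_gen, new_gen);
	} else {
		/* no mm_walk available: activate the folio directly */
		old_gen = folio_lru_gen(folio);
		if (old_gen < 0)
			folio_set_referenced(folio);
		else if (old_gen != new_gen)
			folio_activate(folio);
	}
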
Link: https://lkml.kernel.org/r/20230118001827.1040870-8-talumbau@google.com
Change-Id: I9ce18b4f4062e6c1c13c98ece9422478eb8e1846
Signed-off-by: T.J. Alumbaugh <talumbau@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit abf086721a2f1e6897c57796f7268df1b194c750)
Bug: 274865848
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
 mm/vmscan.c | 73 +++++++++++++++++------------------------------------
 1 file changed, 23 insertions(+), 50 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f496c2e670a9..f6ce7a1fd78a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4571,13 +4571,12 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 {
 	int i;
-	pte_t *pte;
 	unsigned long start;
 	unsigned long end;
-	unsigned long addr;
 	struct lru_gen_mm_walk *walk;
 	int young = 0;
-	unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+	pte_t *pte = pvmw->pte;
+	unsigned long addr = pvmw->address;
 	struct folio *folio = pfn_folio(pvmw->pfn);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	struct pglist_data *pgdat = folio_pgdat(folio);
@@ -4594,25 +4593,28 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 	/* avoid taking the LRU lock under the PTL when possible */
 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
 
-	start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
-	end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+	start = max(addr & PMD_MASK, pvmw->vma->vm_start);
+	end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
 
 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
-		if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			end = start + MIN_LRU_BATCH * PAGE_SIZE;
-		else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+		else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
 			start = end - MIN_LRU_BATCH * PAGE_SIZE;
 		else {
-			start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
-			end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+			start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
+			end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
 		}
 	}
 
-	pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+	/* folio_update_gen() requires stable folio_memcg() */
+	if (!mem_cgroup_trylock_pages(memcg))
+		return;
 
-	rcu_read_lock();
 	arch_enter_lazy_mmu_mode();
 
+	pte -= (addr - start) / PAGE_SIZE;
+
 	for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
 		unsigned long pfn;
 
@@ -4637,56 +4639,27 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 		      !folio_test_swapcache(folio)))
 			folio_mark_dirty(folio);
 
+		if (walk) {
+			old_gen = folio_update_gen(folio, new_gen);
+			if (old_gen >= 0 && old_gen != new_gen)
+				update_batch_size(walk, folio, old_gen, new_gen);
+
+			continue;
+		}
+
 		old_gen = folio_lru_gen(folio);
 		if (old_gen < 0)
 			folio_set_referenced(folio);
 		else if (old_gen != new_gen)
-			__set_bit(i, bitmap);
+			folio_activate(folio);
 	}
 
 	arch_leave_lazy_mmu_mode();
-	rcu_read_unlock();
+	mem_cgroup_unlock_pages();
 
 	/* feedback from rmap walkers to page table walkers */
 	if (suitable_to_scan(i, young))
 		update_bloom_filter(lruvec, max_seq, pvmw->pmd);
-
-	if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
-		for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-			folio = pfn_folio(pte_pfn(pte[i]));
-			folio_activate(folio);
-		}
-		return;
-	}
-
-	/* folio_update_gen() requires stable folio_memcg() */
-	if (!mem_cgroup_trylock_pages(memcg))
-		return;
-
-	if (!walk) {
-		spin_lock_irq(&lruvec->lru_lock);
-		new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
-	}
-
-	for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
-		folio = pfn_folio(pte_pfn(pte[i]));
-		if (folio_memcg_rcu(folio) != memcg)
-			continue;
-
-		old_gen = folio_update_gen(folio, new_gen);
-		if (old_gen < 0 || old_gen == new_gen)
-			continue;
-
-		if (walk)
-			update_batch_size(walk, folio, old_gen, new_gen);
-		else
-			lru_gen_update_size(lruvec, folio, old_gen, new_gen);
-	}
-
-	if (!walk)
-		spin_unlock_irq(&lruvec->lru_lock);
-
-	mem_cgroup_unlock_pages();
 }
 
 /******************************************************************************
-- 
2.40.1