From 6896ae528e4654e6f4bdff575337237f40de48b2 Mon Sep 17 00:00:00 2001
From: David Plowman <david.plowman@raspberrypi.com>
Date: Tue, 29 Mar 2022 16:10:06 +0100
Subject: [PATCH] mm,page_alloc,cma: introduce a customisable threshold
 for allocating pages in cma

On some platforms the CMA area can be half of the entire system
memory, meaning that allocations start happening in the CMA area
immediately. This leads to fragmentation and subsequent fatal
cma_alloc failures.

We introduce an "alloc_in_cma_threshold" parameter which requires that
more than this many sixteenths of a zone's free pages be in the CMA
area before the allocator will try to use them. By default this is set
to 12, but the previous behaviour can be restored by setting it to 8
on startup.
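
For example, with the default value of 12 the allocator only starts
taking movable allocations from CMA pageblocks once more than 12/16
(75%) of a zone's free pages are CMA pages; booting with
alloc_in_cma_threshold=8 restores the previous more-than-half (8/16)
behaviour.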

Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
---
 mm/page_alloc.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -190,6 +190,27 @@ EXPORT_SYMBOL(init_on_alloc);
 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
+
+static unsigned long _alloc_in_cma_threshold __read_mostly
+	= ALLOC_IN_CMA_THRESHOLD_DEFAULT;
+
+static int __init alloc_in_cma_threshold_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 ||
+	    res > ALLOC_IN_CMA_THRESHOLD_MAX) {
+		pr_err("Bad alloc_in_cma_threshold value\n");
+		return 0;
+	}
+	_alloc_in_cma_threshold = res;
+	pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
+	return 0;
+}
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
+
 static bool _init_on_alloc_enabled_early __read_mostly
 	= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -2980,12 +3001,13 @@ __rmqueue(struct zone *zone, unsigned in
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA when more than a given proportion of
+		 * the zone's free memory is in the CMA area.
 		 */
 		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+		    zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
+		    * _alloc_in_cma_threshold) {
 			page = __rmqueue_cma_fallback(zone, order);
 			if (page)
 				goto out;
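
A minimal user-space sketch of the check this patch adds to
__rmqueue(), for illustration only: the use_cma_first() helper and the
hard-coded page counts below are invented for the example and are not
part of the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_IN_CMA_THRESHOLD_MAX 16
#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12

/* Prefer CMA pageblocks only once more than threshold/16 of a zone's
 * free pages are CMA pages, mirroring the patched __rmqueue() test. */
static bool use_cma_first(unsigned long free_pages,
			  unsigned long free_cma_pages,
			  unsigned long threshold)
{
	return free_cma_pages >
	       free_pages / ALLOC_IN_CMA_THRESHOLD_MAX * threshold;
}

int main(void)
{
	/* 700 of 1000 free pages are CMA: below the default 12/16 (75%)
	 * threshold, so regular pageblocks are still preferred (prints 0). */
	printf("%d\n", use_cma_first(1000, 700, ALLOC_IN_CMA_THRESHOLD_DEFAULT));

	/* A threshold of 8 reproduces the old "more than half" rule (prints 1). */
	printf("%d\n", use_cma_first(1000, 700, 8));

	return 0;
}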