--- a/include/kernel-version.mk
+++ b/include/kernel-version.mk
KERNEL_PATCHVER:=$(KERNEL_TESTING_PATCHVER)
endif
-LINUX_VERSION-5.4 = .93
+LINUX_VERSION-5.4 = .94
-LINUX_KERNEL_HASH-5.4.93 = d37449403664cc3b1bac96d0d9a199dbe619885cd899c0ae3108843f42e3d522
+LINUX_KERNEL_HASH-5.4.94 = c23df57db7312e9afa5ce477046e227a3c2153efbe1f29045ad23c820aad2b39
remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1))))
sanitize_uri=$(call qstrip,$(subst @,_,$(subst :,_,$(subst .,_,$(subst -,_,$(subst /,_,$(1)))))))
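For reference, LINUX_KERNEL_HASH above is the SHA-256 of the release tarball, and the build system performs an equivalent digest check when fetching it. Below is a minimal standalone sketch of that check, not the build system's own code; it assumes OpenSSL's EVP API is available and that the tarball sits in the current directory under the standard kernel.org name (build with: cc check.c -lcrypto).

/*
 * Minimal sketch (not OpenWrt build-system code): hash a downloaded
 * linux-5.4.94.tar.xz and compare it against the LINUX_KERNEL_HASH
 * value from the hunk above.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

static const char *expected =
	"c23df57db7312e9afa5ce477046e227a3c2153efbe1f29045ad23c820aad2b39";

int main(void)
{
	unsigned char buf[65536], md[EVP_MAX_MD_SIZE];
	char hex[2 * EVP_MAX_MD_SIZE + 1];
	unsigned int mdlen, i;
	size_t n;
	FILE *f = fopen("linux-5.4.94.tar.xz", "rb");
	EVP_MD_CTX *ctx = EVP_MD_CTX_new();

	if (!f || !ctx)
		return 1;
	EVP_DigestInit_ex(ctx, EVP_sha256(), NULL);
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		EVP_DigestUpdate(ctx, buf, n);
	EVP_DigestFinal_ex(ctx, md, &mdlen);
	for (i = 0; i < mdlen; i++)
		sprintf(hex + 2 * i, "%02x", md[i]);
	printf("%s\n", strcmp(hex, expected) ? "MISMATCH" : "OK");
	EVP_MD_CTX_free(ctx);
	fclose(f);
	return 0;
}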
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -187,7 +187,7 @@ static void __init zone_sizes_init(unsig
+@@ -181,7 +181,7 @@ static void __init zone_sizes_init(unsig
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
#ifdef CONFIG_ZONE_DMA32
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -56,7 +56,7 @@ EXPORT_SYMBOL(physvirt_offset);
- struct page *vmemmap __ro_after_init;
- EXPORT_SYMBOL(vmemmap);
+@@ -50,7 +50,7 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
+phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
-@@ -174,7 +174,7 @@ static void __init reserve_elfcorehdr(vo
+@@ -168,7 +168,7 @@ static void __init reserve_elfcorehdr(vo
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma32_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
-@@ -187,7 +187,7 @@ static void __init zone_sizes_init(unsig
+@@ -181,7 +181,7 @@ static void __init zone_sizes_init(unsig
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
#ifdef CONFIG_ZONE_DMA32
#endif
max_zone_pfns[ZONE_NORMAL] = max;
-@@ -200,16 +200,16 @@ static void __init zone_sizes_init(unsig
+@@ -194,16 +194,16 @@ static void __init zone_sizes_init(unsig
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
memcpy(zhole_size, zone_size, sizeof(zhole_size));
-@@ -221,14 +221,14 @@ static void __init zone_sizes_init(unsig
+@@ -215,14 +215,14 @@ static void __init zone_sizes_init(unsig
continue;
#ifdef CONFIG_ZONE_DMA32
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
-@@ -420,9 +420,9 @@ void __init arm64_memblock_init(void)
+@@ -410,9 +410,9 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA32))
reserve_crashkernel();
-@@ -430,7 +430,7 @@ void __init arm64_memblock_init(void)
+@@ -420,7 +420,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
-@@ -534,7 +534,7 @@ static void __init free_unused_memmap(vo
+@@ -524,7 +524,7 @@ static void __init free_unused_memmap(vo
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
#endif
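The hunk above is the renamed helper that derives the ZONE_DMA32 limit from the start of DRAM rather than from absolute address 4G: the limit is the end of the first 4 GiB window of DRAM, clamped to the end of DRAM, so RAM that begins above 4 GiB still gets a DMA32 zone. A standalone sketch of that arithmetic follows; the DRAM bounds are invented example values, not from any real board.

/*
 * Illustration of the limit computed in the hunk above:
 * offset = start-of-DRAM masked to a 4 GiB boundary,
 * limit  = min(offset + 4 GiB, end-of-DRAM).
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t dram_start = 0x200000000ULL; /* 8 GiB: DRAM begins above 4G */
	uint64_t dram_end   = 0x400000000ULL; /* 16 GiB */
	uint64_t offset = dram_start & GENMASK_ULL(63, 32);
	uint64_t limit  = offset + (1ULL << 32);

	if (limit > dram_end)
		limit = dram_end;
	/* prints 0x300000000: the low 4 GiB window of this DRAM layout */
	printf("arm64_dma32_phys_limit = %#llx\n",
	       (unsigned long long)limit);
	return 0;
}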
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -56,6 +56,13 @@ EXPORT_SYMBOL(physvirt_offset);
- struct page *vmemmap __ro_after_init;
- EXPORT_SYMBOL(vmemmap);
+@@ -50,6 +50,13 @@
+ s64 memstart_addr __ro_after_init = -1;
+ EXPORT_SYMBOL(memstart_addr);
+/*
+ * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
-@@ -169,15 +176,16 @@ static void __init reserve_elfcorehdr(vo
+@@ -163,15 +170,16 @@ static void __init reserve_elfcorehdr(vo
{
}
#endif /* CONFIG_CRASH_DUMP */
}
#ifdef CONFIG_NUMA
-@@ -186,6 +194,9 @@ static void __init zone_sizes_init(unsig
+@@ -180,6 +188,9 @@ static void __init zone_sizes_init(unsig
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
-@@ -201,13 +212,18 @@ static void __init zone_sizes_init(unsig
+@@ -195,13 +206,18 @@ static void __init zone_sizes_init(unsig
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long max_dma32 = min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
-@@ -219,11 +235,17 @@ static void __init zone_sizes_init(unsig
+@@ -213,11 +229,17 @@ static void __init zone_sizes_init(unsig
if (start >= max)
continue;
}
#endif
if (end > max_dma32) {
-@@ -418,9 +440,11 @@ void __init arm64_memblock_init(void)
+@@ -408,9 +430,11 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
else
arm64_dma32_phys_limit = PHYS_MASK + 1;
-@@ -430,7 +454,7 @@ void __init arm64_memblock_init(void)
+@@ -420,7 +444,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
-@@ -534,7 +558,7 @@ static void __init free_unused_memmap(vo
+@@ -524,7 +548,7 @@ static void __init free_unused_memmap(vo
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
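Taken together, the hunks above leave zone_sizes_init() building both DMA zones. The following is a sketch of the NUMA flavor of that function as it looks on 5.4 with this backport applied, assembled from the hunks rather than quoted verbatim from a patched tree: ZONE_DMA is capped by arm64_dma_phys_limit (the low region, e.g. the first 1G mentioned in the comment above), ZONE_DMA32 by arm64_dma32_phys_limit, and ZONE_NORMAL takes the rest.

/* Sketch, not verbatim patched source: zone limits in page-frame numbers. */
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}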
/*
* We need to be able to catch inadvertent references to memstart_addr
* that occur (potentially in generic code) before arm64_memblock_init()
-@@ -440,8 +443,10 @@ void __init arm64_memblock_init(void)
+@@ -430,8 +433,10 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -91,7 +91,7 @@ static void __init reserve_crashkernel(v
+@@ -85,7 +85,7 @@ static void __init reserve_crashkernel(v
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
-crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
+crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
-@@ -459,7 +459,7 @@ void __init arm64_memblock_init(void)
+@@ -449,7 +449,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsig
+@@ -208,15 +208,14 @@ static void __init zone_sizes_init(unsig
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
#endif
#ifdef CONFIG_ZONE_DMA32
max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
-@@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsig
+@@ -230,25 +229,23 @@ static void __init zone_sizes_init(unsig
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
-@@ -416,6 +416,8 @@ void __init arm64_memblock_init(void)
+@@ -406,6 +406,8 @@ void __init arm64_memblock_init(void)
initrd_end = initrd_start + phys_initrd_size;
}
early_init_fdt_scan_reserved_mem();
/* 4GB maximum for 32-bit only capable devices */
-@@ -426,8 +428,6 @@ void __init arm64_memblock_init(void)
+@@ -416,8 +418,6 @@ void __init arm64_memblock_init(void)
reserve_crashkernel();
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
-@@ -1253,7 +1253,7 @@ static int mvebu_gpio_probe(struct platf
+@@ -1248,7 +1248,7 @@ static int mvebu_gpio_probe(struct platf
* pins.
*/
for (i = 0; i < 4; i++) {