# SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n
UBSAN_SANITIZE_common.o := n
-UBSAN_SANITIZE_kasan.o := n
+UBSAN_SANITIZE_generic.o := n
KCOV_INSTRUMENT := n
-CFLAGS_REMOVE_kasan.o = -pg
+CFLAGS_REMOVE_generic.o = -pg
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-obj-y := common.o kasan.o report.o kasan_init.o quarantine.o
+obj-y := common.o generic.o report.o init.o quarantine.o
--- /dev/null
+/*
+ * This file contains core generic KASAN code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/linkage.h>
+#include <linux/memblock.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/bug.h>
+
+#include "kasan.h"
+#include "../slab.h"
+
+/*
+ * All functions below are always inlined so that the compiler can
+ * perform better optimizations in each of __asan_loadX/__asan_storeX
+ * depending on the memory access size X.
+ */
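+
+/*
+ * Shadow encoding used by generic KASAN: one shadow byte tracks a
+ * KASAN_SHADOW_SCALE_SIZE == 8 byte granule of memory.
+ *   0        - the whole granule is accessible;
+ *   1..7     - only the first N bytes of the granule are accessible;
+ *   negative - the whole granule is poisoned; the value encodes the
+ *              reason (e.g. KASAN_GLOBAL_REDZONE, KASAN_ALLOCA_LEFT).
+ */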
+
+static __always_inline bool memory_is_poisoned_1(unsigned long addr)
+{
+ s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(shadow_value)) {
+ s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ return unlikely(last_accessible_byte >= shadow_value);
+ }
+
+ return false;
+}
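+
+/*
+ * Example: with a shadow value of 5, offsets 0..4 of the granule are
+ * accessible; an access at offset (addr & KASAN_SHADOW_MASK) == 5 is
+ * reported because 5 >= 5.
+ */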
+
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+ unsigned long size)
+{
+ u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
+
+ /*
+ * The access crosses an 8-byte (one shadow byte per 8 bytes)
+ * boundary. Such an access maps into two shadow bytes, so we
+ * need to check them both.
+ */
+ if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+ return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
+
+ return memory_is_poisoned_1(addr + size - 1);
+}
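+
+/*
+ * Example: an 8-byte access at addr % 8 == 1 ends at offset 0 of the
+ * next granule, so ((addr + 7) & KASAN_SHADOW_MASK) == 0 < 7 and both
+ * shadow bytes are checked; the first must be 0 since the access spans
+ * the whole tail of its granule.
+ */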
+
+static __always_inline bool memory_is_poisoned_16(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ /* An unaligned 16-byte access maps into 3 shadow bytes. */
+ if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ return *shadow_addr || memory_is_poisoned_1(addr + 15);
+
+ return *shadow_addr;
+}
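+
+/*
+ * In the aligned case the access covers exactly two granules, so both
+ * shadow bytes are tested with one u16 load that must read as zero.
+ */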
+
+static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
+ size_t size)
+{
+ while (size) {
+ if (unlikely(*start))
+ return (unsigned long)start;
+ start++;
+ size--;
+ }
+
+ return 0;
+}
+
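+/*
+ * Return the address of the first nonzero shadow byte in [start, end),
+ * or 0 if the range is entirely zero. After aligning the start to an
+ * 8-byte boundary, the bulk of the range is scanned a u64 at a time.
+ */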
+static __always_inline unsigned long memory_is_nonzero(const void *start,
+ const void *end)
+{
+ unsigned int words;
+ unsigned long ret;
+ unsigned int prefix = (unsigned long)start % 8;
+
+ if (end - start <= 16)
+ return bytes_is_nonzero(start, end - start);
+
+ if (prefix) {
+ prefix = 8 - prefix;
+ ret = bytes_is_nonzero(start, prefix);
+ if (unlikely(ret))
+ return ret;
+ start += prefix;
+ }
+
+ words = (end - start) / 8;
+ while (words) {
+ if (unlikely(*(u64 *)start))
+ return bytes_is_nonzero(start, 8);
+ start += 8;
+ words--;
+ }
+
+ return bytes_is_nonzero(start, (end - start) % 8);
+}
+
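+/*
+ * For arbitrary sizes: a nonzero shadow byte is tolerated only if it is
+ * the very last one in the range and the access ends early enough
+ * within that partially accessible granule.
+ */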
+static __always_inline bool memory_is_poisoned_n(unsigned long addr,
+ size_t size)
+{
+ unsigned long ret;
+
+ ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
+ kasan_mem_to_shadow((void *)addr + size - 1) + 1);
+
+ if (unlikely(ret)) {
+ unsigned long last_byte = addr + size - 1;
+ s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
+
+ if (unlikely(ret != (unsigned long)last_shadow ||
+ ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
+{
+ if (__builtin_constant_p(size)) {
+ switch (size) {
+ case 1:
+ return memory_is_poisoned_1(addr);
+ case 2:
+ case 4:
+ case 8:
+ return memory_is_poisoned_2_4_8(addr, size);
+ case 16:
+ return memory_is_poisoned_16(addr);
+ default:
+ BUILD_BUG();
+ }
+ }
+
+ return memory_is_poisoned_n(addr, size);
+}
+
+static __always_inline void check_memory_region_inline(unsigned long addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ if (unlikely(size == 0))
+ return;
+
+ if (unlikely((void *)addr <
+ kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+ kasan_report(addr, size, write, ret_ip);
+ return;
+ }
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return;
+
+ kasan_report(addr, size, write, ret_ip);
+}
+
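+/*
+ * Out-of-line version, used by the N-byte entry points below and by
+ * callers outside this file (e.g. the mem*() interceptors).
+ */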
+void check_memory_region(unsigned long addr, size_t size, bool write,
+ unsigned long ret_ip)
+{
+ check_memory_region_inline(addr, size, write, ret_ip);
+}
+
+void kasan_cache_shrink(struct kmem_cache *cache)
+{
+ quarantine_remove_cache(cache);
+}
+
+void kasan_cache_shutdown(struct kmem_cache *cache)
+{
+ if (!__kmem_cache_empty(cache))
+ quarantine_remove_cache(cache);
+}
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
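+
+/*
+ * Example: for a global with size == 30 and size_with_redzone == 64,
+ * aligned_size is 32; bytes [0, 30) are unpoisoned (the fourth shadow
+ * byte becomes 6) and [32, 64) is poisoned as the redzone.
+ */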
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
+#define DEFINE_ASAN_LOAD_STORE(size) \
+ void __asan_load##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, false, _RET_IP_);\
+ } \
+ EXPORT_SYMBOL(__asan_load##size); \
+ __alias(__asan_load##size) \
+ void __asan_load##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_load##size##_noabort); \
+ void __asan_store##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, true, _RET_IP_); \
+ } \
+ EXPORT_SYMBOL(__asan_store##size); \
+ __alias(__asan_store##size) \
+ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
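+/*
+ * Each instantiation emits __asan_load<size>()/__asan_store<size>(),
+ * the entry points the compiler calls around every <size>-byte access
+ * under outline instrumentation, plus their _noabort aliases.
+ */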
+DEFINE_ASAN_LOAD_STORE(1);
+DEFINE_ASAN_LOAD_STORE(2);
+DEFINE_ASAN_LOAD_STORE(4);
+DEFINE_ASAN_LOAD_STORE(8);
+DEFINE_ASAN_LOAD_STORE(16);
+
+void __asan_loadN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_loadN);
+
+__alias(__asan_loadN)
+void __asan_loadN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_loadN_noabort);
+
+void __asan_storeN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_storeN);
+
+__alias(__asan_storeN)
+void __asan_storeN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_storeN_noabort);
+
+/* to shut up compiler complaints */
+void __asan_handle_no_return(void) {}
+EXPORT_SYMBOL(__asan_handle_no_return);
+
+/* Emitted by compiler to poison large objects when they go out of scope. */
+void __asan_poison_stack_memory(const void *addr, size_t size)
+{
+ /*
+ * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is
+ * surrounded by redzones, so we simply round up the size to
+ * simplify the logic.
+ */
+ kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
+ KASAN_USE_AFTER_SCOPE);
+}
+EXPORT_SYMBOL(__asan_poison_stack_memory);
+
+/* Emitted by compiler to unpoison large objects when they go into scope. */
+void __asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+ kasan_unpoison_shadow(addr, size);
+}
+EXPORT_SYMBOL(__asan_unpoison_stack_memory);
+
+/* Emitted by compiler to poison alloca()ed objects. */
+void __asan_alloca_poison(unsigned long addr, size_t size)
+{
+ size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
+ rounded_up_size;
+ size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+
+ const void *left_redzone = (const void *)(addr -
+ KASAN_ALLOCA_REDZONE_SIZE);
+ const void *right_redzone = (const void *)(addr + rounded_up_size);
+
+ WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
+
+ kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
+ size - rounded_down_size);
+ kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_LEFT);
+ kasan_poison_shadow(right_redzone,
+ padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_RIGHT);
+}
+EXPORT_SYMBOL(__asan_alloca_poison);
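+
+/*
+ * Example: for size == 17, rounded_up_size == 24 and padding_size ==
+ * round_up(17, 32) - 24 == 8 (assuming KASAN_ALLOCA_REDZONE_SIZE == 32),
+ * so [addr - 32, addr) is the left redzone, [addr, addr + 17) stays
+ * accessible (the granule at addr + 16 gets shadow value 1) and
+ * [addr + 24, addr + 64) is the right redzone.
+ */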
+
+/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
+{
+ if (unlikely(!stack_top || stack_top > stack_bottom))
+ return;
+
+ kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+}
+EXPORT_SYMBOL(__asan_allocas_unpoison);
+
+/* Emitted by the compiler to [un]poison local variables. */
+#define DEFINE_ASAN_SET_SHADOW(byte) \
+ void __asan_set_shadow_##byte(const void *addr, size_t size) \
+ { \
+ __memset((void *)addr, 0x##byte, size); \
+ } \
+ EXPORT_SYMBOL(__asan_set_shadow_##byte)
+
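+/*
+ * Here addr is already a shadow address computed by the compiler: 00
+ * unpoisons, while the 0xf1-0xf8 values are the compiler's stack
+ * redzone and scope markers for local variables.
+ */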
+DEFINE_ASAN_SET_SHADOW(00);
+DEFINE_ASAN_SET_SHADOW(f1);
+DEFINE_ASAN_SET_SHADOW(f2);
+DEFINE_ASAN_SET_SHADOW(f3);
+DEFINE_ASAN_SET_SHADOW(f5);
+DEFINE_ASAN_SET_SHADOW(f8);
--- /dev/null
+/*
+ * This file contains KASAN shadow initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+#include "kasan.h"
+
+/*
+ * This page serves two purposes:
+ *   - It is used as early shadow memory: the entire shadow region is
+ *     populated with this page before we are able to set up the normal
+ *     shadow memory.
+ *   - Later it is reused as the zero shadow to cover large ranges of
+ *     memory that are allowed to be accessed but are not handled by
+ *     kasan (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_PGTABLE_LEVELS > 4
+p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
+static inline bool kasan_p4d_table(pgd_t pgd)
+{
+ return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
+}
+#else
+static inline bool kasan_p4d_table(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+#if CONFIG_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+ return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
+}
+#else
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+ return 0;
+}
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static inline bool kasan_pmd_table(pud_t pud)
+{
+ return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
+}
+#else
+static inline bool kasan_pmd_table(pud_t pud)
+{
+ return 0;
+}
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static inline bool kasan_pte_table(pmd_t pmd)
+{
+ return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
+}
+
+static inline bool kasan_zero_page_entry(pte_t pte)
+{
+ return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
+}
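+
+/*
+ * The predicates above let the teardown code tell apart entries that
+ * merely point at the shared zero-shadow tables (cleared, never freed)
+ * from tables that were allocated for a specific range.
+ */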
+
+static __init void *early_alloc(size_t size, int node)
+{
+ return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_ACCESSIBLE, node);
+}
+
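+/*
+ * Point every PTE of [addr, end) at the single write-protected
+ * kasan_zero_page: shadow reads see zero ("fully accessible") while
+ * stray shadow writes fault.
+ */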
+static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+ pte_t zero_pte;
+
+ zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
+ zero_pte = pte_wrprotect(zero_pte);
+
+ while (addr + PAGE_SIZE <= end) {
+ set_pte_at(&init_mm, addr, pte, zero_pte);
+ addr += PAGE_SIZE;
+ pte = pte_offset_kernel(pmd, addr);
+ }
+}
+
+static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+ continue;
+ }
+
+ if (pmd_none(*pmd)) {
+ pte_t *p;
+
+ if (slab_is_available())
+ p = pte_alloc_one_kernel(&init_mm, addr);
+ else
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ if (!p)
+ return -ENOMEM;
+
+ pmd_populate_kernel(&init_mm, pmd, p);
+ }
+ zero_pte_populate(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud = pud_offset(p4d, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pmd_t *pmd;
+
+ pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+ continue;
+ }
+
+ if (pud_none(*pud)) {
+ pmd_t *p;
+
+ if (slab_is_available()) {
+ p = pmd_alloc(&init_mm, pud, addr);
+ if (!p)
+ return -ENOMEM;
+ } else {
+ pud_populate(&init_mm, pud,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ }
+ zero_pmd_populate(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+
+ return 0;
+}
+
+static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = p4d_addr_end(addr, end);
+ if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
+ pud = pud_offset(p4d, addr);
+ pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd,
+ lm_alias(kasan_zero_pte));
+ continue;
+ }
+
+ if (p4d_none(*p4d)) {
+ pud_t *p;
+
+ if (slab_is_available()) {
+ p = pud_alloc(&init_mm, p4d, addr);
+ if (!p)
+ return -ENOMEM;
+ } else {
+ p4d_populate(&init_mm, p4d,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ }
+ zero_pud_populate(p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+
+ return 0;
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ *                              kasan_zero_page
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
+ */
+int __ref kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end)
+{
+ unsigned long addr = (unsigned long)shadow_start;
+ unsigned long end = (unsigned long)shadow_end;
+ pgd_t *pgd = pgd_offset_k(addr);
+ unsigned long next;
+
+ do {
+ next = pgd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ /*
+ * kasan_zero_pud should be populated with pmds
+ * at this point.
+ * The [pud,pmd]_populate*() calls below are needed
+ * only for 3- and 2-level page tables, where we
+ * don't have real puds/pmds, so pgd_populate() and
+ * pud_populate() are no-ops there.
+ *
+ * The ifndef is required to avoid build breakage:
+ * with 5level-fixup.h, pgd_populate() is not a no-op
+ * and references kasan_zero_p4d, which is defined
+ * only when 5-level paging is enabled.
+ *
+ * The ifndef can be dropped once all KASAN-enabled
+ * architectures switch to pgtable-nop4d.h.
+ */
+#ifndef __ARCH_HAS_5LEVEL_HACK
+ pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
+#endif
+ p4d = p4d_offset(pgd, addr);
+ p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
+ pud = pud_offset(p4d, addr);
+ pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
+ continue;
+ }
+
+ if (pgd_none(*pgd)) {
+ p4d_t *p;
+
+ if (slab_is_available()) {
+ p = p4d_alloc(&init_mm, pgd, addr);
+ if (!p)
+ return -ENOMEM;
+ } else {
+ pgd_populate(&init_mm, pgd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ }
+ zero_p4d_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+
+ return 0;
+}
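+
+/*
+ * Illustrative arch-side use (not part of this patch): back a whole
+ * region's shadow with the zero page, e.g. for vmalloc space:
+ *
+ *   kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ *                              kasan_mem_to_shadow((void *)VMALLOC_END));
+ */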
+
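+/*
+ * The kasan_free_*() helpers below release a page-table page only once
+ * every entry in it is none; otherwise the table is still in use.
+ */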
+static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
+{
+ pte_t *pte;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ pte = pte_start + i;
+ if (!pte_none(*pte))
+ return;
+ }
+
+ pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
+ pmd_clear(pmd);
+}
+
+static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
+{
+ pmd_t *pmd;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd = pmd_start + i;
+ if (!pmd_none(*pmd))
+ return;
+ }
+
+ pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
+ pud_clear(pud);
+}
+
+static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
+{
+ pud_t *pud;
+ int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ pud = pud_start + i;
+ if (!pud_none(*pud))
+ return;
+ }
+
+ pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
+ p4d_clear(p4d);
+}
+
+static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
+{
+ p4d_t *p4d;
+ int i;
+
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ p4d = p4d_start + i;
+ if (!p4d_none(*p4d))
+ return;
+ }
+
+ p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
+ pgd_clear(pgd);
+}
+
+static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+
+ for (; addr < end; addr = next, pte++) {
+ next = (addr + PAGE_SIZE) & PAGE_MASK;
+ if (next > end)
+ next = end;
+
+ if (!pte_present(*pte))
+ continue;
+
+ if (WARN_ON(!kasan_zero_page_entry(*pte)))
+ continue;
+ pte_clear(&init_mm, addr, pte);
+ }
+}
+
+static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+
+ for (; addr < end; addr = next, pmd++) {
+ pte_t *pte;
+
+ next = pmd_addr_end(addr, end);
+
+ if (!pmd_present(*pmd))
+ continue;
+
+ if (kasan_pte_table(*pmd)) {
+ if (IS_ALIGNED(addr, PMD_SIZE) &&
+ IS_ALIGNED(next, PMD_SIZE))
+ pmd_clear(pmd);
+ continue;
+ }
+ pte = pte_offset_kernel(pmd, addr);
+ kasan_remove_pte_table(pte, addr, next);
+ kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
+ }
+}
+
+static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+
+ for (; addr < end; addr = next, pud++) {
+ pmd_t *pmd, *pmd_base;
+
+ next = pud_addr_end(addr, end);
+
+ if (!pud_present(*pud))
+ continue;
+
+ if (kasan_pmd_table(*pud)) {
+ if (IS_ALIGNED(addr, PUD_SIZE) &&
+ IS_ALIGNED(next, PUD_SIZE))
+ pud_clear(pud);
+ continue;
+ }
+ pmd = pmd_offset(pud, addr);
+ pmd_base = pmd_offset(pud, 0);
+ kasan_remove_pmd_table(pmd, addr, next);
+ kasan_free_pmd(pmd_base, pud);
+ }
+}
+
+static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
+ unsigned long end)
+{
+ unsigned long next;
+
+ for (; addr < end; addr = next, p4d++) {
+ pud_t *pud;
+
+ next = p4d_addr_end(addr, end);
+
+ if (!p4d_present(*p4d))
+ continue;
+
+ if (kasan_pud_table(*p4d)) {
+ if (IS_ALIGNED(addr, P4D_SIZE) &&
+ IS_ALIGNED(next, P4D_SIZE))
+ p4d_clear(p4d);
+ continue;
+ }
+ pud = pud_offset(p4d, addr);
+ kasan_remove_pud_table(pud, addr, next);
+ kasan_free_pud(pud_offset(p4d, 0), p4d);
+ }
+}
+
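+/*
+ * Unmap the zero shadow for [start, start + size) and free any page
+ * tables that served only this range; shared zero-shadow tables are
+ * cleared but never freed.
+ */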
+void kasan_remove_zero_shadow(void *start, unsigned long size)
+{
+ unsigned long addr, end, next;
+ pgd_t *pgd;
+
+ addr = (unsigned long)kasan_mem_to_shadow(start);
+ end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
+
+ if (WARN_ON((unsigned long)start %
+ (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
+ WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+ return;
+
+ for (; addr < end; addr = next) {
+ p4d_t *p4d;
+
+ next = pgd_addr_end(addr, end);
+
+ pgd = pgd_offset_k(addr);
+ if (!pgd_present(*pgd))
+ continue;
+
+ if (kasan_p4d_table(*pgd)) {
+ if (IS_ALIGNED(addr, PGDIR_SIZE) &&
+ IS_ALIGNED(next, PGDIR_SIZE))
+ pgd_clear(pgd);
+ continue;
+ }
+
+ p4d = p4d_offset(pgd, addr);
+ kasan_remove_p4d_table(p4d, addr, next);
+ kasan_free_p4d(p4d_offset(pgd, 0), pgd);
+ }
+}
+
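+/*
+ * Map the shadow for a newly plugged memory range to the zero page;
+ * meant for ranges that only ever need read-only zero shadow (e.g.
+ * device memory hot-add).
+ */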
+int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ int ret;
+ void *shadow_start, *shadow_end;
+
+ shadow_start = kasan_mem_to_shadow(start);
+ shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
+
+ if (WARN_ON((unsigned long)start %
+ (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
+ WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+ return -EINVAL;
+
+ ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
+ if (ret)
+ /*
+ * kasan_remove_zero_shadow() takes the original memory range,
+ * not the shadow range: it converts mem to shadow itself.
+ */
+ kasan_remove_zero_shadow(start, size);
+ return ret;
+}