/*
- * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <asm_macros_common.S>
#include <spinlock.h>
+/*
+ * TLB invalidation write to the given coprocessor register, implementing
+ * the workaround for Cortex-A57 erratum 813419 when needed.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_reg, _coproc) \
+ stcopr _reg, _coproc; \
+ dsb ish; \
+ stcopr _reg, _coproc
+#else
+#define TLB_INVALIDATE(_reg, _coproc) \
+ stcopr _reg, _coproc
+#endif
+
#define WORD_SIZE 4
/*
#include <asm_macros_common.S>
#include <spinlock.h>
+/*
+ * TLBI instruction with type specifier that implements the workaround for
+ * Cortex-A57 erratum 813419.
+ */
+#if ERRATA_A57_813419
+#define TLB_INVALIDATE(_type) \
+ tlbi _type; \
+ dsb ish; \
+ tlbi _type
+#else
+#define TLB_INVALIDATE(_type) \
+ tlbi _type
+#endif
+
.macro func_prologue
stp x29, x30, [sp, #-0x10]!
/*
* TTBR definitions
*/
-#define TTBR_CNP_BIT 0x1
+#define TTBR_CNP_BIT U(0x1)
/*
* CTR definitions
* expected.
*/
#define ARM_ARCH_AT_LEAST(_maj, _min) \
- ((ARM_ARCH_MAJOR > _maj) || \
- ((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))
+ ((ARM_ARCH_MAJOR > (_maj)) || \
+ ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))
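Illustration of why this hunk parenthesizes the macro arguments (not part of the patch; MAJOR/MINOR and the AT_LEAST_* names below are hypothetical stand-ins): an argument containing a low-precedence operator such as ?: binds to the comparison first in the old expansion.

#include <stdio.h>

#define MAJOR 8
#define MINOR 0

/* Old form, without parentheses around the arguments. */
#define AT_LEAST_OLD(_maj, _min) \
	((MAJOR > _maj) || ((MAJOR == _maj) && (MINOR >= _min)))

/* New form, as in the patch. */
#define AT_LEAST_NEW(_maj, _min) \
	((MAJOR > (_maj)) || ((MAJOR == (_maj)) && (MINOR >= (_min))))

int main(void)
{
	/* The argument evaluates to 9, so the answer should be 0. */
	printf("%d\n", AT_LEAST_OLD(0 ? 8 : 9, 0));	/* prints 1: wrong */
	printf("%d\n", AT_LEAST_NEW(0 ? 8 : 9, 0));	/* prints 0: right */
	return 0;
}

With the argument 0 ? 8 : 9 (which evaluates to 9), the old form mis-parses `MAJOR > 0 ? 8 : 9` as `(MAJOR > 0) ? 8 : 9` and wrongly reports success.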
/*
* Import an assembly or linker symbol as a C expression with the specified
#ifdef AARCH32
/* AArch32 specific translation table API */
void enable_mmu_secure(unsigned int flags);
+
+void enable_mmu_direct(unsigned int flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el3(unsigned int flags);
+
+void enable_mmu_direct_el1(unsigned int flags);
+void enable_mmu_direct_el3(unsigned int flags);
#endif /* AARCH32 */
int xlat_arch_is_granule_size_supported(size_t size);
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#define __XLAT_TABLES_V2_H__
#include <xlat_tables_defs.h>
+#include <xlat_tables_v2_helpers.h>
#ifndef __ASSEMBLY__
#include <stddef.h>
#include <stdint.h>
#include <xlat_mmu_helpers.h>
-#include <xlat_tables_v2_helpers.h>
/*
* Default granularity size for an mmap_region_t.
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#endif
+/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
+#define MMU_CFG_MAIR0 0
+#define MMU_CFG_TCR 1
+#define MMU_CFG_TTBR0_LO 2
+#define MMU_CFG_TTBR0_HI 3
+#define MMU_CFG_PARAM_MAX 4
+
#ifndef __ASSEMBLY__
#include <cassert.h>
#include <xlat_tables_arch.h>
#include <xlat_tables_defs.h>
+/* Register values used by the assembly code that enables the MMU */
+extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/* Forward declaration */
struct mmap_region;
.initialized = 0, \
}
+#endif /*__ASSEMBLY__*/
+
#if AARCH64
/*
#endif /* AARCH64 */
-#endif /*__ASSEMBLY__*/
-
#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
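Since mmu_cfg_params is shared between C and assembly, the assembly consumers address element i at byte offset (i << 2). A minimal standalone sketch of that correspondence, reusing the definitions above (the check itself is illustrative, not patch code):

#include <assert.h>
#include <stdint.h>

#define MMU_CFG_MAIR0		0
#define MMU_CFG_TCR		1
#define MMU_CFG_TTBR0_LO	2
#define MMU_CFG_TTBR0_HI	3
#define MMU_CFG_PARAM_MAX	4

static uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

int main(void)
{
	/* The byte offset used from assembly, e.g. [x0, #(MMU_CFG_TCR << 2)],
	 * matches &mmu_cfg_params[MMU_CFG_TCR] because each element is
	 * 4 bytes wide. */
	assert((uintptr_t) &mmu_cfg_params[MMU_CFG_TCR] ==
	       (uintptr_t) mmu_cfg_params + (MMU_CFG_TCR << 2));
	return 0;
}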
--- /dev/null
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct
+
+func enable_mmu_direct
+ /* Assert that MMU is turned off */
+#if ENABLE_ASSERTIONS
+ ldcopr r1, SCTLR
+ tst r1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ TLB_INVALIDATE(r0, TLBIALL)
+
+ mov r3, r0
+ ldr r0, =mmu_cfg_params
+
+ /* MAIR0 */
+ ldr r1, [r0, #(MMU_CFG_MAIR0 << 2)]
+ stcopr r1, MAIR0
+
+ /* TTBCR */
+ ldr r2, [r0, #(MMU_CFG_TCR << 2)]
+ stcopr r2, TTBCR
+
+ /* TTBR0 */
+ ldr r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
+ ldr r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
+ stcopr16 r1, r2, TTBR0_64
+
+ /* TTBR1 is unused right now; set it to 0. */
+ mov r1, #0
+ mov r2, #0
+ stcopr16 r1, r2, TTBR1_64
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Enable MMU, honoring the requested flags */
+ ldcopr r1, SCTLR
+ ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
+ orr r1, r1, r2
+
+ /* Clear C bit if requested */
+ tst r3, #DISABLE_DCACHE
+ bicne r1, r1, #SCTLR_C_BIT
+
+ stcopr r1, SCTLR
+ isb
+
+ bx lr
+endfunc enable_mmu_direct
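For reference, the ldr/orr and tst/bicne sequence above is the assembly counterpart of the C flag logic this patch removes from enable_mmu_arch(). A hedged C sketch, with a stand-in variable instead of the real SCTLR coprocessor register and an illustrative DISABLE_DCACHE value:

#include <stdint.h>
#include <stdio.h>

#define SCTLR_M_BIT	(1U << 0)
#define SCTLR_C_BIT	(1U << 2)
#define SCTLR_WXN_BIT	(1U << 19)
#define DISABLE_DCACHE	(1U << 0)	/* illustrative flag value */

static uint32_t sctlr_reg;	/* stand-in for the real SCTLR register */

static uint32_t read_sctlr(void) { return sctlr_reg; }
static void write_sctlr(uint32_t val) { sctlr_reg = val; }

static void set_sctlr(unsigned int flags)
{
	uint32_t sctlr = read_sctlr();

	/* Set WXN, C and M unconditionally, as the ldr/orr above does... */
	sctlr |= SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT;

	/* ...then clear C again if the caller disabled the D-cache,
	 * mirroring the tst/bicne pair. */
	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;

	write_sctlr(sctlr);
}

int main(void)
{
	set_sctlr(DISABLE_DCACHE);
	printf("SCTLR = 0x%x\n", sctlr_reg);	/* C bit clear, M/WXN set */
	return 0;
}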
/*
- * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#error ARMv7 target does not support LPAE MMU descriptors
#endif
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
******************************************************************************/
-void enable_mmu_arch(unsigned int flags,
- uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+ const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
- u_register_t mair0, ttbcr, sctlr;
+ u_register_t mair0, ttbcr;
uint64_t ttbr0;
assert(IS_IN_SECURE());
- sctlr = read_sctlr();
- assert((sctlr & SCTLR_M_BIT) == 0);
-
- /* Invalidate TLBs at the current exception level */
- tlbiall();
-
/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
ttbr0 |= TTBR_CNP_BIT;
#endif
- /* Now program the relevant system registers */
- write_mair0(mair0);
- write_ttbcr(ttbcr);
- write64_ttbr0(ttbr0);
- write64_ttbr1(0);
-
- /*
- * Ensure all translation table writes have drained
- * into memory, the TLB invalidation is complete,
- * and translation register writes are committed
- * before enabling the MMU
- */
- dsbish();
- isb();
-
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;
-
- if (flags & DISABLE_DCACHE)
- sctlr &= ~SCTLR_C_BIT;
- else
- sctlr |= SCTLR_C_BIT;
-
- write_sctlr(sctlr);
-
- /* Ensure the MMU enable takes effect immediately */
- isb();
+ /* Now populate MMU configuration */
+ mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
+ mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
+ mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
+ mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr0 >> 32);
}
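TTBR0 is a 64-bit register even on AArch32 (LPAE), which is why it occupies two 32-bit slots in mmu_cfg_params; enable_mmu_direct later reloads both halves into r1/r2 for the stcopr16 write. A round-trip sketch (the TTBR0 value is illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t ttbr0 = 0x0000000480001000ULL;	/* illustrative value */

	/* Split as setup_mmu_cfg() does... */
	uint32_t lo = (uint32_t) ttbr0;
	uint32_t hi = (uint32_t) (ttbr0 >> 32);

	/* ...and recombine as the assembly consumer effectively does. */
	uint64_t rebuilt = (uint64_t) lo | ((uint64_t) hi << 32);

	assert(rebuilt == ttbr0);
	return 0;
}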
--- /dev/null
+/*
+ * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <asm_macros.S>
+#include <assert_macros.S>
+#include <xlat_tables_v2.h>
+
+ .global enable_mmu_direct_el1
+ .global enable_mmu_direct_el3
+
+ /* Macros to read and write system registers for a given EL. */
+ .macro _msr reg_name, el, gp_reg
+ msr \reg_name\()_el\()\el, \gp_reg
+ .endm
+
+ .macro _mrs gp_reg, reg_name, el
+ mrs \gp_reg, \reg_name\()_el\()\el
+ .endm
+
+ .macro define_mmu_enable_func el
+ func enable_mmu_direct_\()el\el
+#if ENABLE_ASSERTIONS
+ _mrs x1, sctlr, \el
+ tst x1, #SCTLR_M_BIT
+ ASM_ASSERT(eq)
+#endif
+
+ /* Invalidate TLB entries */
+ .if \el == 1
+ TLB_INVALIDATE(vmalle1)
+ .else
+ .if \el == 3
+ TLB_INVALIDATE(alle3)
+ .else
+ .error "EL must be 1 or 3"
+ .endif
+ .endif
+
+ mov x7, x0
+ ldr x0, =mmu_cfg_params
+
+ /* MAIR */
+ ldr w1, [x0, #(MMU_CFG_MAIR0 << 2)]
+ _msr mair, \el, x1
+
+ /* TCR */
+ ldr w2, [x0, #(MMU_CFG_TCR << 2)]
+ _msr tcr, \el, x2
+
+ /* TTBR */
+ ldr w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
+ ldr w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
+ orr x3, x3, x4, lsl #32
+ _msr ttbr0, \el, x3
+
+ /*
+ * Ensure all translation table writes have drained into memory, the TLB
+ * invalidation is complete, and translation register writes are
+ * committed before enabling the MMU
+ */
+ dsb ish
+ isb
+
+ /* Set and clear required fields of SCTLR */
+ _mrs x4, sctlr, \el
+ mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
+ orr x4, x4, x5
+
+ /* Additionally, amend SCTLR fields based on flags */
+ bic x5, x4, #SCTLR_C_BIT
+ tst x7, #DISABLE_DCACHE
+ csel x4, x5, x4, ne
+
+ _msr sctlr, \el, x4
+ isb
+
+ ret
+ endfunc enable_mmu_direct_\()el\el
+ .endm
+
+ /*
+ * Define MMU-enabling functions for EL1 and EL3:
+ *
+ * enable_mmu_direct_el1
+ * enable_mmu_direct_el3
+ */
+ define_mmu_enable_func 1
+ define_mmu_enable_func 3
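define_mmu_enable_func stamps out one function per exception level, much as the DEFINE_ENABLE_MMU_EL C macro deleted further down did. A C sketch of the same stamping pattern (function names hypothetical):

#include <stdio.h>

/* Generate a trivial per-EL function, mirroring how the assembly macro
 * expands once per exception level. Names here are hypothetical. */
#define DEFINE_PER_EL_FUNC(_el)				\
	static void report_el##_el(void)		\
	{						\
		printf("running at EL%d\n", _el);	\
	}

DEFINE_PER_EL_FUNC(1)
DEFINE_PER_EL_FUNC(3)

int main(void)
{
	report_el1();
	report_el3();
	return 0;
}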
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
+uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];
+
/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
return el;
}
-/*******************************************************************************
- * Macro generating the code for the function enabling the MMU in the given
- * exception level, assuming that the pagetables have already been created.
- *
- * _el: Exception level at which the function will run
- * _tlbi_fct: Function to invalidate the TLBs at the current
- * exception level
- ******************************************************************************/
-#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct) \
- static void enable_mmu_internal_el##_el(int flags, \
- uint64_t mair, \
- uint64_t tcr, \
- uint64_t ttbr) \
- { \
- uint32_t sctlr = read_sctlr_el##_el(); \
- assert((sctlr & SCTLR_M_BIT) == 0); \
- \
- /* Invalidate TLBs at the current exception level */ \
- _tlbi_fct(); \
- \
- write_mair_el##_el(mair); \
- write_tcr_el##_el(tcr); \
- \
- /* Set TTBR bits as well */ \
- if (ARM_ARCH_AT_LEAST(8, 2)) { \
- /* Enable CnP bit so as to share page tables */ \
- /* with all PEs. This is mandatory for */ \
- /* ARMv8.2 implementations. */ \
- ttbr |= TTBR_CNP_BIT; \
- } \
- write_ttbr0_el##_el(ttbr); \
- \
- /* Ensure all translation table writes have drained */ \
- /* into memory, the TLB invalidation is complete, */ \
- /* and translation register writes are committed */ \
- /* before enabling the MMU */ \
- dsbish(); \
- isb(); \
- \
- sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
- if (flags & DISABLE_DCACHE) \
- sctlr &= ~SCTLR_C_BIT; \
- else \
- sctlr |= SCTLR_C_BIT; \
- \
- write_sctlr_el##_el(sctlr); \
- \
- /* Ensure the MMU enable takes effect immediately */ \
- isb(); \
- }
-
-/* Define EL1 and EL3 variants of the function enabling the MMU */
-#if IMAGE_EL == 1
-DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
-#elif IMAGE_EL == 3
-DEFINE_ENABLE_MMU_EL(3, tlbialle3)
-#endif
-
-void enable_mmu_arch(unsigned int flags,
- uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags,
+ const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
uint64_t mair, ttbr, tcr;
+ uintptr_t virtual_addr_space_size;
/* Set attributes in the right indices of the MAIR. */
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
ttbr = (uint64_t) base_table;
- /*
- * Set TCR bits as well.
- */
-
/*
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
- assert(max_va < UINTPTR_MAX);
- uintptr_t virtual_addr_space_size = max_va + 1;
+ assert(max_va < ((uint64_t) UINTPTR_MAX));
+
+ virtual_addr_space_size = max_va + 1;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));
+
/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
- tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
+ tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);
/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
- if (flags & XLAT_TABLE_NC) {
+ if ((flags & XLAT_TABLE_NC) != 0) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
* translated using TTBR1_EL1.
*/
tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
- enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
- enable_mmu_internal_el3(flags, mair, tcr, ttbr);
#endif
+
+ mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
+ mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;
+
+ /* Set TTBR bits as well */
+ if (ARM_ARCH_AT_LEAST(8, 2)) {
+ /*
+ * Enable CnP bit so as to share page tables with all PEs. This
+ * is mandatory for ARMv8.2 implementations.
+ */
+ ttbr |= TTBR_CNP_BIT;
+ }
+
+ mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
+ mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
}
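As a worked example of the T0SZ computation above (illustrative values): a 4 GiB virtual address space has max_va == 0xFFFFFFFF, so virtual_addr_space_size is 1ULL << 32 and 64 - __builtin_ctzll(...) yields T0SZ = 32.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uintptr_t max_va = 0xFFFFFFFFU;		/* illustrative 4 GiB space */
	uint64_t size = (uint64_t) max_va + 1ULL;

	/* Power-of-two size: the number of trailing zeros is the width
	 * of the address space, and T0SZ is 64 minus that width. */
	uint64_t t0sz = (uint64_t) 64 - (uint64_t) __builtin_ctzll(size);

	assert(t0sz == 32ULL);
	return 0;
}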
#
-# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
+ ${ARCH}/enable_mmu.S \
${ARCH}/xlat_tables_arch.c \
xlat_tables_internal.c)
* that there is free space.
*/
assert(mm_last->size == 0U);
-
+
/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
memmove(mm_destination, mm_cursor,
void enable_mmu_secure(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct(flags);
}
#else
void enable_mmu_el1(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el1(flags);
}
void enable_mmu_el3(unsigned int flags)
{
- enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
+ setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
+ enable_mmu_direct_el3(flags);
}
#endif /* AARCH32 */
unsigned long long xlat_arch_get_max_supported_pa(void);
/* Enable MMU and configure it to use the specified translation tables. */
-void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
+void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
unsigned long long max_pa, uintptr_t max_va);
/*