The region attributes specify the type of memory (for example device or cached
normal memory) as well as the memory access permissions (read-only or
-read-write, executable or not, secure or non-secure, and so on). See the
-``mmap_attr_t`` enumeration type in `xlat\_tables\_v2.h`_.
+read-write, executable or not, secure or non-secure, and so on). In the case of
+the EL1&0 translation regime, the attributes also specify whether the region is
+a User region (EL0) or a Privileged region (EL1). See the ``mmap_attr_t``
+enumeration type in `xlat\_tables\_v2.h`_. Note that, for the EL1&0 translation
+regime, the Execute Never attribute is set simultaneously for both EL1 and EL0.
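
As a minimal sketch (the addresses, sizes and attribute combinations below are
arbitrary and purely illustrative), a BL32 image, whose default context uses
the EL1&0 regime, could describe a buffer shared with EL0 software and a
privileged-only buffer as follows::

    /* Normal memory, read-write, never executable, accessible from EL0 */
    mmap_add_region(0x80000000, 0x80000000, 0x1000,
                    MT_RW_DATA | MT_SECURE | MT_USER);

    /* Normal memory, read-write, never executable, EL1 access only */
    mmap_add_region(0x80001000, 0x80001000, 0x1000,
                    MT_RW_DATA | MT_SECURE | MT_PRIVILEGED);
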
The granularity controls the translation table level to go down to when mapping
the region. For example, assuming the MMU has been configured to use a 4KB
* AP[1] bit is ignored by hardware and is
* treated as if it is One in EL2/EL3
*/
-#define AP_RO (U(0x1) << 5)
-#define AP_RW (U(0x0) << 5)
+#define AP2_SHIFT U(0x7)
+#define AP2_RO U(0x1)
+#define AP2_RW U(0x0)
+#define AP1_SHIFT U(0x6)
+#define AP1_ACCESS_UNPRIVILEGED U(0x1)
+#define AP1_NO_ACCESS_UNPRIVILEGED U(0x0)
+
+/*
+ * The following definitions must all be passed to the LOWER_ATTRS() macro to
+ * get the right bitmask.
+ */
+#define AP_RO (AP2_RO << 5)
+#define AP_RW (AP2_RW << 5)
+#define AP_ACCESS_UNPRIVILEGED (AP1_ACCESS_UNPRIVILEGED << 4)
+#define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4)
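
/*
 * Illustrative note, assuming LOWER_ATTRS() places its argument at bit 2 of
 * the descriptor: the combinations above land in the architectural AP[2:1]
 * field (descriptor bits [7:6]) as follows.
 *
 *   LOWER_ATTRS(AP_RW | AP_NO_ACCESS_UNPRIVILEGED) -> AP[2:1] = 0b00
 *   LOWER_ATTRS(AP_RW | AP_ACCESS_UNPRIVILEGED)    -> AP[2:1] = 0b01
 *   LOWER_ATTRS(AP_RO | AP_NO_ACCESS_UNPRIVILEGED) -> AP[2:1] = 0b10
 *   LOWER_ATTRS(AP_RO | AP_ACCESS_UNPRIVILEGED)    -> AP[2:1] = 0b11
 */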
#define NS (U(0x1) << 3)
#define ATTR_NON_CACHEABLE_INDEX U(0x2)
#define ATTR_DEVICE_INDEX U(0x1)
#define MT_SEC_SHIFT U(4)
/* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */
#define MT_EXECUTE_SHIFT U(5)
+/*
+ * In the EL1&0 translation regime, mark the region as User (EL0) or
+ * Privileged (EL1). In the EL3 translation regime this has no effect.
+ */
+#define MT_USER_SHIFT U(6)
/* All other bits are reserved */
/*
*/
MT_EXECUTE = U(0) << MT_EXECUTE_SHIFT,
MT_EXECUTE_NEVER = U(1) << MT_EXECUTE_SHIFT,
+
+ /*
+ * In the EL1&0 translation regime, this attribute determines whether a
+ * User mapping (EL0) or a Privileged mapping (EL1) is created.
+ */
+ MT_USER = U(1) << MT_USER_SHIFT,
+ MT_PRIVILEGED = U(0) << MT_USER_SHIFT,
} mmap_attr_t;
+/* Compound attributes for most common usages */
#define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE)
#define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER)
+#define MT_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER)
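
/*
 * Hypothetical sketch, not part of the library API: in an EL1&0 context the
 * same pattern extends naturally with the User/Privileged attribute, e.g.
 */
#define MT_USER_RW_DATA (MT_MEMORY | MT_RW | MT_EXECUTE_NEVER | MT_USER)
#define MT_USER_CODE    (MT_MEMORY | MT_RO | MT_EXECUTE | MT_USER)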
/*
* Structure for specifying a single region of memory.
*/
#define REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
_virt_addr_space_size, _phy_addr_space_size) \
- _REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
- _virt_addr_space_size, _phy_addr_space_size)
+ _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, \
+ _virt_addr_space_size, \
+ _phy_addr_space_size, \
+ IMAGE_XLAT_DEFAULT_REGIME)
+
+/*
+ * Same as REGISTER_XLAT_CONTEXT plus the additional parameter _xlat_regime to
+ * specify the translation regime managed by this xlat_ctx_t instance. Valid
+ * values for _xlat_regime come from the xlat_regime_t enumeration.
+ */
+#define REGISTER_XLAT_CONTEXT2(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size, \
+ _xlat_regime) \
+ _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, \
+ _xlat_tables_count, \
+ _virt_addr_space_size, \
+ _phy_addr_space_size, \
+ _xlat_regime)
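
/*
 * Minimal usage sketch with hypothetical name and sizes: a C file in an image
 * that manages Secure-EL1&0 translations could register its context as shown
 * below. REGISTER_XLAT_CONTEXT() would instead pick the image's default
 * regime.
 */
REGISTER_XLAT_CONTEXT2(sel1_xlat_ctx, 16, 8,
                       (1ULL << 32), (1ULL << 32),
                       EL1_EL0_REGIME);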
/******************************************************************************
* Generic translation table APIs.
unsigned int initialized;
/*
- * Bit mask that has to be ORed to the rest of a translation table
- * descriptor in order to prohibit execution of code at the exception
- * level of this translation context.
+ * Translation regime managed by this xlat_ctx_t. It takes values from the
+ * xlat_regime_t enumeration. The type is "int" to avoid a circular
+ * dependency on xlat_tables_v2.h, but this member must be treated as
+ * xlat_regime_t.
*/
- uint64_t execute_never_mask;
+ int xlat_regime;
};
#if PLAT_XLAT_TABLES_DYNAMIC
/* do nothing */
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
-#define _REGISTER_XLAT_CONTEXT(_ctx_name, _mmap_count, _xlat_tables_count, \
- _virt_addr_space_size, _phy_addr_space_size) \
+#define _REGISTER_XLAT_CONTEXT_FULL_SPEC(_ctx_name, _mmap_count, _xlat_tables_count, \
+ _virt_addr_space_size, _phy_addr_space_size, \
+ _xlat_regime) \
CASSERT(CHECK_VIRT_ADDR_SPACE_SIZE(_virt_addr_space_size), \
assert_invalid_virtual_addr_space_size_for_##_ctx_name); \
\
.tables = _ctx_name##_xlat_tables, \
.tables_num = _xlat_tables_count, \
_REGISTER_DYNMAP_STRUCT(_ctx_name) \
+ .xlat_regime = (_xlat_regime), \
.max_pa = 0, \
.max_va = 0, \
.next_table = 0, \
.initialized = 0, \
}
+
+/* This IMAGE_EL macro must not be used outside the library */
+#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
+# define IMAGE_EL 3
+# define IMAGE_XLAT_DEFAULT_REGIME EL3_REGIME
+#else
+# define IMAGE_EL 1
+# define IMAGE_XLAT_DEFAULT_REGIME EL1_EL0_REGIME
+#endif
+
#endif /*__ASSEMBLY__*/
#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
{
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
return 3;
}
-uint64_t xlat_arch_get_xn_desc(int el __unused)
-{
- return UPPER_ATTRS(XN);
-}
-
/*******************************************************************************
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that prevents instruction fetch in the given
+ * translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime __unused)
+{
+ return UPPER_ATTRS(XN);
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"
-#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
-# define IMAGE_EL 3
-#else
-# define IMAGE_EL 1
-#endif
-
static unsigned long long calc_physical_addr_size_bits(
unsigned long long max_addr)
{
}
#endif /* ENABLE_ASSERTIONS*/
-int is_mmu_enabled(void)
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
-#if IMAGE_EL == 1
- assert(IS_IN_EL(1));
- return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
-#elif IMAGE_EL == 3
- assert(IS_IN_EL(3));
- return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
-#endif
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ assert(xlat_arch_current_el() >= 1);
+ return (read_sctlr_el1() & SCTLR_M_BIT) != 0;
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ assert(xlat_arch_current_el() >= 3);
+ return (read_sctlr_el3() & SCTLR_M_BIT) != 0;
+ }
}
+
void xlat_arch_tlbi_va(uintptr_t va)
{
#if IMAGE_EL == 1
return el;
}
-uint64_t xlat_arch_get_xn_desc(int el)
-{
- if (el == 3) {
- return UPPER_ATTRS(XN);
- } else {
- assert(el == 1);
- return UPPER_ATTRS(PXN);
- }
-}
-
/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __XLAT_TABLES_ARCH_PRIVATE_H__
+#define __XLAT_TABLES_ARCH_PRIVATE_H__
+
+#include <assert.h>
+#include <xlat_tables_defs.h>
+#include <xlat_tables_v2.h>
+
+/*
+ * Return the execute-never mask that will prevent instruction fetch at all ELs
+ * that are part of the given translation regime.
+ */
+static inline uint64_t xlat_arch_regime_get_xn_desc(xlat_regime_t regime)
+{
+ if (regime == EL1_EL0_REGIME) {
+ return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
+ } else {
+ assert(regime == EL3_REGIME);
+ return UPPER_ATTRS(XN);
+ }
+}
+
+#endif /* __XLAT_TABLES_ARCH_PRIVATE_H__ */
XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
${ARCH}/xlat_tables_arch.c \
xlat_tables_internal.c)
+
+INCLUDES += -Ilib/xlat_tables_v2/${ARCH}
#include <string.h>
#include <types.h>
#include <utils.h>
-#include <xlat_tables_arch.h>
+#include <xlat_tables_arch_private.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-/* Returns a block/page table descriptor for the given level and attributes. */
-static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
- int level, uint64_t execute_never_mask)
+/*
+ * Returns a block/page table descriptor for the given level and attributes.
+ */
+uint64_t xlat_desc(const xlat_ctx_t *ctx, mmap_attr_t attr,
+ unsigned long long addr_pa, int level)
{
uint64_t desc;
int mem_type;
* Deduce other fields of the descriptor based on the MT_NS and MT_RW
* memory region attributes.
*/
+ desc |= LOWER_ATTRS(ACCESS_FLAG);
+
desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
- desc |= LOWER_ATTRS(ACCESS_FLAG);
+
+ /*
+ * Do not allow unprivileged access when the mapping is for a privileged
+ * EL. For translation regimes that do not map lower exception levels, AP[1]
+ * is always set to AP_NO_ACCESS_UNPRIVILEGED.
+ */
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ if (attr & MT_USER) {
+ /* EL0 mapping requested, so we give User access */
+ desc |= LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED);
+ } else {
+ /* EL1 mapping requested, no User access granted */
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ desc |= LOWER_ATTRS(AP_NO_ACCESS_UNPRIVILEGED);
+ }
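
/*
 * Net effect for an EL1&0 context (an EL3 context never grants EL0 access),
 * given the AP[2:1] definitions earlier in this patch:
 *
 *   MT_RW | MT_PRIVILEGED -> EL1: RW, EL0: no access
 *   MT_RW | MT_USER       -> EL1: RW, EL0: RW
 *   MT_RO | MT_PRIVILEGED -> EL1: RO, EL0: no access
 *   MT_RO | MT_USER       -> EL1: RO, EL0: RO
 */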
/*
* Deduce shareability domain and executability of the memory region
* fetch, which could be an issue if this memory region
* corresponds to a read-sensitive peripheral.
*/
- desc |= execute_never_mask;
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
} else { /* Normal memory */
/*
* translation table.
*
* For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
- * attribute to figure out the value of the XN bit.
+ * attribute to figure out the value of the XN bit. The actual
+ * XN bit(s) to set in the descriptor depends on the context's
+ * translation regime and the policy applied in
+ * xlat_arch_regime_get_xn_desc().
*/
if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
- desc |= execute_never_mask;
+ desc |= xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
}
if (mem_type == MT_MEMORY) {
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va);
+ xlat_arch_tlbi_va_regime(table_idx_va, ctx->xlat_regime);
} else if (action == ACTION_RECURSE_INTO_TABLE) {
*/
if (xlat_table_is_empty(ctx, subtable)) {
table_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va);
+ xlat_arch_tlbi_va_regime(table_idx_va,
+ ctx->xlat_regime);
}
} else {
if (action == ACTION_WRITE_BLOCK_ENTRY) {
table_base[table_idx] =
- xlat_desc(mm->attr, table_idx_pa, level,
- ctx->execute_never_mask);
+ xlat_desc(ctx, mm->attr, table_idx_pa, level);
} else if (action == ACTION_CREATE_NEW_TABLE) {
.size = end_va - mm->base_va,
.attr = 0
};
- xlat_tables_unmap_region(ctx,
- &unmap_mm, 0, ctx->base_table,
- ctx->base_table_entries, ctx->base_level);
+ xlat_tables_unmap_region(ctx, &unmap_mm, 0, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
return -ENOMEM;
}
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
+static void xlat_desc_print(xlat_ctx_t *ctx, uint64_t desc)
{
int mem_type_index = ATTR_INDEX_GET(desc);
+ xlat_regime_t xlat_regime = ctx->xlat_regime;
if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
tf_printf("MEM");
tf_printf("DEV");
}
- tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
+ const char *priv_str = "(PRIV)";
+ const char *user_str = "(USER)";
+
+ /*
+ * Showing Privileged vs Unprivileged only makes sense for EL1&0
+ * mappings
+ */
+ const char *ro_str = "-RO";
+ const char *rw_str = "-RW";
+ const char *no_access_str = "-NOACCESS";
+
+ if (xlat_regime == EL3_REGIME) {
+ /* For EL3, the AP[2] bit is all that matters */
+ tf_printf((desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str);
+ } else {
+ const char *ap_str = (desc & LOWER_ATTRS(AP_RO)) ? ro_str : rw_str;
+ tf_printf(ap_str);
+ tf_printf(priv_str);
+ /*
+ * EL0 can only have the same permissions as EL1 or no
+ * permissions at all.
+ */
+ tf_printf((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED))
+ ? ap_str : no_access_str);
+ tf_printf(user_str);
+ }
+
+ const char *xn_str = "-XN";
+ const char *exec_str = "-EXEC";
+
+ if (xlat_regime == EL3_REGIME) {
+ /* For EL3, the XN bit is all that matters */
+ tf_printf(UPPER_ATTRS(XN) & desc ? xn_str : exec_str);
+ } else {
+ /* For EL0 and EL1, we need to know who has which rights */
+ tf_printf(UPPER_ATTRS(PXN) & desc ? xn_str : exec_str);
+ tf_printf(priv_str);
+
+ tf_printf(UPPER_ATTRS(UXN) & desc ? xn_str : exec_str);
+ tf_printf(user_str);
+ }
+
tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
- tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
}
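
/*
 * With the policy above, a verbose dump entry for an EL1&0 context looks
 * roughly like this (illustrative values):
 *
 *   VA:0x40000000 PA:0x40000000 size:0x1000 MEM-RW(PRIV)-NOACCESS(USER)-XN(PRIV)-XN(USER)-S
 *
 * while an EL3 context prints the shorter form, e.g. MEM-RO-EXEC-S.
 */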
static const char * const level_spacers[] = {
* Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
-static void xlat_tables_print_internal(const uintptr_t table_base_va,
+static void xlat_tables_print_internal(xlat_ctx_t *ctx,
+ const uintptr_t table_base_va,
uint64_t *const table_base, const int table_entries,
- const unsigned int level, const uint64_t execute_never_mask)
+ const unsigned int level)
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
- xlat_tables_print_internal(table_idx_va,
+ xlat_tables_print_internal(ctx, table_idx_va,
(uint64_t *)addr_inner,
- XLAT_TABLE_ENTRIES, level+1,
- execute_never_mask);
+ XLAT_TABLE_ENTRIES, level + 1);
} else {
tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
level_spacers[level],
(void *)table_idx_va,
(unsigned long long)(desc & TABLE_ADDR_MASK),
level_size);
- xlat_desc_print(desc, execute_never_mask);
+ xlat_desc_print(ctx, desc);
tf_printf("\n");
}
}
void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ const char *xlat_regime_str;
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ xlat_regime_str = "1&0";
+ } else {
+ assert(ctx->xlat_regime == EL3_REGIME);
+ xlat_regime_str = "3";
+ }
VERBOSE("Translation tables state:\n");
+ VERBOSE(" Xlat regime: EL%s\n", xlat_regime_str);
VERBOSE(" Max allowed PA: 0x%llx\n", ctx->pa_max_address);
VERBOSE(" Max allowed VA: %p\n", (void *) ctx->va_max_address);
VERBOSE(" Max mapped PA: 0x%llx\n", ctx->max_pa);
used_page_tables, ctx->tables_num,
ctx->tables_num - used_page_tables);
- xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
- ctx->base_level, ctx->execute_never_mask);
+ xlat_tables_print_internal(ctx, 0, ctx->base_table,
+ ctx->base_table_entries, ctx->base_level);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}
void init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
- mmap_region_t *mm = ctx->mmap;
-
- assert(!is_mmu_enabled());
+ assert(ctx != NULL);
assert(!ctx->initialized);
+ assert(ctx->xlat_regime == EL3_REGIME || ctx->xlat_regime == EL1_EL0_REGIME);
+ assert(!is_mmu_enabled_ctx(ctx));
- print_mmap(mm);
+ mmap_region_t *mm = ctx->mmap;
- ctx->execute_never_mask =
- xlat_arch_get_xn_desc(xlat_arch_current_el());
+ print_mmap(mm);
/* All tables must be zeroed before mapping any region. */
/* Returns the current Exception Level. The returned EL must be 1 or higher. */
int xlat_arch_current_el(void);
-/*
- * Returns the bit mask that has to be ORed to the rest of a translation table
- * descriptor so that execution of code is prohibited at the given Exception
- * Level.
- */
-uint64_t xlat_arch_get_xn_desc(int el);
-
/*
* Return the maximum physical address supported by the hardware.
* This value depends on the execution state (AArch32/AArch64).
void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
unsigned long long pa, uintptr_t max_va);
-/* Return 1 if the MMU of this Exception Level is enabled, 0 otherwise. */
-int is_mmu_enabled(void);
+/*
+ * Return 1 if the MMU of the translation regime managed by the given xlat_ctx_t
+ * is enabled, 0 otherwise.
+ */
+int is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
#endif /* __XLAT_TABLES_PRIVATE_H__ */