fvp: Provide per-EL MMU setup functions
author Sandrine Bailleux <sandrine.bailleux@arm.com>
Fri, 9 May 2014 10:35:36 +0000 (11:35 +0100)
committer Sandrine Bailleux <sandrine.bailleux@arm.com>
Fri, 9 May 2014 13:56:10 +0000 (14:56 +0100)
Instead of having a single version of the MMU setup functions for all
bootloader images that can execute either in EL3 or in EL1, provide
separate functions for EL1 and EL3. Each bootloader image can then
call the appropriate version of these functions. The aim is to reduce
the amount of code compiled into each BL image by embedding only what
it needs (e.g. BL1 embeds only the EL3 variants; see the sketch after
the file list below).

Change-Id: Ib86831d5450cf778ae78c9c1f7553fe91274c2fa

bl32/tsp/aarch64/tsp_entrypoint.S
plat/fvp/aarch64/plat_common.c
plat/fvp/bl1_plat_setup.c
plat/fvp/bl2_plat_setup.c
plat/fvp/bl31_plat_setup.c
plat/fvp/bl32_plat_setup.c
plat/fvp/platform.h
services/std_svc/psci/psci_afflvl_on.c

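Note (not part of the commit): the patch relies on C preprocessor token pasting (##) to stamp out one copy of a function body per exception level. A minimal, self-contained sketch of that technique, using printing stubs in place of the real system-register accessors (all helper bodies below are illustrative stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the firmware's system-register accessors (illustration only). */
static void write_sctlr_el1(uint32_t val) { printf("SCTLR_EL1 <- 0x%x\n", (unsigned)val); }
static void write_sctlr_el3(uint32_t val) { printf("SCTLR_EL3 <- 0x%x\n", (unsigned)val); }

#define SCTLR_M_BIT (1u << 0)

/* One macro body, stamped out per exception level via ## token pasting. */
#define DEFINE_ENABLE_MMU_EL(_el)                       \
        void enable_mmu_el##_el(void)                   \
        {                                               \
                write_sctlr_el##_el(SCTLR_M_BIT);       \
        }

DEFINE_ENABLE_MMU_EL(1)         /* defines enable_mmu_el1() */
DEFINE_ENABLE_MMU_EL(3)         /* defines enable_mmu_el3() */

int main(void)
{
        enable_mmu_el1();       /* what an EL1 image (BL2, BL32) would call */
        enable_mmu_el3();       /* what an EL3 image (BL1, BL31) would call */
        return 0;
}

The preprocessor output (e.g. via cc -E) contains the two generated definitions, enable_mmu_el1() and enable_mmu_el3(); each BL image then calls only the variant matching the exception level it runs in.
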
index 97d54f6597f31c666907f42495302b5d2f01c2b5..aeb54bc41a620e623d78a1ea0e828eb0a8232f53 100644 (file)
@@ -203,7 +203,7 @@ func tsp_cpu_on_entry
         * Initialise the MMU
         * ---------------------------------------------
         */
-       bl      enable_mmu
+       bl      enable_mmu_el1
 
        /* ---------------------------------------------
         * Give ourselves a stack allocated in Normal
index 9e205a0d8f02bbd19bfb70df92701b7b2c4d6057..099751dc5fa1fdde281602a61f831d0302416c48 100644 (file)
 static unsigned long platform_config[CONFIG_LIMIT];
 
 /*******************************************************************************
- * Enable the MMU assuming that the pagetables have already been created
- *******************************************************************************/
-void enable_mmu()
-{
-       unsigned long mair, tcr, ttbr, sctlr;
-
-       /* Set the attributes in the right indices of the MAIR */
-       mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
-       mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
-                                 ATTR_IWBWA_OWBWA_NTR_INDEX);
-
-       /*
-        * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
-        */
-       tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
-                 TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
-
-       /* Set TTBR bits as well */
-       ttbr = (unsigned long) l1_xlation_table;
-
-       if (IS_IN_EL3()) {
-               assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);
-
-               write_mair_el3(mair);
-               tcr |= TCR_EL3_RES1;
-               /* Invalidate EL3 TLBs */
-               tlbialle3();
-
-               write_tcr_el3(tcr);
-               write_ttbr0_el3(ttbr);
-
-               /* ensure all translation table writes have drained into memory,
-                * the TLB invalidation is complete, and translation register
-                * writes are committed before enabling the MMU
-                */
-               dsb();
-               isb();
-
-               sctlr = read_sctlr_el3();
-               sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-               sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-               write_sctlr_el3(sctlr);
-       } else {
-               assert((read_sctlr_el1() & SCTLR_M_BIT) == 0);
-
-               write_mair_el1(mair);
-               /* Invalidate EL1 TLBs */
-               tlbivmalle1();
-
-               write_tcr_el1(tcr);
-               write_ttbr0_el1(ttbr);
-
-               /* ensure all translation table writes have drained into memory,
-                * the TLB invalidation is complete, and translation register
-                * writes are committed before enabling the MMU
-                */
-               dsb();
-               isb();
-
-               sctlr = read_sctlr_el1();
-               sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
-               sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
-               write_sctlr_el1(sctlr);
+ * Macro generating the code for the function enabling the MMU in the given
+ * exception level, assuming that the pagetables have already been created.
+ *
+ *   _el:              Exception level at which the function will run
+ *   _tcr_extra:       Extra bits to set in the TCR register. This mask will
+ *                     be OR'ed with the default TCR value.
+ *   _tlbi_fct:                Function to invalidate the TLBs at the current
+ *                     exception level
+ ******************************************************************************/
+#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)               \
+       void enable_mmu_el##_el(void)                                   \
+       {                                                               \
+               uint64_t mair, tcr, ttbr;                               \
+               uint32_t sctlr;                                         \
+                                                                       \
+               assert(IS_IN_EL(_el));                                  \
+               assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0);      \
+                                                                       \
+               /* Set attributes in the right indices of the MAIR */   \
+               mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);   \
+               mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,             \
+                               ATTR_IWBWA_OWBWA_NTR_INDEX);            \
+               write_mair_el##_el(mair);                               \
+                                                                       \
+               /* Invalidate TLBs at the current exception level */    \
+               _tlbi_fct();                                            \
+                                                                       \
+               /* Set TCR bits as well. */                             \
+               /* Inner & outer WBWA & shareable + T0SZ = 32 */        \
+               tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |      \
+                       TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;               \
+               tcr |= _tcr_extra;                                      \
+               write_tcr_el##_el(tcr);                                 \
+                                                                       \
+               /* Set TTBR bits as well */                             \
+               ttbr = (uint64_t) l1_xlation_table;                     \
+               write_ttbr0_el##_el(ttbr);                              \
+                                                                       \
+               /* Ensure all translation table writes have drained */  \
+               /* into memory, the TLB invalidation is complete, */    \
+               /* and translation register writes are committed */     \
+               /* before enabling the MMU */                           \
+               dsb();                                                  \
+               isb();                                                  \
+                                                                       \
+               sctlr = read_sctlr_el##_el();                           \
+               sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;     \
+               sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;                     \
+               write_sctlr_el##_el(sctlr);                             \
+                                                                       \
+               /* Ensure the MMU enable takes effect immediately */    \
+               isb();                                                  \
        }
-       /* ensure the MMU enable takes effect immediately */
-       isb();
 
-       return;
-}
+/* Define EL1 and EL3 variants of the function enabling the MMU */
+DEFINE_ENABLE_MMU_EL(1, 0, tlbivmalle1)
+DEFINE_ENABLE_MMU_EL(3, TCR_EL3_RES1, tlbialle3)
 
 /*
  * Table of regions to map using the MMU.
- * This doesn't include TZRAM as the 'mem_layout' argument passed to to
- * configure_mmu() will give the available subset of that,
+ * This doesn't include TZRAM as the 'mem_layout' argument passed to
+ * configure_mmu_elx() will give the available subset of that,
  */
 const mmap_region_t fvp_mmap[] = {
        { TZROM_BASE,   TZROM_SIZE,     MT_MEMORY | MT_RO | MT_SECURE },
@@ -138,28 +126,32 @@ const mmap_region_t fvp_mmap[] = {
 };
 
 /*******************************************************************************
- * Setup the pagetables as per the platform memory map & initialize the mmu
- *******************************************************************************/
-void configure_mmu(meminfo_t *mem_layout,
-                  unsigned long ro_start,
-                  unsigned long ro_limit,
-                  unsigned long coh_start,
-                  unsigned long coh_limit)
-{
-       mmap_add_region(mem_layout->total_base, mem_layout->total_size,
-                               MT_MEMORY | MT_RW | MT_SECURE);
-       mmap_add_region(ro_start, ro_limit - ro_start,
-                               MT_MEMORY | MT_RO | MT_SECURE);
-       mmap_add_region(coh_start, coh_limit - coh_start,
-                               MT_DEVICE | MT_RW | MT_SECURE);
-
-       mmap_add(fvp_mmap);
-
-       init_xlat_tables();
+ * Macro generating the code for the function setting up the pagetables as per
+ * the platform memory map & initialize the mmu, for the given exception level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el)                                   \
+       void configure_mmu_el##_el(meminfo_t *mem_layout,               \
+                                  unsigned long ro_start,              \
+                                  unsigned long ro_limit,              \
+                                  unsigned long coh_start,             \
+                                  unsigned long coh_limit)             \
+       {                                                               \
+               mmap_add_region(mem_layout->total_base,                 \
+                               mem_layout->total_size,                 \
+                               MT_MEMORY | MT_RW | MT_SECURE);         \
+               mmap_add_region(ro_start, ro_limit - ro_start,          \
+                               MT_MEMORY | MT_RO | MT_SECURE);         \
+               mmap_add_region(coh_start, coh_limit - coh_start,       \
+                               MT_DEVICE | MT_RW | MT_SECURE);         \
+               mmap_add(fvp_mmap);                                     \
+               init_xlat_tables();                                     \
+                                                                       \
+               enable_mmu_el##_el();                                   \
+       }
 
-       enable_mmu();
-       return;
-}
+/* Define EL1 and EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(1)
+DEFINE_CONFIGURE_MMU_EL(3)
 
 /* Simple routine which returns a configuration variable value */
 unsigned long platform_get_cfgvar(unsigned int var_id)
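
Note (not part of the commit): DEFINE_CONFIGURE_MMU_EL above follows the same pattern, with the generated configure_mmu_el<EL>() wrapper ending in a call to the matching enable_mmu_el<EL>(). A simplified, self-contained sketch with printing stubs (the helper signatures and addresses below are placeholders, not the real FVP memory map or xlat-table API):

#include <stdint.h>
#include <stdio.h>

/* Printing stubs standing in for the xlat-table helpers and the generated
 * enable_mmu_el<EL>() functions; not the real firmware implementations. */
static void mmap_add_region(uint64_t base, uint64_t size)
{
        printf("map region: base=0x%llx size=0x%llx\n",
               (unsigned long long)base, (unsigned long long)size);
}
static void init_xlat_tables(void) { printf("translation tables created\n"); }
static void enable_mmu_el1(void)   { printf("MMU enabled at EL1\n"); }
static void enable_mmu_el3(void)   { printf("MMU enabled at EL3\n"); }

/* One body per exception level: map the image's memory, build the tables,
 * then enable the MMU at the matching EL. */
#define DEFINE_CONFIGURE_MMU_EL(_el)                            \
        void configure_mmu_el##_el(uint64_t total_base,         \
                                   uint64_t total_size)         \
        {                                                       \
                mmap_add_region(total_base, total_size);        \
                init_xlat_tables();                             \
                enable_mmu_el##_el();                           \
        }

DEFINE_CONFIGURE_MMU_EL(1)
DEFINE_CONFIGURE_MMU_EL(3)

int main(void)
{
        /* Placeholder addresses, not the FVP memory map. */
        configure_mmu_el3(0x04000000ULL, 0x00040000ULL);  /* a BL1-like EL3 image */
        configure_mmu_el1(0x04020000ULL, 0x00020000ULL);  /* a BL2-like EL1 image */
        return 0;
}

This mirrors the call-site changes in the remaining hunks: the EL3 images (BL1, BL31, PSCI code in BL31) call the EL3 variants, while the EL1 images (BL2, BL32/TSP) call the EL1 variants.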
index fd03ec2d4b2fcc1300b32b7480890338f3c58da6..edd3f7b2f0d358cf4292a090ecad623d4efca7d2 100644 (file)
@@ -138,9 +138,9 @@ void bl1_plat_arch_setup(void)
                cci_enable_coherency(read_mpidr());
        }
 
-       configure_mmu(&bl1_tzram_layout,
-                       TZROM_BASE,
-                       TZROM_BASE + TZROM_SIZE,
-                       BL1_COHERENT_RAM_BASE,
-                       BL1_COHERENT_RAM_LIMIT);
+       configure_mmu_el3(&bl1_tzram_layout,
+                         TZROM_BASE,
+                         TZROM_BASE + TZROM_SIZE,
+                         BL1_COHERENT_RAM_BASE,
+                         BL1_COHERENT_RAM_LIMIT);
 }
index 4c649eb710d8075f6997c996dac344c47d602bd8..80bb52e5a83c4b8ae027738ca8704b5f8f54b3d1 100644 (file)
@@ -172,9 +172,9 @@ void bl2_platform_setup()
  ******************************************************************************/
 void bl2_plat_arch_setup()
 {
-       configure_mmu(&bl2_tzram_layout,
-                     BL2_RO_BASE,
-                     BL2_RO_LIMIT,
-                     BL2_COHERENT_RAM_BASE,
-                     BL2_COHERENT_RAM_LIMIT);
+       configure_mmu_el1(&bl2_tzram_layout,
+                         BL2_RO_BASE,
+                         BL2_RO_LIMIT,
+                         BL2_COHERENT_RAM_BASE,
+                         BL2_COHERENT_RAM_LIMIT);
 }
index 5c00baa4517f0206973d39e6a5f01801473f4296..baf7df15dd69b408e897e4bc627d415a3e6cbe03 100644 (file)
@@ -172,9 +172,9 @@ void bl31_platform_setup()
  ******************************************************************************/
 void bl31_plat_arch_setup()
 {
-       configure_mmu(&bl2_to_bl31_args->bl31_meminfo,
-                     BL31_RO_BASE,
-                     BL31_RO_LIMIT,
-                     BL31_COHERENT_RAM_BASE,
-                     BL31_COHERENT_RAM_LIMIT);
+       configure_mmu_el3(&bl2_to_bl31_args->bl31_meminfo,
+                         BL31_RO_BASE,
+                         BL31_RO_LIMIT,
+                         BL31_COHERENT_RAM_BASE,
+                         BL31_COHERENT_RAM_LIMIT);
 }
index 9fe8fe1c5fe8e1e646a644d84fc69472566b3804..bb2b602f14c1449568c898cb4db16b8c42a2dd7c 100644 (file)
@@ -111,9 +111,9 @@ void bl32_platform_setup()
  ******************************************************************************/
 void bl32_plat_arch_setup()
 {
-       configure_mmu(&bl32_tzdram_layout,
-                     BL32_RO_BASE,
-                     BL32_RO_LIMIT,
-                     BL32_COHERENT_RAM_BASE,
-                     BL32_COHERENT_RAM_LIMIT);
+       configure_mmu_el1(&bl32_tzdram_layout,
+                         BL32_RO_BASE,
+                         BL32_RO_LIMIT,
+                         BL32_COHERENT_RAM_BASE,
+                         BL32_COHERENT_RAM_LIMIT);
 }
index 3fe892ec2c2841275977937bc5ce21c234637049..7244866ea04d0de4bd7569b54ace265129d9fb95 100644 (file)
@@ -370,12 +370,18 @@ extern void bl2_plat_arch_setup(void);
 extern void bl31_plat_arch_setup(void);
 extern int platform_setup_pm(const struct plat_pm_ops **);
 extern unsigned int platform_get_core_pos(unsigned long mpidr);
-extern void enable_mmu(void);
-extern void configure_mmu(struct meminfo *,
-                         unsigned long,
-                         unsigned long,
-                         unsigned long,
-                         unsigned long);
+extern void enable_mmu_el1(void);
+extern void enable_mmu_el3(void);
+extern void configure_mmu_el1(struct meminfo *mem_layout,
+                             unsigned long ro_start,
+                             unsigned long ro_limit,
+                             unsigned long coh_start,
+                             unsigned long coh_limit);
+extern void configure_mmu_el3(struct meminfo *mem_layout,
+                             unsigned long ro_start,
+                             unsigned long ro_limit,
+                             unsigned long coh_start,
+                             unsigned long coh_limit);
 extern unsigned long platform_get_cfgvar(unsigned int);
 extern int platform_config_setup(void);
 extern void plat_report_exception(unsigned long);
index 8f9bb4de15afb7e6938c75c939542fa79a30a383..9f4ebf6bcc67a5c04b1312b716be41b806db11af 100644 (file)
@@ -362,7 +362,7 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
        /*
         * Arch. management: Turn on mmu & restore architectural state
         */
-       enable_mmu();
+       enable_mmu_el3();
 
        /*
         * All the platform specific actions for turning this cpu