-Iinclude/drivers/io \
-Iinclude/lib \
-Iinclude/lib/aarch64 \
+ -Iinclude/lib/cpus/aarch64 \
-Iinclude/plat/common \
-Iinclude/stdlib \
-Iinclude/stdlib/sys \
ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
bl zeromem16
+ /* ---------------------------------------------
+ * Initialize the cpu_ops pointer.
+ * ---------------------------------------------
+ */
+ bl init_cpu_ops
+
/* ---------------------------------------------
* Use SP_EL0 for the C runtime stack.
* ---------------------------------------------
The default implementation doesn't do anything.
+### Function : plat_disable_acp()
+
+ Argument : void
+ Return : void
+
+This API allows a platform to disable the Accelerator Coherency Port (if
+present) during a cluster power down sequence. The default weak implementation
+doesn't do anything. Since this API is called during the power down sequence,
+it is subject to restrictions on stack usage and may use only registers
+x0 - x17 as scratch registers. It must preserve the value in the x18 register,
+as the caller uses it to store the return address.
+
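For illustration only, a platform might override the weak default along the
following lines. This is a sketch under stated assumptions: `PLAT_ACP_CTRL`
and its bit 0 are hypothetical stand-ins for whatever register actually gates
the ACP on a given platform.

    func plat_disable_acp
    	/* PLAT_ACP_CTRL is a hypothetical platform register; the real
    	 * sequence is platform-specific. Only x0 - x17 are used, and
    	 * x18 is left untouched, as required above. */
    	ldr	x0, =PLAT_ACP_CTRL
    	ldr	w1, [x0]
    	bic	w1, w1, #1		/* assumed ACP enable bit */
    	str	w1, [x0]
    	dsb	sy
    	ret
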
3. Modifications specific to a Boot Loader stage
-------------------------------------------------
#define MIDR_IMPL_SHIFT 0x18
#define MIDR_PN_MASK 0xfff
#define MIDR_PN_SHIFT 0x4
-#define MIDR_PN_AEM 0xd0f
-#define MIDR_PN_A57 0xd07
-#define MIDR_PN_A53 0xd03
+#define MIDR_PN_AEM 0xd0f
+#define MIDR_PN_A57 0xd07
+#define MIDR_PN_A53 0xd03
/*******************************************************************************
* MPIDR macros
#define SCTLR_WXN_BIT (1 << 19)
#define SCTLR_EE_BIT (1 << 25)
-/* CPUECTLR definitions */
-#define CPUECTLR_SMP_BIT (1 << 6)
-
/* CPACR_El1 definitions */
#define CPACR_EL1_FPEN(x) (x << 20)
#define CPACR_EL1_FP_TRAP_EL0 0x1
DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
-/* Implementation specific registers */
-
-DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1)
-
/* GICv3 System Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
#define read_hcr() read_hcr_el2()
#define write_hcr(_v) write_hcr_el2(_v)
-#define read_cpuectlr() read_cpuectlr_el1()
-#define write_cpuectlr(_v) write_cpuectlr_el1(_v)
-
#define read_cpacr() read_cpacr_el1()
#define write_cpacr(_v) write_cpacr_el1(_v)
+++ /dev/null
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-
-#define CPU_IMPL_PN_MASK (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
- (MIDR_PN_MASK << MIDR_PN_SHIFT)
-
- /*
- * Define the offsets to the fields in cpu_ops structure.
- */
- .struct 0
-CPU_MIDR: /* cpu_ops midr */
- .space 8
-/* Reset fn is needed in BL at reset vector */
-#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
-CPU_RESET_FUNC: /* cpu_ops reset_func */
- .space 8
-#endif
-CPU_OPS_SIZE = .
-
- /*
- * Convenience macro to declare cpu_ops structure.
- * Make sure the structure fields are as per the offsets
- * defined above.
- */
- .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
- .section cpu_ops, "a"; .align 3
- .type cpu_ops_\_name, %object
- .quad \_midr
-#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
- .if \_noresetfunc
- .quad 0
- .else
- .quad \_name\()_reset_func
- .endif
-#endif
- .endm
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __AEM_GENERIC_H__
+#define __AEM_GENERIC_H__
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR 0x410FD0F0
+
+/* Foundation AEM midr for revision 0 */
+#define FOUNDATION_AEM_MIDR 0x410FD000
+
+
+#endif /* __AEM_GENERIC_H__ */
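For background on this and the Cortex-A53/A57 headers below, the standard
MIDR_EL1 field layout decodes BASE_AEM_MIDR as follows (a worked decode, not
part of the patch):

    /* BASE_AEM_MIDR = 0x410FD0F0:
     *   bits [31:24] implementer  = 0x41  ('A', ARM Limited)
     *   bits [23:20] variant      = 0x0   (major revision r0)
     *   bits [19:16] architecture = 0xF   (features defined by ID registers)
     *   bits [15:4]  part number  = 0xD0F (matches MIDR_PN_AEM)
     *   bits [3:0]   revision     = 0x0   (minor revision p0)
     */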
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CORTEX_A53_H__
+#define __CORTEX_A53_H__
+
+/* Cortex-A53 midr for revision 0 */
+#define CORTEX_A53_MIDR 0x410FD030
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CPUECTLR_SMP_BIT (1 << 6)
+
+#endif /* __CORTEX_A53_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CORTEX_A57_H__
+#define __CORTEX_A57_H__
+
+/* Cortex-A57 midr for revision 0 */
+#define CORTEX_A57_MIDR 0x410FD070
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CPUECTLR_SMP_BIT (1 << 6)
+#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38)
+#define CPUECTLR_L2_IPFTCH_DIST_MASK (0x3 << 35)
+#define CPUECTLR_L2_DPFTCH_DIST_MASK (0x3 << 32)
+
+#endif /* __CORTEX_A57_H__ */
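One thing to note about these definitions: the prefetch-control fields sit
above bit 31, so CPUECTLR_EL1 must always be read-modified-written as a full
64-bit value, and the combined distance mask is not encodable as a single
logical immediate:

    /* CPUECTLR_L2_IPFTCH_DIST_MASK | CPUECTLR_L2_DPFTCH_DIST_MASK
     *   = (0x3 << 35) | (0x3 << 32) = 0x1B00000000
     * The set bits (0b11011 starting at bit 32) are not one contiguous
     * run, so `bic Xd, Xn, #imm` cannot encode the mask directly; this
     * is why cortex_a57_disable_l2_prefetch below assembles it with a
     * mov/orr pair before a register-form bic.
     */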
--- /dev/null
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+
+#define CPU_IMPL_PN_MASK	((MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+				 (MIDR_PN_MASK << MIDR_PN_SHIFT))
+
+ /*
+ * Define the offsets to the fields in cpu_ops structure.
+ */
+ .struct 0
+CPU_MIDR: /* cpu_ops midr */
+ .space 8
+/* Reset fn is needed in BL at reset vector */
+#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
+CPU_RESET_FUNC: /* cpu_ops reset_func */
+ .space 8
+#endif
+#if IMAGE_BL31 /* The core and cluster power down handlers are needed only in BL3-1 */
+CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
+ .space 8
+CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
+ .space 8
+#endif
+CPU_OPS_SIZE = .
+
+ /*
+ * Convenience macro to declare cpu_ops structure.
+ * Make sure the structure fields are as per the offsets
+ * defined above.
+ */
+ .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
+ .section cpu_ops, "a"; .align 3
+ .type cpu_ops_\_name, %object
+ .quad \_midr
+#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
+ .if \_noresetfunc
+ .quad 0
+ .else
+ .quad \_name\()_reset_func
+ .endif
+#endif
+#if IMAGE_BL31
+ .quad \_name\()_core_pwr_dwn
+ .quad \_name\()_cluster_pwr_dwn
+#endif
+ .endm
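To make the offsets concrete, here is a sketch (not patch content) of what
`declare_cpu_ops my_cpu, MY_CPU_MIDR` lays out for a hypothetical `my_cpu` in
a BL3-1 image built with RESET_TO_BL31, i.e. with both the reset and power
down slots compiled in:

    	.section cpu_ops, "a"; .align 3
    	.quad	MY_CPU_MIDR		/* CPU_MIDR, offset 0 */
    	.quad	my_cpu_reset_func	/* CPU_RESET_FUNC, offset 8 */
    	.quad	my_cpu_core_pwr_dwn	/* CPU_PWR_DWN_CORE, offset 16 */
    	.quad	my_cpu_cluster_pwr_dwn	/* CPU_PWR_DWN_CLUSTER, offset 24 */

Passing `_noresetfunc = 1` (as the AEM FVP does below) replaces the reset
entry with `.quad 0`; under other build flags the offsets shift accordingly.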
enum plat_config_flags {
- /* Whether CPUECTLR SMP bit should be enabled */
- CONFIG_CPUECTLR_SMP_BIT = 0x1,
/* Whether Base FVP memory map is in use */
- CONFIG_BASE_MMAP = 0x2,
+ CONFIG_BASE_MMAP = 0x1,
/* Whether CCI should be enabled */
- CONFIG_HAS_CCI = 0x4,
+ CONFIG_HAS_CCI = 0x2,
/* Whether TZC should be configured */
- CONFIG_HAS_TZC = 0x8
+ CONFIG_HAS_TZC = 0x4
};
typedef struct plat_config {
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <aem_generic.h>
#include <arch.h>
#include <asm_macros.S>
#include <cpu_macros.S>
-#define BASE_AEM_MIDR 0x410FD0F0
+func aem_generic_core_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
-#define FOUNDATION_AEM_MIDR 0x410FD000
+ mov x0, #DCCISW
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ b dcsw_op_louis
+
+func aem_generic_cluster_pwr_dwn
+ /* ---------------------------------------------
+ * Disable the Data Cache.
+ * ---------------------------------------------
+ */
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ b dcsw_op_all
+
+
+/* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
+/* cpu_ops for Foundation FVP */
declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1
*/
#include <arch.h>
#include <asm_macros.S>
+#include <cortex_a53.h>
#include <cpu_macros.S>
#include <plat_macros.S>
-#define CORTEX_A53_MIDR 0x410FD030
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a53_disable_smp
+ mrs x0, CPUECTLR_EL1
+ bic x0, x0, #CPUECTLR_SMP_BIT
+ msr CPUECTLR_EL1, x0
+ isb
+ dsb sy
+ ret
func cortex_a53_reset_func
/* ---------------------------------------------
isb
ret
+func cortex_a53_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_louis
+
+ /* ---------------------------------------------
+	 * Come out of intra-cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+
+func cortex_a53_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a53_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_all
+
+ /* ---------------------------------------------
+	 * Come out of intra-cluster coherency
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a53_disable_smp
+
declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
*/
#include <arch.h>
#include <asm_macros.S>
+#include <cortex_a57.h>
#include <cpu_macros.S>
#include <plat_macros.S>
-#define CORTEX_A57_MIDR 0x410FD070
+ /* ---------------------------------------------
+ * Disable L1 data cache and unified L2 cache
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_dcache
+ mrs x1, sctlr_el3
+ bic x1, x1, #SCTLR_C_BIT
+ msr sctlr_el3, x1
+ isb
+ ret
+
+ /* ---------------------------------------------
+ * Disable all types of L2 prefetches.
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_l2_prefetch
+ mrs x0, CPUECTLR_EL1
+ orr x0, x0, #CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
+ mov x1, #CPUECTLR_L2_IPFTCH_DIST_MASK
+ orr x1, x1, #CPUECTLR_L2_DPFTCH_DIST_MASK
+ bic x0, x0, x1
+ msr CPUECTLR_EL1, x0
+ isb
+ dsb sy
+ ret
+
+ /* ---------------------------------------------
+ * Disable intra-cluster coherency
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_smp
+ mrs x0, CPUECTLR_EL1
+ bic x0, x0, #CPUECTLR_SMP_BIT
+ msr CPUECTLR_EL1, x0
+ ret
+
+ /* ---------------------------------------------
+ * Disable debug interfaces
+ * ---------------------------------------------
+ */
+func cortex_a57_disable_ext_debug
+ mov x0, #1
+ msr osdlr_el1, x0
+ isb
+ dsb sy
+ ret
func cortex_a57_reset_func
/* ---------------------------------------------
isb
ret
+func cortex_a57_core_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Flush L1 cache to PoU.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_louis
+
+ /* ---------------------------------------------
+	 * Come out of intra-cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+
+func cortex_a57_cluster_pwr_dwn
+ mov x18, x30
+
+ /* ---------------------------------------------
+ * Turn off caches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_dcache
+
+ /* ---------------------------------------------
+ * Disable the L2 prefetches.
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_l2_prefetch
+
+ /* ---------------------------------------------
+ * Disable the optional ACP.
+ * ---------------------------------------------
+ */
+ bl plat_disable_acp
+
+ /* ---------------------------------------------
+ * Flush L1 and L2 caches to PoC.
+ * ---------------------------------------------
+ */
+ mov x0, #DCCISW
+ bl dcsw_op_all
+
+ /* ---------------------------------------------
+	 * Come out of intra-cluster coherency
+ * ---------------------------------------------
+ */
+ bl cortex_a57_disable_smp
+
+ /* ---------------------------------------------
+ * Force the debug interfaces to be quiescent
+ * ---------------------------------------------
+ */
+ mov x30, x18
+ b cortex_a57_disable_ext_debug
+
declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
#endif /* IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31) */
+#if IMAGE_BL31 /* The core and cluster power down functions are needed only in BL3-1 */
+ /*
+ * The prepare core power down function for all platforms. After
+ * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * core_pwr_dwn handler in the cpu_ops is invoked.
+ */
+ .globl prepare_core_pwr_dwn
+func prepare_core_pwr_dwn
+ mrs x1, tpidr_el3
+ ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops core_pwr_dwn handler */
+ ldr x1, [x0, #CPU_PWR_DWN_CORE]
+ br x1
+
+ /*
+ * The prepare cluster power down function for all platforms. After
+ * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+	 * cluster_pwr_dwn handler in the cpu_ops is invoked.
+ */
+ .globl prepare_cluster_pwr_dwn
+func prepare_cluster_pwr_dwn
+ mrs x1, tpidr_el3
+ ldr x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+
+ /* Get the cpu_ops cluster_pwr_dwn handler */
+ ldr x1, [x0, #CPU_PWR_DWN_CLUSTER]
+ br x1
+
+
+ /*
+	 * Initializes the cpu_ops_ptr in cpu_data if it has not already
+	 * been initialized. This can be called without a runtime stack.
+ * clobbers: x0 - x6, x10
+ */
+ .globl init_cpu_ops
+func init_cpu_ops
+ mrs x6, tpidr_el3
+ ldr x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+ cbnz x0, 1f
+ mov x10, x30
+ bl get_cpu_ops_ptr
+#if ASM_ASSERTION
+ cmp x0, #0
+ ASM_ASSERT(ne)
+#endif
+ str x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+ mov x30, x10
+1:
+ ret
+#endif /* IMAGE_BL31 */
+
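A minimal sketch of the intended call sites for the functions above, assuming
(as elsewhere in this patch) that tpidr_el3 already points at this core's
cpu_data:

    	/* Early in the BL3-1 entrypoint, before any other cpu_ops use: */
    	bl	init_cpu_ops

    	/* Later, from the PSCI power down path. The selected handler is
    	 * entered with br rather than bl, so its ret returns directly to
    	 * prepare_core_pwr_dwn's caller here. */
    	bl	prepare_core_pwr_dwn
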
/*
* The below function returns the cpu_ops structure matching the
* midr of the core. It reads the MIDR_EL1 and finds the matching
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_reset_handler
+ .weak plat_disable_acp
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
*/
func plat_reset_handler
ret
+
+	/* -----------------------------------------------------
+	 * Placeholder function which should be redefined by
+	 * each platform. This function is allowed to use
+	 * registers x0 - x17 as scratch but must preserve the
+	 * value in x18, which holds the caller's return address.
+	 * -----------------------------------------------------
+	 */
+func plat_disable_acp
+ ret
******************************************************************************/
int fvp_config_setup(void)
{
- unsigned int rev, hbi, bld, arch, sys_id, midr_pn;
+ unsigned int rev, hbi, bld, arch, sys_id;
sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
}
break;
case HBI_FVP_BASE:
- midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
- plat_config.flags =
- ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
- ? CONFIG_CPUECTLR_SMP_BIT : 0;
-
plat_config.max_aff0 = 4;
plat_config.max_aff1 = 2;
plat_config.flags |= CONFIG_BASE_MMAP | CONFIG_HAS_CCI |
******************************************************************************/
static void fvp_cpu_pwrdwn_common()
{
- uint32_t ectlr;
-
- /*
- * Take this cpu out of intra-cluster coherency if the FVP flavour
- * supports the SMP bit.
- */
- if (get_plat_config()->flags & CONFIG_CPUECTLR_SMP_BIT) {
- ectlr = read_cpuectlr();
- ectlr &= ~CPUECTLR_SMP_BIT;
- write_cpuectlr(ectlr);
- }
-
/* Prevent interrupts from spuriously waking up this cpu */
arm_gic_cpuif_deactivate();
unsigned int state)
{
int rc = PSCI_E_SUCCESS;
- unsigned int ectlr;
/* Determine if any platform actions need to be executed. */
if (fvp_do_plat_actions(afflvl, state) == -EAGAIN)
fvp_cci_enable();
}
- /*
- * Turn on intra-cluster coherency if the FVP flavour supports
- * it.
- */
- if (get_plat_config()->flags & CONFIG_CPUECTLR_SMP_BIT) {
- ectlr = read_cpuectlr();
- ectlr |= CPUECTLR_SMP_BIT;
- write_cpuectlr(ectlr);
- }
-
/*
* Clear PWKUPR.WEN bit to ensure interrupts do not interfere
* with a cpu power down unless the bit is set again
*/
bl init_cpu_data_ptr
+ /* ---------------------------------------------
+ * Initialize the cpu_ops pointer.
+ * ---------------------------------------------
+ */
+ bl init_cpu_ops
+
/* ---------------------------------------------
* Set the exception vectors
* ---------------------------------------------
cmp x0, x19
b.ne 1f
- /* ---------------------------------------------
- * Disable the Data Cache.
- * ---------------------------------------------
- */
- mrs x1, sctlr_el3
- bic x1, x1, #SCTLR_C_BIT
- msr sctlr_el3, x1
- isb
-
/* ---------------------------------------------
	 * Determine how many levels of cache will be
* subject to cache maintenance. Affinity level
* ---------------------------------------------
*/
cmp x0, #MPIDR_AFFLVL0
- mov x0, #DCCISW
- b.ne flush_caches_to_poc
-
- /* ---------------------------------------------
- * Flush L1 cache to PoU.
- * ---------------------------------------------
- */
- bl dcsw_op_louis
+ b.eq do_core_pwr_dwn
+ bl prepare_cluster_pwr_dwn
b do_stack_maintenance
- /* ---------------------------------------------
- * Flush L1 and L2 caches to PoC.
- * ---------------------------------------------
- */
-flush_caches_to_poc:
- bl dcsw_op_all
-
- /* ---------------------------------------------
- * TODO: Intra-cluster coherency should be
- * turned off here once cpu-specific
- * abstractions are in place.
- * ---------------------------------------------
- */
+do_core_pwr_dwn:
+ bl prepare_core_pwr_dwn
/* ---------------------------------------------
* Do stack maintenance by flushing the used