Add CPU specific power management operations
authorSoby Mathew <soby.mathew@arm.com>
Thu, 14 Aug 2014 11:49:05 +0000 (12:49 +0100)
committerDan Handley <dan.handley@arm.com>
Wed, 20 Aug 2014 18:14:31 +0000 (19:14 +0100)
This patch adds CPU core and cluster power down sequences to the CPU specific
operations framework introduced in an earlier patch. Cortex-A53, Cortex-A57 and
generic AEM sequences have been added. The latter is suitable for the
Foundation and Base AEM FVPs. A pointer to each CPU's operations structure is
saved in the per-cpu data so that it can be easily accessed during power down
sequences.

An optional platform API has been introduced to allow a platform to disable the
Accelerator Coherency Port (ACP) during a cluster power down sequence. The weak
definition of this function (plat_disable_acp()) does not take any action. It
should be overridden with a strong definition if the ACP is present on a
platform.

Change-Id: I8d09bd40d2f528a28d2d3f19b77101178778685d

20 files changed:
Makefile
bl31/aarch64/bl31_entrypoint.S
docs/porting-guide.md
include/lib/aarch64/arch.h
include/lib/aarch64/arch_helpers.h
include/lib/aarch64/cpu_macros.S [deleted file]
include/lib/cpus/aarch64/aem_generic.h [new file with mode: 0644]
include/lib/cpus/aarch64/cortex_a53.h [new file with mode: 0644]
include/lib/cpus/aarch64/cortex_a57.h [new file with mode: 0644]
include/lib/cpus/aarch64/cpu_macros.S [new file with mode: 0644]
include/plat/common/plat_config.h
lib/cpus/aarch64/aem_generic.S
lib/cpus/aarch64/cortex_a53.S
lib/cpus/aarch64/cortex_a57.S
lib/cpus/aarch64/cpu_helpers.S
plat/common/aarch64/platform_helpers.S
plat/fvp/aarch64/fvp_common.c
plat/fvp/fvp_pm.c
services/std_svc/psci/psci_entry.S
services/std_svc/psci/psci_helpers.S

index 520a0d7e39d14145d4469a5826f18a678e4f3a67..6f6d703738f4193bc858d8035d91b4d0ef0fcc91 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -180,6 +180,7 @@ INCLUDES            +=      -Iinclude/bl31                  \
                                -Iinclude/drivers/io            \
                                -Iinclude/lib                   \
                                -Iinclude/lib/aarch64           \
+                               -Iinclude/lib/cpus/aarch64      \
                                -Iinclude/plat/common           \
                                -Iinclude/stdlib                \
                                -Iinclude/stdlib/sys            \
index a9238dcb99e61206d14d83415d012445bc7aab72..c74858f9a08b03a10850d3b7d7cfb75d5645adf1 100644 (file)
@@ -153,6 +153,12 @@ func bl31_entrypoint
        ldr     x1, =__COHERENT_RAM_UNALIGNED_SIZE__
        bl      zeromem16
 
+       /* ---------------------------------------------
+        * Initialize the cpu_ops pointer.
+        * ---------------------------------------------
+        */
+       bl      init_cpu_ops
+
        /* ---------------------------------------------
         * Use SP_EL0 for the C runtime stack.
         * ---------------------------------------------
index 262de73e7bd4c1823e07e0d4e9dd5c51d82b4b3a..0a07bff6afa4cf298c0f847851fadedbbd056fd3 100644 (file)
@@ -473,6 +473,18 @@ return address.
 
 The default implementation doesn't do anything.
 
+### Function : plat_disable_acp()
+
+    Argument : void
+    Return   : void
+
+This api allows a platform to disable the Accelerator Coherency Port (if
+present) during a cluster power down sequence. The default weak implementation
+doesn't do anything. Since this api is called during the power down sequence,
+it has restrictions for stack usage and it can use the registers x0 - x17 as
+scratch registers. It should preserve the value in x18 register as it is used
+by the caller to store the return address.
+
 
 3.  Modifications specific to a Boot Loader stage
 -------------------------------------------------
index bb33acbada7235f8846a10e0577f1e9e705b43ee..333dcf1d62a0569382e66b75b89420cf519c47c9 100644 (file)
@@ -39,9 +39,9 @@
 #define MIDR_IMPL_SHIFT                0x18
 #define MIDR_PN_MASK           0xfff
 #define MIDR_PN_SHIFT          0x4
-#define MIDR_PN_AEM            0xd0f
-#define MIDR_PN_A57            0xd07
-#define MIDR_PN_A53            0xd03
+#define MIDR_PN_AEM                    0xd0f
+#define MIDR_PN_A57                    0xd07
+#define MIDR_PN_A53                    0xd03
 
 /*******************************************************************************
  * MPIDR macros
 #define SCTLR_WXN_BIT          (1 << 19)
 #define SCTLR_EE_BIT           (1 << 25)
 
-/* CPUECTLR definitions */
-#define CPUECTLR_SMP_BIT       (1 << 6)
-
 /* CPACR_El1 definitions */
 #define CPACR_EL1_FPEN(x)      (x << 20)
 #define CPACR_EL1_FP_TRAP_EL0  0x1
index 6ba37c235ac6f4cd12ba65f201c1ca9d7074831d..09365fb9efa37eaf9caeb143efcc510308da7244 100644 (file)
@@ -265,10 +265,6 @@ DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
 DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
 DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
 
-/* Implementation specific registers */
-
-DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1)
-
 /* GICv3 System Registers */
 
 DEFINE_RENAME_SYSREG_RW_FUNCS(icc_sre_el1, ICC_SRE_EL1)
@@ -299,9 +295,6 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(icc_pmr_el1, ICC_PMR_EL1)
 #define read_hcr()             read_hcr_el2()
 #define write_hcr(_v)          write_hcr_el2(_v)
 
-#define read_cpuectlr()                read_cpuectlr_el1()
-#define write_cpuectlr(_v)     write_cpuectlr_el1(_v)
-
 #define read_cpacr()           read_cpacr_el1()
 #define write_cpacr(_v)                write_cpacr_el1(_v)
 
diff --git a/include/lib/aarch64/cpu_macros.S b/include/lib/aarch64/cpu_macros.S
deleted file mode 100644 (file)
index 51c56e8..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arch.h>
-
-#define CPU_IMPL_PN_MASK       (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
-                               (MIDR_PN_MASK << MIDR_PN_SHIFT)
-
-       /*
-        * Define the offsets to the fields in cpu_ops structure.
-        */
-       .struct 0
-CPU_MIDR: /* cpu_ops midr */
-       .space  8
-/* Reset fn is needed in BL at reset vector */
-#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
-CPU_RESET_FUNC: /* cpu_ops reset_func */
-       .space  8
-#endif
-CPU_OPS_SIZE = .
-
-       /*
-        * Convenience macro to declare cpu_ops structure.
-        * Make sure the structure fields are as per the offsets
-        * defined above.
-        */
-       .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
-       .section cpu_ops, "a"; .align 3
-       .type cpu_ops_\_name, %object
-       .quad \_midr
-#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
-       .if \_noresetfunc
-       .quad 0
-       .else
-       .quad \_name\()_reset_func
-       .endif
-#endif
-       .endm
diff --git a/include/lib/cpus/aarch64/aem_generic.h b/include/lib/cpus/aarch64/aem_generic.h
new file mode 100644 (file)
index 0000000..2f701d1
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __AEM_GENERIC_H__
+#define __AEM_GENERIC_H__
+
+/* BASE AEM midr for revision 0 */
+#define BASE_AEM_MIDR 0x410FD0F0
+
+/* Foundation AEM midr for revision 0 */
+#define FOUNDATION_AEM_MIDR  0x410FD000
+
+
+#endif /* __AEM_GENERIC_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a53.h b/include/lib/cpus/aarch64/cortex_a53.h
new file mode 100644 (file)
index 0000000..dcae411
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CORTEX_A53_H__
+#define __CORTEX_A53_H__
+
+/* Cortex-A53 midr for revision 0 */
+#define CORTEX_A53_MIDR 0x410FD030
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CPUECTLR_SMP_BIT               (1 << 6)
+
+#endif /* __CORTEX_A53_H__ */
diff --git a/include/lib/cpus/aarch64/cortex_a57.h b/include/lib/cpus/aarch64/cortex_a57.h
new file mode 100644 (file)
index 0000000..cf887d0
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __CORTEX_A57_H__
+#define __CORTEX_A57_H__
+
+/* Cortex-A57 midr for revision 0 */
+#define CORTEX_A57_MIDR 0x410FD070
+
+/*******************************************************************************
+ * CPU Extended Control register specific definitions.
+ ******************************************************************************/
+#define CPUECTLR_SMP_BIT               (1 << 6)
+#define CPUECTLR_DIS_TWD_ACC_PFTCH_BIT (1 << 38)
+#define CPUECTLR_L2_IPFTCH_DIST_MASK   (0x3 << 35)
+#define CPUECTLR_L2_DPFTCH_DIST_MASK   (0x3 << 32)
+
+#endif /* __CORTEX_A57_H__ */
diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S
new file mode 100644 (file)
index 0000000..abe8973
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+
+#define CPU_IMPL_PN_MASK       (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) | \
+                               (MIDR_PN_MASK << MIDR_PN_SHIFT)
+
+       /*
+        * Define the offsets to the fields in cpu_ops structure.
+        */
+       .struct 0
+CPU_MIDR: /* cpu_ops midr */
+       .space  8
+/* Reset fn is needed in BL at reset vector */
+#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
+CPU_RESET_FUNC: /* cpu_ops reset_func */
+       .space  8
+#endif
+#if IMAGE_BL31 /* The power down core and cluster is needed only in BL3-1 */
+CPU_PWR_DWN_CORE: /* cpu_ops core_pwr_dwn */
+       .space  8
+CPU_PWR_DWN_CLUSTER: /* cpu_ops cluster_pwr_dwn */
+       .space  8
+#endif
+CPU_OPS_SIZE = .
+
+       /*
+        * Convenience macro to declare cpu_ops structure.
+        * Make sure the structure fields are as per the offsets
+        * defined above.
+        */
+       .macro declare_cpu_ops _name:req, _midr:req, _noresetfunc = 0
+       .section cpu_ops, "a"; .align 3
+       .type cpu_ops_\_name, %object
+       .quad \_midr
+#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
+       .if \_noresetfunc
+       .quad 0
+       .else
+       .quad \_name\()_reset_func
+       .endif
+#endif
+#if IMAGE_BL31
+       .quad \_name\()_core_pwr_dwn
+       .quad \_name\()_cluster_pwr_dwn
+#endif
+       .endm
index 826d01b7e959106621ec446c18c64dee4f3701fd..20d3c0360ffe080b32ebcec43c6630a6d8ba5aee 100644 (file)
 
 
 enum plat_config_flags {
-       /* Whether CPUECTLR SMP bit should be enabled */
-       CONFIG_CPUECTLR_SMP_BIT         = 0x1,
        /* Whether Base FVP memory map is in use */
-       CONFIG_BASE_MMAP                = 0x2,
+       CONFIG_BASE_MMAP                = 0x1,
        /* Whether CCI should be enabled */
-       CONFIG_HAS_CCI                  = 0x4,
+       CONFIG_HAS_CCI                  = 0x2,
        /* Whether TZC should be configured */
-       CONFIG_HAS_TZC                  = 0x8
+       CONFIG_HAS_TZC                  = 0x4
 };
 
 typedef struct plat_config {
index a8dbf1a19b03d8d05637ca6457bd2713a8bdd239..19c9433673efa64302b73a5598296807269112ea 100644 (file)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
+#include <aem_generic.h>
 #include <arch.h>
 #include <asm_macros.S>
 #include <cpu_macros.S>
 
-#define BASE_AEM_MIDR 0x410FD0F0
+func aem_generic_core_pwr_dwn
+       /* ---------------------------------------------
+        * Disable the Data Cache.
+        * ---------------------------------------------
+        */
+       mrs     x1, sctlr_el3
+       bic     x1, x1, #SCTLR_C_BIT
+       msr     sctlr_el3, x1
+       isb
 
-#define FOUNDATION_AEM_MIDR 0x410FD000
+       mov     x0, #DCCISW
 
+       /* ---------------------------------------------
+        * Flush L1 cache to PoU.
+        * ---------------------------------------------
+        */
+       b       dcsw_op_louis
 
+
+func aem_generic_cluster_pwr_dwn
+       /* ---------------------------------------------
+        * Disable the Data Cache.
+        * ---------------------------------------------
+        */
+       mrs     x1, sctlr_el3
+       bic     x1, x1, #SCTLR_C_BIT
+       msr     sctlr_el3, x1
+       isb
+
+       /* ---------------------------------------------
+        * Flush L1 and L2 caches to PoC.
+        * ---------------------------------------------
+        */
+       mov     x0, #DCCISW
+       b       dcsw_op_all
+
+
+/* cpu_ops for Base AEM FVP */
 declare_cpu_ops aem_generic, BASE_AEM_MIDR, 1
 
+/* cpu_ops for Foundation FVP */
 declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, 1
index 2d28dd979d319b3730cef5e641a5a105c336aaa5..08cc9381e35724b95316162a148556f459034829 100644 (file)
  */
 #include <arch.h>
 #include <asm_macros.S>
+#include <cortex_a53.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
-#define CORTEX_A53_MIDR 0x410FD030
+       /* ---------------------------------------------
+        * Disable L1 data cache and unified L2 cache
+        * ---------------------------------------------
+        */
+func cortex_a53_disable_dcache
+       mrs     x1, sctlr_el3
+       bic     x1, x1, #SCTLR_C_BIT
+       msr     sctlr_el3, x1
+       isb
+       ret
+
+       /* ---------------------------------------------
+        * Disable intra-cluster coherency
+        * ---------------------------------------------
+        */
+func cortex_a53_disable_smp
+       mrs     x0, CPUECTLR_EL1
+       bic     x0, x0, #CPUECTLR_SMP_BIT
+       msr     CPUECTLR_EL1, x0
+       isb
+       dsb     sy
+       ret
 
 func cortex_a53_reset_func
        /* ---------------------------------------------
@@ -45,4 +67,56 @@ func cortex_a53_reset_func
        isb
        ret
 
+func cortex_a53_core_pwr_dwn
+       mov     x18, x30
+
+       /* ---------------------------------------------
+        * Turn off caches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a53_disable_dcache
+
+       /* ---------------------------------------------
+        * Flush L1 cache to PoU.
+        * ---------------------------------------------
+        */
+       mov     x0, #DCCISW
+       bl      dcsw_op_louis
+
+       /* ---------------------------------------------
+        * Come out of intra cluster coherency
+        * ---------------------------------------------
+        */
+       mov     x30, x18
+       b       cortex_a53_disable_smp
+
+func cortex_a53_cluster_pwr_dwn
+       mov     x18, x30
+
+       /* ---------------------------------------------
+        * Turn off caches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a53_disable_dcache
+
+       /* ---------------------------------------------
+        * Disable the optional ACP.
+        * ---------------------------------------------
+        */
+       bl      plat_disable_acp
+
+       /* ---------------------------------------------
+        * Flush L1 and L2 caches to PoC.
+        * ---------------------------------------------
+        */
+       mov     x0, #DCCISW
+       bl      dcsw_op_all
+
+       /* ---------------------------------------------
+        * Come out of intra cluster coherency
+        * ---------------------------------------------
+        */
+       mov     x30, x18
+       b       cortex_a53_disable_smp
+
 declare_cpu_ops cortex_a53, CORTEX_A53_MIDR
index df3a8987ec10e1c9bbc6b6e4e8785050939a3797..8de7fe9219904cd0658d7b0069225703745b8a06 100644 (file)
  */
 #include <arch.h>
 #include <asm_macros.S>
+#include <cortex_a57.h>
 #include <cpu_macros.S>
 #include <plat_macros.S>
 
-#define CORTEX_A57_MIDR 0x410FD070
+       /* ---------------------------------------------
+        * Disable L1 data cache and unified L2 cache
+        * ---------------------------------------------
+        */
+func cortex_a57_disable_dcache
+       mrs     x1, sctlr_el3
+       bic     x1, x1, #SCTLR_C_BIT
+       msr     sctlr_el3, x1
+       isb
+       ret
+
+       /* ---------------------------------------------
+        * Disable all types of L2 prefetches.
+        * ---------------------------------------------
+        */
+func cortex_a57_disable_l2_prefetch
+       mrs     x0, CPUECTLR_EL1
+       orr     x0, x0, #CPUECTLR_DIS_TWD_ACC_PFTCH_BIT
+       mov     x1, #CPUECTLR_L2_IPFTCH_DIST_MASK
+       orr     x1, x1, #CPUECTLR_L2_DPFTCH_DIST_MASK
+       bic     x0, x0, x1
+       msr     CPUECTLR_EL1, x0
+       isb
+       dsb     sy
+       ret
+
+       /* ---------------------------------------------
+        * Disable intra-cluster coherency
+        * ---------------------------------------------
+        */
+func cortex_a57_disable_smp
+       mrs     x0, CPUECTLR_EL1
+       bic     x0, x0, #CPUECTLR_SMP_BIT
+       msr     CPUECTLR_EL1, x0
+       ret
+
+       /* ---------------------------------------------
+        * Disable debug interfaces
+        * ---------------------------------------------
+        */
+func cortex_a57_disable_ext_debug
+       mov     x0, #1
+       msr     osdlr_el1, x0
+       isb
+       dsb     sy
+       ret
 
 func cortex_a57_reset_func
        /* ---------------------------------------------
@@ -45,4 +91,80 @@ func cortex_a57_reset_func
        isb
        ret
 
+func cortex_a57_core_pwr_dwn
+       mov     x18, x30
+
+       /* ---------------------------------------------
+        * Turn off caches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_dcache
+
+       /* ---------------------------------------------
+        * Disable the L2 prefetches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_l2_prefetch
+
+       /* ---------------------------------------------
+        * Flush L1 cache to PoU.
+        * ---------------------------------------------
+        */
+       mov     x0, #DCCISW
+       bl      dcsw_op_louis
+
+       /* ---------------------------------------------
+        * Come out of intra cluster coherency
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_smp
+
+       /* ---------------------------------------------
+        * Force the debug interfaces to be quiescent
+        * ---------------------------------------------
+        */
+       mov     x30, x18
+       b       cortex_a57_disable_ext_debug
+
+func cortex_a57_cluster_pwr_dwn
+       mov     x18, x30
+
+       /* ---------------------------------------------
+        * Turn off caches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_dcache
+
+       /* ---------------------------------------------
+        * Disable the L2 prefetches.
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_l2_prefetch
+
+       /* ---------------------------------------------
+        * Disable the optional ACP.
+        * ---------------------------------------------
+        */
+       bl      plat_disable_acp
+
+       /* ---------------------------------------------
+        * Flush L1 and L2 caches to PoC.
+        * ---------------------------------------------
+        */
+       mov     x0, #DCCISW
+       bl      dcsw_op_all
+
+       /* ---------------------------------------------
+        * Come out of intra cluster coherency
+        * ---------------------------------------------
+        */
+       bl      cortex_a57_disable_smp
+
+       /* ---------------------------------------------
+        * Force the debug interfaces to be quiescent
+        * ---------------------------------------------
+        */
+       mov     x30, x18
+       b       cortex_a57_disable_ext_debug
+
 declare_cpu_ops cortex_a57, CORTEX_A57_MIDR
index 6db04ed92d2b56414d36382d13760e7edc561118..624a4597d289c2bc93fa518c948de4770bc21cd9 100644 (file)
@@ -65,6 +65,66 @@ func reset_handler
 
 #endif /* IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31) */
 
+#if IMAGE_BL31 /* The power down core and cluster is needed only in  BL31 */
+       /*
+        * The prepare core power down function for all platforms.  After
+        * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+        * pwr_dwn_core in the cpu_ops is invoked.
+        */
+       .globl  prepare_core_pwr_dwn
+func prepare_core_pwr_dwn
+       mrs     x1, tpidr_el3
+       ldr     x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+       cmp     x0, #0
+       ASM_ASSERT(ne)
+#endif
+
+       /* Get the cpu_ops core_pwr_dwn handler */
+       ldr     x1, [x0, #CPU_PWR_DWN_CORE]
+       br      x1
+
+       /*
+        * The prepare cluster power down function for all platforms.  After
+        * the cpu_ops pointer is retrieved from cpu_data, the corresponding
+        * pwr_dwn_cluster in the cpu_ops is invoked.
+        */
+       .globl  prepare_cluster_pwr_dwn
+func prepare_cluster_pwr_dwn
+       mrs     x1, tpidr_el3
+       ldr     x0, [x1, #CPU_DATA_CPU_OPS_PTR]
+#if ASM_ASSERTION
+       cmp     x0, #0
+       ASM_ASSERT(ne)
+#endif
+
+       /* Get the cpu_ops cluster_pwr_dwn handler */
+       ldr     x1, [x0, #CPU_PWR_DWN_CLUSTER]
+       br      x1
+
+
+       /*
+        * Initializes the cpu_ops_ptr if not already initialized
+        * in cpu_data. This can be called without a runtime stack.
+        * clobbers: x0 - x6, x10
+        */
+       .globl  init_cpu_ops
+func init_cpu_ops
+       mrs     x6, tpidr_el3
+       ldr     x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+       cbnz    x0, 1f
+       mov     x10, x30
+       bl      get_cpu_ops_ptr
+#if ASM_ASSERTION
+       cmp     x0, #0
+       ASM_ASSERT(ne)
+#endif
+       str     x0, [x6, #CPU_DATA_CPU_OPS_PTR]
+       mov x30, x10
+1:
+       ret
+#endif /* IMAGE_BL31 */
+
        /*
         * The below function returns the cpu_ops structure matching the
         * midr of the core. It reads the MIDR_EL1 and finds the matching
index 1229ead34a1f6a58e99ebc085e98f807f33e0366..c236fd7b485286fca113041baadaebb57fd392f4 100644 (file)
@@ -39,6 +39,7 @@
        .weak   plat_crash_console_init
        .weak   plat_crash_console_putc
        .weak   plat_reset_handler
+       .weak   plat_disable_acp
 
        /* -----------------------------------------------------
         *  int platform_get_core_pos(int mpidr);
@@ -93,3 +94,12 @@ func plat_crash_console_putc
         */
 func plat_reset_handler
        ret
+
+       /* -----------------------------------------------------
+        * Placeholder function which should be redefined by
+        * each platform. This function is allowed to use
+        * registers x0 - x17.
+        * -----------------------------------------------------
+        */
+func plat_disable_acp
+       ret
index 89fd8b3ec5c4e4dc6e8f003adb74cf306c346a3d..a25c4f0c8d7a155ac5984402eef69da57543eb28 100644 (file)
@@ -134,7 +134,7 @@ DEFINE_CONFIGURE_MMU_EL(3)
  ******************************************************************************/
 int fvp_config_setup(void)
 {
-       unsigned int rev, hbi, bld, arch, sys_id, midr_pn;
+       unsigned int rev, hbi, bld, arch, sys_id;
 
        sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
        rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
@@ -193,11 +193,6 @@ int fvp_config_setup(void)
                }
                break;
        case HBI_FVP_BASE:
-               midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
-               plat_config.flags =
-                       ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
-                       ? CONFIG_CPUECTLR_SMP_BIT : 0;
-
                plat_config.max_aff0 = 4;
                plat_config.max_aff1 = 2;
                plat_config.flags |= CONFIG_BASE_MMAP | CONFIG_HAS_CCI |
index 568b51d9f9b88144d4659e8f7c73001c6bc9e5db..2038e87aed6bf590c605ceae0f6cf9e28abdd3f7 100644 (file)
@@ -66,18 +66,6 @@ static void fvp_program_mailbox(uint64_t mpidr, uint64_t address)
  ******************************************************************************/
 static void fvp_cpu_pwrdwn_common()
 {
-       uint32_t ectlr;
-
-       /*
-        * Take this cpu out of intra-cluster coherency if the FVP flavour
-        * supports the SMP bit.
-        */
-       if (get_plat_config()->flags & CONFIG_CPUECTLR_SMP_BIT) {
-               ectlr = read_cpuectlr();
-               ectlr &= ~CPUECTLR_SMP_BIT;
-               write_cpuectlr(ectlr);
-       }
-
        /* Prevent interrupts from spuriously waking up this cpu */
        arm_gic_cpuif_deactivate();
 
@@ -273,7 +261,6 @@ int fvp_affinst_on_finish(unsigned long mpidr,
                          unsigned int state)
 {
        int rc = PSCI_E_SUCCESS;
-       unsigned int ectlr;
 
        /* Determine if any platform actions need to be executed. */
        if (fvp_do_plat_actions(afflvl, state) == -EAGAIN)
@@ -296,16 +283,6 @@ int fvp_affinst_on_finish(unsigned long mpidr,
                fvp_cci_enable();
        }
 
-       /*
-        * Turn on intra-cluster coherency if the FVP flavour supports
-        * it.
-        */
-       if (get_plat_config()->flags & CONFIG_CPUECTLR_SMP_BIT) {
-               ectlr = read_cpuectlr();
-               ectlr |= CPUECTLR_SMP_BIT;
-               write_cpuectlr(ectlr);
-       }
-
        /*
         * Clear PWKUPR.WEN bit to ensure interrupts do not interfere
         * with a cpu power down unless the bit is set again
index cc57aa1581fcb02c944cc8835d0b37b31fcec1ba..8145012878eb8cd3d92d2c7e3dada112a2212c41 100644 (file)
@@ -77,6 +77,12 @@ psci_aff_common_finish_entry:
         */
        bl      init_cpu_data_ptr
 
+       /* ---------------------------------------------
+        * Initialize the cpu_ops pointer.
+        * ---------------------------------------------
+        */
+       bl      init_cpu_ops
+
        /* ---------------------------------------------
         * Set the exception vectors
         * ---------------------------------------------
index 91c31725ca9dce2e6071d4f7ca07a8314bc92589..9a51d5c29e8ad6a8f6a94491e9fd4c429b408f18 100644 (file)
@@ -65,15 +65,6 @@ func psci_do_pwrdown_cache_maintenance
        cmp     x0, x19
        b.ne    1f
 
-       /* ---------------------------------------------
-        * Disable the Data Cache.
-        * ---------------------------------------------
-        */
-       mrs     x1, sctlr_el3
-       bic     x1, x1, #SCTLR_C_BIT
-       msr     sctlr_el3, x1
-       isb
-
        /* ---------------------------------------------
         * Determine to how many levels of cache will be
         * subject to cache maintenance. Affinity level
@@ -87,29 +78,12 @@ func psci_do_pwrdown_cache_maintenance
         * ---------------------------------------------
         */
        cmp     x0, #MPIDR_AFFLVL0
-       mov     x0, #DCCISW
-       b.ne    flush_caches_to_poc
-
-       /* ---------------------------------------------
-        * Flush L1 cache to PoU.
-        * ---------------------------------------------
-        */
-       bl      dcsw_op_louis
+       b.eq    do_core_pwr_dwn
+       bl      prepare_cluster_pwr_dwn
        b       do_stack_maintenance
 
-       /* ---------------------------------------------
-        * Flush L1 and L2 caches to PoC.
-        * ---------------------------------------------
-        */
-flush_caches_to_poc:
-       bl      dcsw_op_all
-
-       /* ---------------------------------------------
-        * TODO: Intra-cluster coherency should be
-        * turned off here once cpu-specific
-        * abstractions are in place.
-        * ---------------------------------------------
-        */
+do_core_pwr_dwn:
+       bl      prepare_core_pwr_dwn
 
        /* ---------------------------------------------
         * Do stack maintenance by flushing the used