return NULL;
}
+#pragma weak params_early_setup
+void params_early_setup(void *plat_param_from_bl2)
+{
+}
+
/*******************************************************************************
* Perform any BL3-1 early platform setup. Here is an opportunity to copy
* parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <plat_private.h>
+#include <soc.h>
+#include <string.h>
+#include "ddr_parameter.h"
+
+/*
+ * The miniloader delivers the DDR usage information at address 0x02000000,
+ * in the format shown in the figure below. It tells ATF which areas of DDR
+ * are used by the platform; we treat those as non-secure regions by default.
+ * The remaining regions are then derived and configured as secure regions to
+ * prevent illegal access.
+ *
+ * [ddr usage info data format]
+ * 0x02000000
+ * -----------------------------------------------------------------------------
+ * | <name> | <size> | <description> |
+ * -----------------------------------------------------------------------------
+ * | count | 4byte | the number of entries in the |
+ * | | | 'addr_array' and 'size_array' arrays |
+ * -----------------------------------------------------------------------------
+ * | reserved | 4byte | padding to keep 'addr_array' 8-byte aligned |
+ * -----------------------------------------------------------------------------
+ * | addr_array[count] | per 8byte | memory region base address |
+ * -----------------------------------------------------------------------------
+ * | size_array[count] | per 8byte | memory region size (byte) |
+ * -----------------------------------------------------------------------------
+ */
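+
+/*
+ * Purely illustrative sketch (not part of the interface): reading the layout
+ * above from DDR_PARAM_BASE is equivalent to reading the following pseudo
+ * struct, with 'count' entries in each array:
+ *
+ *	struct ddr_usage_info {
+ *		uint32_t count;               offset 0x0 (REGION_NR_OFFSET)
+ *		uint32_t reserved;            padding for 8-byte alignment
+ *		uint64_t addr_array[count];   offset 0x8 (REGION_ADDR_OFFSET)
+ *		uint64_t size_array[count];   follows addr_array
+ *	};
+ */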
+
+/*
+ * function: read the ns-region parameters and derive the s-region info
+ *
+ * @addr: base address of the ddr usage struct delivered by the miniloader
+ * @max_mb: the maximum ddr capacity (MB) that the platform supports
+ */
+struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb)
+{
+ uint64_t base, top;
+ uint32_t i, addr_offset, size_offset;
+ struct param_ddr_usage p;
+
+ memset(&p, 0, sizeof(p));
+
+ /* read the number of ns-regions, located at offset 0x0 */
+ p.ns_nr = mmio_read_32(addr + REGION_NR_OFFSET);
+ if ((p.ns_nr > DDR_REGION_NR_MAX) || (p.ns_nr == 0)) {
+ ERROR("over or zero region, nr=%d, max=%d\n",
+ p.ns_nr, DDR_REGION_NR_MAX);
+ return p;
+ }
+
+ /* whole ddr boundary, used later when deriving the s-regions */
+ p.boundary = max_mb;
+
+ /* calculate ns-region base addr and size offset */
+ addr_offset = REGION_ADDR_OFFSET;
+ size_offset = REGION_ADDR_OFFSET + p.ns_nr * REGION_DATA_PER_BYTES;
+
+ /* read each ns-region's base and top address */
+ for (i = 0; i < p.ns_nr; i++) {
+ base = mmio_read_64(addr + addr_offset);
+ top = base + mmio_read_64(addr + size_offset);
+ /*
+ * convert bytes to MB and store the info; the miniloader
+ * guarantees that every ns-region is MB aligned.
+ */
+ p.ns_base[i] = RG_SIZE_MB(base);
+ p.ns_top[i] = RG_SIZE_MB(top);
+
+ addr_offset += REGION_DATA_PER_BYTES;
+ size_offset += REGION_DATA_PER_BYTES;
+ }
+
+ /*
+ * an s-region's base starts at the previous ns-region's top, and an
+ * s-region's top ends at the next ns-region's base, for example:
+ *
+ * case1: the first ns-region starts at 0MB
+ * -----------------------------------------------
+ * | ns0 | S0 | ns1 | S1 | ns2 |
+ * 0----------------------------------------------- max_mb
+ *
+ *
+ * case2: the first ns-region does not start at 0MB
+ * -----------------------------------------------
+ * | S0 | ns0 | ns1 | ns2 | S1 |
+ * 0----------------------------------------------- max_mb
+ */
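+
+ /*
+ * Worked example (illustrative only): with max_mb = 2048 and two
+ * ns-regions [0MB, 128MB) and [256MB, 2048MB), the code below yields a
+ * single s-region [128MB, 256MB): the leading check is skipped because
+ * ns0 starts at 0MB, and the loop stops once ns1 reaches the boundary.
+ */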
+
+ /* as in case2 above, the first ns-region does not start at 0MB */
+ if (p.ns_base[0] != 0) {
+ p.s_base[p.s_nr] = 0;
+ p.s_top[p.s_nr] = p.ns_base[0];
+ p.s_nr++;
+ }
+
+ /*
+ * note: if the ns-regions do not start at 0MB, p.s_nr is 1 here, otherwise 0
+ */
+ for (i = 0; i < p.ns_nr; i++) {
+ /*
+ * if the current ns-region's top reaches the boundary,
+ * all s-regions have been parsed, so finish.
+ */
+ if (p.ns_top[i] == p.boundary)
+ goto out;
+
+ /* s-region's base starts from previous ns-region's top */
+ p.s_base[p.s_nr] = p.ns_top[i];
+
+ /* s-region's top ends with next ns-region's base */
+ if (i + 1 < p.ns_nr)
+ p.s_top[p.s_nr] = p.ns_base[i + 1];
+ else
+ p.s_top[p.s_nr] = p.boundary;
+ p.s_nr++;
+ }
+out:
+ return p;
+}
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PARAMETER_H__
+#define __PARAMETER_H__
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <plat_private.h>
+#include <soc.h>
+#include <string.h>
+
+#define DDR_REGION_NR_MAX 10
+#define REGION_NR_OFFSET 0
+#define REGION_ADDR_OFFSET 8
+#define REGION_DATA_PER_BYTES 8
+#define RG_SIZE_MB(byte) ((byte) >> 20)
+
+/* unit: MB */
+struct param_ddr_usage {
+ uint64_t boundary;
+
+ uint32_t ns_nr;
+ uint64_t ns_base[DDR_REGION_NR_MAX];
+ uint64_t ns_top[DDR_REGION_NR_MAX];
+
+ uint32_t s_nr;
+ uint64_t s_base[DDR_REGION_NR_MAX];
+ uint64_t s_top[DDR_REGION_NR_MAX];
+};
+
+struct param_ddr_usage ddr_region_usage_parse(uint64_t addr, uint64_t max_mb);
+
+#endif /* __PARAMETER_H__ */
#ifndef __PMU_COM_H__
#define __PMU_COM_H__
+#ifndef CHECK_CPU_WFIE_BASE
+#define CHECK_CPU_WFIE_BASE (PMU_BASE + PMU_CORE_PWR_ST)
+#endif
/*
* Use this macro to instantiate lock before it is used in below
* rockchip_pd_lock_xxx() macros
else
wfie_msk <<= (clstl_cpu_wfe + cpu_id);
- while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) &&
+ while (!(mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) &&
(loop < CHK_CPU_LOOP)) {
udelay(1);
loop++;
}
- if ((mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) & wfie_msk) == 0) {
+ if ((mmio_read_32(CHECK_CPU_WFIE_BASE) & wfie_msk) == 0) {
WARN("%s: %d, %d, %d, error!\n", __func__,
cluster_id, cpu_id, wfie_msk);
return -EINVAL;
.pwr_domain_suspend = rockchip_pwr_domain_suspend,
.pwr_domain_on_finish = rockchip_pwr_domain_on_finish,
.pwr_domain_suspend_finish = rockchip_pwr_domain_suspend_finish,
+ .pwr_domain_pwr_down_wfi = rockchip_pd_pwr_down_wfi,
.system_reset = rockchip_system_reset,
.system_off = rockchip_system_poweroff,
.validate_power_state = rockchip_validate_power_state,
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+.globl clst_warmboot_data
+
+.macro func_rockchip_clst_warmboot
+.endm
+
+.macro rockchip_clst_warmboot_data
+clst_warmboot_data:
+ .rept PLATFORM_CLUSTER_COUNT
+ .word 0
+ .endr
+.endm
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <debug.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <bl31.h>
+#include <console.h>
+#include <delay_timer.h>
+#include <errno.h>
+#include <mmio.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <plat_private.h>
+#include <pmu_sram.h>
+#include <pmu.h>
+#include <rk3328_def.h>
+#include <pmu_com.h>
+
+DEFINE_BAKERY_LOCK(rockchip_pd_lock);
+
+static struct psram_data_t *psram_sleep_cfg =
+ (struct psram_data_t *)PSRAM_DT_BASE;
+
+static struct rk3328_sleep_ddr_data ddr_data;
+static __sramdata struct rk3328_sleep_sram_data sram_data;
+
+static uint32_t cpu_warm_boot_addr;
+
+#pragma weak rk3328_pmic_suspend
+#pragma weak rk3328_pmic_resume
+
+void plat_rockchip_pmusram_prepare(void)
+{
+ uint32_t *sram_dst, *sram_src;
+ size_t sram_size = 2;
+ /*
+ * pmu sram code and data prepare
+ */
+ sram_dst = (uint32_t *)PMUSRAM_BASE;
+ sram_src = (uint32_t *)&pmu_cpuson_entrypoint_start;
+ sram_size = (uint32_t *)&pmu_cpuson_entrypoint_end -
+ (uint32_t *)sram_src;
+ u32_align_cpy(sram_dst, sram_src, sram_size);
+
+ psram_sleep_cfg->sp = PSRAM_DT_BASE;
+}
+
+static inline uint32_t get_cpus_pwr_domain_cfg_info(uint32_t cpu_id)
+{
+ uint32_t pd_reg, apm_reg;
+
+ pd_reg = mmio_read_32(PMU_BASE + PMU_PWRDN_CON) & BIT(cpu_id);
+ apm_reg = mmio_read_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id)) &
+ BIT(core_pm_en);
+
+ if (pd_reg && !apm_reg)
+ return core_pwr_pd;
+ else if (!pd_reg && apm_reg)
+ return core_pwr_wfi;
+
+ ERROR("%s: 0x%x, 0x%x\n", __func__, pd_reg, apm_reg);
+ while (1)
+ ;
+}
+
+static int cpus_power_domain_on(uint32_t cpu_id)
+{
+ uint32_t cpu_pd, cfg_info;
+
+ cpu_pd = PD_CPU0 + cpu_id;
+ cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);
+
+ if (cfg_info == core_pwr_pd) {
+ /* disable apm cfg */
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+ CORES_PM_DISABLE);
+
+ /* if the core is already on, power it off first */
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+ CORES_PM_DISABLE);
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+ }
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
+ } else {
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
+ WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
+ return -EINVAL;
+ }
+
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+ BIT(core_pm_sft_wakeup_en));
+ }
+
+ return 0;
+}
+
+static int cpus_power_domain_off(uint32_t cpu_id, uint32_t pd_cfg)
+{
+ uint32_t cpu_pd, core_pm_value;
+
+ cpu_pd = PD_CPU0 + cpu_id;
+ if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
+ return 0;
+
+ if (pd_cfg == core_pwr_pd) {
+ if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
+ return -EINVAL;
+ /* disable apm cfg */
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+ CORES_PM_DISABLE);
+ pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
+ } else {
+ core_pm_value = BIT(core_pm_en) | BIT(core_pm_dis_int);
+ if (pd_cfg == core_pwr_wfi_int)
+ core_pm_value |= BIT(core_pm_int_wakeup_en);
+
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id),
+ core_pm_value);
+ }
+
+ return 0;
+}
+
+static void nonboot_cpus_off(void)
+{
+ uint32_t boot_cpu, cpu;
+
+ /* turn off the non-boot cpus */
+ boot_cpu = plat_my_core_pos();
+ for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+ if (cpu == boot_cpu)
+ continue;
+ cpus_power_domain_off(cpu, core_pwr_pd);
+ }
+}
+
+int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
+{
+ uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+ assert(cpuson_flags[cpu_id] == 0);
+ cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
+ cpuson_entry_point[cpu_id] = entrypoint;
+ dsb();
+
+ cpus_power_domain_on(cpu_id);
+
+ return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_off(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ cpus_power_domain_off(cpu_id, core_pwr_wfi);
+
+ return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_suspend(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ assert(cpuson_flags[cpu_id] == 0);
+ cpuson_flags[cpu_id] = PMU_CPU_AUTO_PWRDN;
+ cpuson_entry_point[cpu_id] = (uintptr_t)plat_get_sec_entrypoint();
+ dsb();
+
+ cpus_power_domain_off(cpu_id, core_pwr_wfi_int);
+
+ return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_on_finish(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);
+
+ return 0;
+}
+
+int rockchip_soc_cores_pwr_dm_resume(void)
+{
+ uint32_t cpu_id = plat_my_core_pos();
+
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(cpu_id), CORES_PM_DISABLE);
+
+ return 0;
+}
+
+void __dead2 rockchip_soc_soft_reset(void)
+{
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(CPLL_ID));
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(GPLL_ID));
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(NPLL_ID));
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(APLL_ID));
+ dsb();
+
+ mmio_write_32(CRU_BASE + CRU_GLB_SRST_FST, CRU_GLB_SRST_FST_VALUE);
+ dsb();
+ /*
+ * The HW may need some time to reset the system,
+ * so we do not want the core to keep executing code.
+ */
+ while (1)
+ ;
+}
+
+/*
+ * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
+ * If the PMIC is configured to respond to the sleep pin by powering off,
+ * driving the pin high powers the PMIC off.
+ */
+void __dead2 rockchip_soc_system_off(void)
+{
+ uint32_t val;
+
+ /* gpio config */
+ val = mmio_read_32(GRF_BASE + GRF_GPIO2D_IOMUX);
+ val &= ~GPIO2_D2_GPIO_MODE;
+ mmio_write_32(GRF_BASE + GRF_GPIO2D_IOMUX, val);
+
+ /* config output */
+ val = mmio_read_32(GPIO2_BASE + SWPORTA_DDR);
+ val |= GPIO2_D2;
+ mmio_write_32(GPIO2_BASE + SWPORTA_DDR, val);
+
+ /* config output high level */
+ val = mmio_read_32(GPIO2_BASE);
+ val |= GPIO2_D2;
+ mmio_write_32(GPIO2_BASE, val);
+ dsb();
+
+ while (1)
+ ;
+}
+
+static uint32_t clk_ungt_msk[CRU_CLKGATE_NUMS] = {
+ 0x187f, 0x0000, 0x010c, 0x0000, 0x0200,
+ 0x0010, 0x0000, 0x0017, 0x001f, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0003, 0x0000,
+ 0xf001, 0x27c0, 0x04d9, 0x03ff, 0x0000,
+ 0x0000, 0x0000, 0x0010, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0003, 0x0008
+};
+
+static void clks_gating_suspend(uint32_t *ungt_msk)
+{
+ int i;
+
+ for (i = 0; i < CRU_CLKGATE_NUMS; i++) {
+ ddr_data.clk_ungt_save[i] =
+ mmio_read_32(CRU_BASE + CRU_CLKGATE_CON(i));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
+ ((~ungt_msk[i]) << 16) | 0xffff);
+ }
+}
+
+static void clks_gating_resume(void)
+{
+ int i;
+
+ for (i = 0; i < CRU_CLKGATE_NUMS; i++)
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(i),
+ ddr_data.clk_ungt_save[i] | 0xffff0000);
+}
+
+static inline void pm_pll_wait_lock(uint32_t pll_id)
+{
+ uint32_t delay = PLL_LOCKED_TIMEOUT;
+
+ while (delay > 0) {
+ if (mmio_read_32(CRU_BASE + PLL_CONS(pll_id, 1)) &
+ PLL_IS_LOCKED)
+ break;
+ delay--;
+ }
+ if (delay == 0)
+ ERROR("lock-pll: %d\n", pll_id);
+}
+
+static inline void pll_pwr_dwn(uint32_t pll_id, uint32_t pd)
+{
+ mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+ BITS_WITH_WMASK(1, 1, 15));
+ if (pd)
+ mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+ BITS_WITH_WMASK(1, 1, 14));
+ else
+ mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+ BITS_WITH_WMASK(0, 1, 14));
+}
+
+static __sramfunc void dpll_suspend(void)
+{
+ int i;
+
+ /* slow mode */
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(DPLL_ID));
+
+ /* save pll con */
+ for (i = 0; i < CRU_PLL_CON_NUMS; i++)
+ sram_data.dpll_con_save[i] =
+ mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, i));
+ mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+ BITS_WITH_WMASK(1, 1, 15));
+ mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+ BITS_WITH_WMASK(1, 1, 14));
+}
+
+static __sramfunc void dpll_resume(void)
+{
+ uint32_t delay = PLL_LOCKED_TIMEOUT;
+
+ mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+ BITS_WITH_WMASK(1, 1, 15));
+ mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+ BITS_WITH_WMASK(0, 1, 14));
+ mmio_write_32(CRU_BASE + PLL_CONS(DPLL_ID, 1),
+ sram_data.dpll_con_save[1] | 0xc0000000);
+
+ dsb();
+
+ while (delay > 0) {
+ if (mmio_read_32(CRU_BASE + PLL_CONS(DPLL_ID, 1)) &
+ PLL_IS_LOCKED)
+ break;
+ delay--;
+ }
+ if (delay == 0)
+ while (1)
+ ;
+
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE,
+ PLL_NORM_MODE(DPLL_ID));
+}
+
+static inline void pll_suspend(uint32_t pll_id)
+{
+ int i;
+
+ /* slow mode */
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE, PLL_SLOW_MODE(pll_id));
+
+ /* save pll con */
+ for (i = 0; i < CRU_PLL_CON_NUMS; i++)
+ ddr_data.cru_plls_con_save[pll_id][i] =
+ mmio_read_32(CRU_BASE + PLL_CONS(pll_id, i));
+
+ /* powerdown pll */
+ pll_pwr_dwn(pll_id, pmu_pd_off);
+}
+
+static inline void pll_resume(uint32_t pll_id)
+{
+ mmio_write_32(CRU_BASE + PLL_CONS(pll_id, 1),
+ ddr_data.cru_plls_con_save[pll_id][1] | 0xc0000000);
+
+ pm_pll_wait_lock(pll_id);
+
+ if (PLL_IS_NORM_MODE(ddr_data.cru_mode_save, pll_id))
+ mmio_write_32(CRU_BASE + CRU_CRU_MODE,
+ PLL_NORM_MODE(pll_id));
+}
+
+static void pm_plls_suspend(void)
+{
+ ddr_data.cru_mode_save = mmio_read_32(CRU_BASE + CRU_CRU_MODE);
+ ddr_data.clk_sel0 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(0));
+ ddr_data.clk_sel1 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(1));
+ ddr_data.clk_sel18 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(18));
+ ddr_data.clk_sel20 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(20));
+ ddr_data.clk_sel24 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(24));
+ ddr_data.clk_sel38 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON(38));
+ pll_suspend(NPLL_ID);
+ pll_suspend(CPLL_ID);
+ pll_suspend(GPLL_ID);
+ pll_suspend(APLL_ID);
+
+ /* core */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
+ BITS_WITH_WMASK(0, 0x1f, 0));
+
+ /* pclk_dbg */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
+ BITS_WITH_WMASK(0, 0xf, 0));
+
+ /* crypto */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
+ BITS_WITH_WMASK(0, 0x1f, 0));
+
+ /* pwm0 */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
+ BITS_WITH_WMASK(0, 0x7f, 8));
+
+ /* uart2 from 24M */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
+ BITS_WITH_WMASK(2, 0x3, 8));
+
+ /* clk_rtc32k */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
+ BITS_WITH_WMASK(767, 0x3fff, 0) |
+ BITS_WITH_WMASK(2, 0x3, 14));
+}
+
+static void pm_plls_resume(void)
+{
+ /* clk_rtc32k */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(38),
+ ddr_data.clk_sel38 |
+ BITS_WMSK(0x3fff, 0) |
+ BITS_WMSK(0x3, 14));
+
+ /* uart2 */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(18),
+ ddr_data.clk_sel18 | BITS_WMSK(0x3, 8));
+
+ /* pwm0 */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(24),
+ ddr_data.clk_sel24 | BITS_WMSK(0x7f, 8));
+
+ /* crypto */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(20),
+ ddr_data.clk_sel20 | BITS_WMSK(0x1f, 0));
+
+ /* pclk_dbg */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(1),
+ ddr_data.clk_sel1 | BITS_WMSK(0xf, 0));
+
+ /* core */
+ mmio_write_32(CRU_BASE + CRU_CLKSEL_CON(0),
+ ddr_data.clk_sel0 | BITS_WMSK(0x1f, 0));
+
+ pll_pwr_dwn(APLL_ID, pmu_pd_on);
+ pll_pwr_dwn(GPLL_ID, pmu_pd_on);
+ pll_pwr_dwn(CPLL_ID, pmu_pd_on);
+ pll_pwr_dwn(NPLL_ID, pmu_pd_on);
+
+ pll_resume(APLL_ID);
+ pll_resume(GPLL_ID);
+ pll_resume(CPLL_ID);
+ pll_resume(NPLL_ID);
+}
+
+#define ARCH_TIMER_TICKS_PER_US (SYS_COUNTER_FREQ_IN_TICKS / 1000000)
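+/* with the 24 MHz system counter on rk3328 this evaluates to 24 ticks/us */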
+
+static __sramfunc void sram_udelay(uint32_t us)
+{
+ uint64_t pct_orig, pct_now;
+ uint64_t to_wait = ARCH_TIMER_TICKS_PER_US * us;
+
+ isb();
+ pct_orig = read_cntpct_el0();
+
+ do {
+ isb();
+ pct_now = read_cntpct_el0();
+ } while ((pct_now - pct_orig) <= to_wait);
+}
+
+/*
+ * The sleep pin of the RK805 PMIC is connected to gpio2_d2 of the rk3328.
+ * If the PMIC is configured to respond to the sleep pin by entering
+ * sleep mode, driving the pin high puts the PMIC into sleep mode.
+ */
+__sramfunc void rk3328_pmic_suspend(void)
+{
+ sram_data.pmic_sleep_save = mmio_read_32(GRF_BASE + PMIC_SLEEP_REG);
+ sram_data.pmic_sleep_gpio_save[1] = mmio_read_32(GPIO2_BASE + 4);
+ sram_data.pmic_sleep_gpio_save[0] = mmio_read_32(GPIO2_BASE);
+ mmio_write_32(GRF_BASE + PMIC_SLEEP_REG, BITS_WITH_WMASK(0, 0x3, 4));
+ mmio_write_32(GPIO2_BASE + 4,
+ sram_data.pmic_sleep_gpio_save[1] | BIT(26));
+ mmio_write_32(GPIO2_BASE,
+ sram_data.pmic_sleep_gpio_save[0] | BIT(26));
+}
+
+__sramfunc void rk3328_pmic_resume(void)
+{
+ mmio_write_32(GPIO2_BASE, sram_data.pmic_sleep_gpio_save[0]);
+ mmio_write_32(GPIO2_BASE + 4, sram_data.pmic_sleep_gpio_save[1]);
+ mmio_write_32(GRF_BASE + PMIC_SLEEP_REG,
+ sram_data.pmic_sleep_save | BITS_WMSK(0xffff, 0));
+ /* restoring the voltage takes a while */
+ sram_udelay(100);
+}
+
+static inline void rockchip_set_sram_sp(uint64_t set_sp)
+{
+ __asm volatile("mov sp, %0\n"::"r" (set_sp) : "sp");
+}
+
+static __sramfunc void ddr_suspend(void)
+{
+ sram_data.pd_sr_idle_save = mmio_read_32(DDR_UPCTL_BASE +
+ DDR_PCTL2_PWRCTL);
+ sram_data.pd_sr_idle_save &= SELFREF_EN;
+
+ mmio_clrbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL, SELFREF_EN);
+ sram_data.ddr_grf_con0 = mmio_read_32(DDR_GRF_BASE +
+ DDRGRF_SOC_CON(0));
+ mmio_write_32(DDR_GRF_BASE, BIT_WITH_WMSK(14) | WMSK_BIT(15));
+
+ /*
+ * Override csysreq from the ddrc and send a valid csysreq
+ * signal to the PMU; csysreq is controlled by the ddrc only.
+ */
+
+ /* enter self-refresh */
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
+ while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
+ (0x03 << 12)) != (0x02 << 12))
+ ;
+ /* ddr retention */
+ mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
+
+ /* ddr gating */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
+ BITS_WITH_WMASK(0x7, 0x7, 4));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
+ BITS_WITH_WMASK(1, 1, 4));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+ BITS_WITH_WMASK(0x1ff, 0x1ff, 1));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
+ BITS_WITH_WMASK(0x3, 0x3, 0));
+
+ dpll_suspend();
+}
+
+static __sramfunc void ddr_resume(void)
+{
+ dpll_resume();
+
+ /* ddr gating */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(0),
+ BITS_WITH_WMASK(0, 0x7, 4));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(7),
+ BITS_WITH_WMASK(0, 1, 4));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(18),
+ BITS_WITH_WMASK(0, 0x1ff, 1));
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(27),
+ BITS_WITH_WMASK(0, 0x3, 0));
+
+ /* ddr de_retention */
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(2));
+ /* exit self-refresh */
+ mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(0));
+ while ((mmio_read_32(DDR_GRF_BASE + DDRGRF_SOC_STATUS(1)) &
+ (0x03 << 12)) != (0x00 << 12))
+ ;
+
+ mmio_write_32(DDR_GRF_BASE, sram_data.ddr_grf_con0 | 0xc0000000);
+ if (sram_data.pd_sr_idle_save)
+ mmio_setbits_32(DDR_UPCTL_BASE + DDR_PCTL2_PWRCTL,
+ SELFREF_EN);
+}
+
+static __sramfunc void sram_dbg_uart_suspend(void)
+{
+ sram_data.uart2_ier = mmio_read_32(UART2_BASE + UART_IER);
+ mmio_write_32(UART2_BASE + UART_IER, UART_INT_DISABLE);
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20002000);
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040004);
+}
+
+static __sramfunc void sram_dbg_uart_resume(void)
+{
+ /* restore uart clk and reset fifo */
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(16), 0x20000000);
+ mmio_write_32(CRU_BASE + CRU_CLKGATE_CON(2), 0x00040000);
+ mmio_write_32(UART2_BASE + UART_FCR, UART_FIFO_RESET);
+ mmio_write_32(UART2_BASE + UART_IER, sram_data.uart2_ier);
+}
+
+static __sramfunc void sram_soc_enter_lp(void)
+{
+ uint32_t apm_value;
+
+ apm_value = BIT(core_pm_en) |
+ BIT(core_pm_dis_int) |
+ BIT(core_pm_int_wakeup_en);
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(PD_CPU0), apm_value);
+
+ dsb();
+ isb();
+err_loop:
+ wfi();
+ /*
+ * The SoC will enter low power mode and
+ * should not return here.
+ */
+ goto err_loop;
+}
+
+__sramfunc void sram_suspend(void)
+{
+ /* disable mmu and icache */
+ tlbialle3();
+ disable_mmu_icache_el3();
+
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
+ CPU_BOOT_ADDR_WMASK);
+
+ /* ddr self-refresh and gating phy */
+ ddr_suspend();
+
+ rk3328_pmic_suspend();
+
+ sram_dbg_uart_suspend();
+
+ sram_soc_enter_lp();
+}
+
+static __sramfunc void sys_resume_first(void)
+{
+ sram_dbg_uart_resume();
+
+ rk3328_pmic_resume();
+
+ /* ddr self-refresh exit */
+ ddr_resume();
+
+ /* disable apm cfg */
+ mmio_write_32(PMU_BASE + PMU_CPUAPM_CON(0), CORES_PM_DISABLE);
+
+ /* the warm boot address for the cpus */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+ CPU_BOOT_ADDR_WMASK);
+}
+
+void __dead2 rockchip_soc_sys_pd_pwr_dn_wfi(void)
+{
+ rockchip_set_sram_sp(PSRAM_DT_BASE);
+
+ sram_suspend();
+
+ /* should never reach here */
+ psci_power_down_wfi();
+}
+
+int rockchip_soc_sys_pwr_dm_suspend(void)
+{
+ clks_gating_suspend(clk_ungt_msk);
+
+ pm_plls_suspend();
+
+ return 0;
+}
+
+int rockchip_soc_sys_pwr_dm_resume(void)
+{
+ pm_plls_resume();
+
+ clks_gating_resume();
+
+ plat_rockchip_gic_cpuif_enable();
+
+ return 0;
+}
+
+void plat_rockchip_pmu_init(void)
+{
+ uint32_t cpu;
+
+ for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
+ cpuson_flags[cpu] = 0;
+
+ cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;
+ psram_sleep_cfg->ddr_func = (uint64_t)sys_resume_first;
+ psram_sleep_cfg->ddr_data = 0x00;
+ psram_sleep_cfg->ddr_flag = 0x01;
+ psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;
+
+ /* the warm boot address for the cpus */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1),
+ (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
+ CPU_BOOT_ADDR_WMASK);
+
+ nonboot_cpus_off();
+
+ INFO("%s: pd status 0x%x\n",
+ __func__, mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
+}
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PMU_H__
+#define __PMU_H__
+
+#include <soc.h>
+
+struct rk3328_sleep_ddr_data {
+ uint32_t pmu_debug_enable;
+ uint32_t debug_iomux_save;
+ uint32_t pmic_sleep_save;
+ uint32_t pmu_wakeup_conf0;
+ uint32_t pmu_pwrmd_com;
+ uint32_t cru_mode_save;
+ uint32_t clk_sel0, clk_sel1, clk_sel18,
+ clk_sel20, clk_sel24, clk_sel38;
+ uint32_t clk_ungt_save[CRU_CLKGATE_NUMS];
+ uint32_t cru_plls_con_save[MAX_PLL][CRU_PLL_CON_NUMS];
+};
+
+struct rk3328_sleep_sram_data {
+ uint32_t pmic_sleep_save;
+ uint32_t pmic_sleep_gpio_save[2];
+ uint32_t ddr_grf_con0;
+ uint32_t dpll_con_save[CRU_PLL_CON_NUMS];
+ uint32_t pd_sr_idle_save;
+ uint32_t uart2_ier;
+};
+
+/*****************************************************************************
+ * The ways of controlling the cores' power domains
+ *****************************************************************************/
+enum cores_pm_ctr_mode {
+ core_pwr_pd = 0,
+ core_pwr_wfi = 1,
+ core_pwr_wfi_int = 2
+};
+
+enum pmu_cores_pm_by_wfi {
+ core_pm_en = 0,
+ core_pm_int_wakeup_en,
+ core_pm_dis_int,
+ core_pm_sft_wakeup_en
+};
+
+extern void *pmu_cpuson_entrypoint_start;
+extern void *pmu_cpuson_entrypoint_end;
+extern uint64_t cpuson_entry_point[PLATFORM_CORE_COUNT];
+extern uint32_t cpuson_flags[PLATFORM_CORE_COUNT];
+
+#define CORES_PM_DISABLE 0x0
+
+/*****************************************************************************
+ * pmu con,reg
+ *****************************************************************************/
+#define PMU_WAKEUP_CFG0 0x00
+#define PMU_PWRDN_CON 0x0c
+#define PMU_PWRDN_ST 0x10
+#define PMU_PWRMD_COM 0x18
+#define PMU_SFT_CON 0x1c
+#define PMU_INT_CON 0x20
+#define PMU_INT_ST 0x24
+#define PMU_POWER_ST 0x44
+#define PMU_CPUAPM_CON(n) (0x80 + (n) * 4)
+#define PMU_SYS_REG(n) (0xa0 + (n) * 4)
+
+#define CHECK_CPU_WFIE_BASE (GRF_BASE + GRF_CPU_STATUS(1))
+
+enum pmu_core_pwrst_shift {
+ clst_cpu_wfe = 0,
+ clst_cpu_wfi = 4,
+};
+
+#define clstl_cpu_wfe (clst_cpu_wfe)
+#define clstb_cpu_wfe (clst_cpu_wfe)
+
+enum pmu_pd_id {
+ PD_CPU0 = 0,
+ PD_CPU1,
+ PD_CPU2,
+ PD_CPU3,
+};
+
+enum pmu_power_mode_common {
+ pmu_mode_en = 0,
+ sref_enter_en,
+ global_int_disable_cfg,
+ cpu0_pd_en,
+ wait_wakeup_begin_cfg = 4,
+ l2_flush_en,
+ l2_idle_en,
+ ddrio_ret_de_req,
+ ddrio_ret_en = 8,
+};
+
+enum pmu_sft_con {
+ upctl_c_sysreq_cfg = 0,
+ l2flushreq_req,
+ ddr_io_ret_cfg,
+ pmu_sft_ret_cfg,
+};
+
+#define CKECK_WFE_MSK 0x1
+#define CKECK_WFI_MSK 0x10
+#define CKECK_WFEI_MSK 0x11
+
+#define PD_CTR_LOOP 500
+#define CHK_CPU_LOOP 500
+#define MAX_WAIT_CONUT 1000
+
+#define WAKEUP_INT_CLUSTER_EN 0x1
+#define PMIC_SLEEP_REG 0x34
+
+#define PLL_IS_NORM_MODE(mode, pll_id) \
+ ((mode & (PLL_NORM_MODE(pll_id)) & 0xffff) != 0)
+
+#define CTLR_ENABLE_G1_BIT BIT(1)
+#define UART_FIFO_EMPTY BIT(6)
+
+#define UART_IER 0x04
+#define UART_FCR 0x08
+#define UART_LSR 0x14
+
+#define UART_INT_DISABLE 0x00
+#define UART_FIFO_RESET 0x07
+
+#endif /* __PMU_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <console.h>
+#include <debug.h>
+#include <delay_timer.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <plat_private.h>
+#include <ddr_parameter.h>
+#include <rk3328_def.h>
+#include <soc.h>
+
+/* Table of regions to map using the MMU. */
+const mmap_region_t plat_rk_mmap[] = {
+ MAP_REGION_FLAT(UART2_BASE, UART2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GPIO0_BASE, GPIO0_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GPIO1_BASE, GPIO1_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GPIO2_BASE, GPIO2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GPIO3_BASE, GPIO3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GRF_BASE, GRF_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(FIREWALL_DDR_BASE, FIREWALL_DDR_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(FIREWALL_CFG_BASE, FIREWALL_CFG_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(SHARE_MEM_BASE, SHARE_MEM_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DDR_GRF_BASE, DDR_GRF_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DDR_UPCTL_BASE, DDR_UPCTL_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(PWM_BASE, PWM_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DDR_PARAM_BASE, DDR_PARAM_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(EFUSE8_BASE, EFUSE8_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(EFUSE32_BASE, EFUSE32_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DDR_PHY_BASE, DDR_PHY_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(SERVER_MSCH_BASE, SERVER_MSCH_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DDR_MONITOR_BASE, DDR_MONITOR_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(VOP_BASE, VOP_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+
+ { 0 }
+};
+
+/* The RockChip power domain tree descriptor */
+const unsigned char rockchip_power_domain_tree_desc[] = {
+ /* No of root nodes */
+ PLATFORM_SYSTEM_COUNT,
+ /* No of children for the root node */
+ PLATFORM_CLUSTER_COUNT,
+ /* No of children for the first cluster node */
+ PLATFORM_CLUSTER0_CORE_COUNT,
+};
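+
+/*
+ * With the counts from platform_def.h this descriptor reads { 1, 1, 4 }:
+ * one system node containing one cluster of four cores.
+ */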
+
+void secure_timer_init(void)
+{
+ mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT0, 0xffffffff);
+ mmio_write_32(STIMER_CHN_BASE(1) + TIMER_LOADE_COUNT1, 0xffffffff);
+ /* auto reload & enable the timer */
+ mmio_write_32(STIMER_CHN_BASE(1) + TIMER_CONTROL_REG, TIMER_EN);
+}
+
+void sgrf_init(void)
+{
+ uint32_t i, val;
+ struct param_ddr_usage usg;
+
+ /* configure the secure ddr regions */
+ usg = ddr_region_usage_parse(DDR_PARAM_BASE,
+ PLAT_MAX_DDR_CAPACITY_MB);
+ for (i = 0; i < usg.s_nr; i++) {
+ /* enable secure */
+ val = mmio_read_32(FIREWALL_DDR_BASE +
+ FIREWALL_DDR_FW_DDR_CON_REG);
+ val |= BIT(7 - i);
+ mmio_write_32(FIREWALL_DDR_BASE +
+ FIREWALL_DDR_FW_DDR_CON_REG, val);
+ /* map top and base */
+ mmio_write_32(FIREWALL_DDR_BASE +
+ FIREWALL_DDR_FW_DDR_RGN(7 - i),
+ RG_MAP_SECURE(usg.s_top[i], usg.s_base[i]));
+ }
+
+ /* set ddr rgn0_top and rgn0_base to 0 */
+ mmio_write_32(FIREWALL_DDR_BASE + FIREWALL_DDR_FW_DDR_RGN(0), 0x0);
+
+ /* set all slave ip to non-secure, except the stimer */
+ mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(0),
+ SGRF_SLV_S_ALL_NS);
+ mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(1),
+ SGRF_SLV_S_ALL_NS);
+ mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(2),
+ SGRF_SLV_S_ALL_NS | STIMER_S);
+ mmio_write_32(FIREWALL_CFG_BASE + FIREWALL_CFG_FW_SYS_CON(3),
+ SGRF_SLV_S_ALL_NS);
+
+ /* set all master ip to non-secure */
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(2), 0xf0000000);
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(3), SGRF_MST_S_ALL_NS);
+ mmio_write_32(SGRF_BASE + SGRF_SOC_CON(4), SGRF_MST_S_ALL_NS);
+
+ /* set the DMAC to non-secure */
+ mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(3), DMA_IRQ_BOOT_NS);
+ mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(4), DMA_PERI_CH_NS_15_0);
+ mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_PERI_CH_NS_19_16);
+ mmio_write_32(SGRF_BASE + SGRF_DMAC_CON(5), DMA_MANAGER_BOOT_NS);
+
+ /* soft reset dma before use */
+ mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_REQ);
+ udelay(5);
+ mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(3), DMA_SOFTRST_RLS);
+}
+
+void plat_rockchip_soc_init(void)
+{
+ secure_timer_init();
+ sgrf_init();
+
+ NOTICE("BL31:Rockchip release version: v%d.%d\n",
+ MAJOR_VERSION, MINOR_VERSION);
+}
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SOC_H__
+#define __SOC_H__
+
+/******************************* stimer ***************************************/
+#define TIMER_LOADE_COUNT0 0x00
+#define TIMER_LOADE_COUNT1 0x04
+#define TIMER_CURRENT_VALUE0 0x08
+#define TIMER_CURRENT_VALUE1 0x0C
+#define TIMER_CONTROL_REG 0x10
+#define TIMER_INTSTATUS 0x18
+#define TIMER_EN 0x1
+
+extern const unsigned char rockchip_power_domain_tree_desc[];
+
+/**************************** read/write **************************************/
+#ifndef BITS_WMSK
+#define BITS_WMSK(msk, shift) ((msk) << (shift + REG_MSK_SHIFT))
+#endif
+
+/**************************** cru *********************************************/
+enum plls_id {
+ APLL_ID = 0,
+ DPLL_ID,
+ CPLL_ID,
+ GPLL_ID,
+ REVERVE,
+ NPLL_ID,
+ MAX_PLL,
+};
+
+#define CRU_CRU_MODE 0x0080
+#define CRU_CRU_MISC 0x0084
+#define CRU_GLB_SRST_FST 0x009c
+#define CRU_GLB_SRST_FST_VALUE 0xfdb9
+#define PLL_CONS(id, i) (0x020 * (id) + ((i) * 4))
+#define CRU_CLKSEL_CON(i) (0x100 + ((i) * 4))
+#define CRU_CLKSEL_NUMS 53
+#define CRU_CLKGATE_CON(i) (0x200 + ((i) * 4))
+#define CRU_CLKGATE_NUMS 29
+#define CRU_SOFTRSTS_CON(n) (0x300 + ((n) * 4))
+#define CRU_SOFTRSTS_NUMS 12
+#define CRU_PLL_CON_NUMS 5
+
+/* PLLn_CON1 */
+#define PLL_IS_LOCKED BIT(10)
+/* PLLn_CON0 */
+#define PLL_BYPASS BITS_WITH_WMASK(1, 0x1, 15)
+#define PLL_NO_BYPASS BITS_WITH_WMASK(0, 0x1, 15)
+/* CRU_MODE */
+#define PLL_SLOW_MODE(id) ((id) == NPLL_ID) ? \
+ BITS_WITH_WMASK(0, 0x1, 1) : \
+ BITS_WITH_WMASK(0, 0x1, ((id) * 4))
+#define PLL_NORM_MODE(id) ((id) == NPLL_ID) ? \
+ BITS_WITH_WMASK(1, 0x1, 1) : \
+ BITS_WITH_WMASK(1, 0x1, ((id) * 4))
+
+#define CRU_GATEID_CONS(ID) (0x200 + ((ID) / 16) * 4)
+#define CRU_CONS_GATEID(i) (16 * (i))
+#define GATE_ID(reg, bit) (((reg) * 16) + (bit))
+
+#define PLL_LOCKED_TIMEOUT 600000U
+
+#define STIMER_CHN_BASE(n) (STIME_BASE + 0x20 * (n))
+/************************** config regs ***************************************/
+#define FIREWALL_CFG_FW_SYS_CON(n) (0x000 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_RGN(n) (0x000 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_MST(n) (0x020 + (n) * 4)
+#define FIREWALL_DDR_FW_DDR_CON_REG (0x040)
+#define GRF_SOC_CON(n) (0x400 + (n) * 4)
+#define GRF_SOC_STATUS(n) (0x480 + (n) * 4)
+#define GRF_CPU_STATUS(n) (0x520 + (n) * 4)
+#define GRF_OS_REG(n) (0x5c8 + (n) * 4)
+#define DDRGRF_SOC_CON(n) (0x000 + (n) * 4)
+#define DDRGRF_SOC_STATUS(n) (0x100 + (n) * 4)
+#define SGRF_SOC_CON(n) (0x000 + (n) * 4)
+#define SGRF_DMAC_CON(n) (0x100 + (n) * 4)
+#define SGRF_HDCP_KEY_CON(n) (0x280 + (n) * 4)
+
+#define DDR_PCTL2_PWRCTL 0x30
+/************************** regs func *****************************************/
+#define STIMER_S BIT(23)
+#define SGRF_SLV_S_ALL_NS 0x0
+#define SGRF_MST_S_ALL_NS 0xffffffff
+#define DMA_IRQ_BOOT_NS 0xffffffff
+#define DMA_MANAGER_BOOT_NS 0x80008000
+#define DMA_PERI_CH_NS_15_0 0xffffffff
+#define DMA_PERI_CH_NS_19_16 0x000f000f
+#define DMA_SOFTRST_REQ 0x01000100
+#define DMA_SOFTRST_RLS 0x01000000
+
+#define SELFREF_EN BIT(0)
+/************************** cpu ***********************************************/
+#define CPU_BOOT_ADDR_WMASK 0xffff0000
+#define CPU_BOOT_ADDR_ALIGN 16
+
+/************************** ddr secure region *********************************/
+#define PLAT_MAX_DDR_CAPACITY_MB 4096
+#define RG_MAP_SECURE(top, base) ((((top) - 1) << 16) | (base))
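+/*
+ * Illustrative example: RG_MAP_SECURE(256, 128) evaluates to
+ * ((255 << 16) | 128), i.e. a secure window from 128MB up to 256MB as
+ * programmed into FIREWALL_DDR_FW_DDR_RGN(n) by sgrf_init().
+ */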
+
+/************************** gpio2_d2 ******************************************/
+#define SWPORTA_DR 0x00
+#define SWPORTA_DDR 0x04
+#define GPIO2_D2 BIT(26)
+#define GPIO2_D2_GPIO_MODE 0x30
+#define GRF_GPIO2D_IOMUX 0x34
+
+#endif /* __SOC_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __ROCKCHIP_PLAT_LD_S__
+#define __ROCKCHIP_PLAT_LD_S__
+
+MEMORY {
+ SRAM (rwx): ORIGIN = SRAM_LDS_BASE, LENGTH = SRAM_LDS_SIZE
+}
+
+SECTIONS
+{
+ . = SRAM_LDS_BASE;
+ ASSERT(. == ALIGN(4096),
+ "SRAM_BASE address is not aligned on a page boundary.")
+
+ /*
+ * The SRAM space allocation for RK3328
+ * ----------------
+ * | sram text
+ * ----------------
+ * | sram data
+ * ----------------
+ */
+ .text_sram : ALIGN(4096) {
+ __bl31_sram_text_start = .;
+ *(.sram.text)
+ *(.sram.rodata)
+ . = ALIGN(4096);
+ __bl31_sram_text_end = .;
+ } >SRAM
+
+ .data_sram : ALIGN(4096) {
+ __bl31_sram_data_start = .;
+ *(.sram.data)
+ . = ALIGN(4096);
+ __bl31_sram_data_end = .;
+ } >SRAM
+ __sram_incbin_start = .;
+ __sram_incbin_end = .;
+}
+
+#endif /* __ROCKCHIP_PLAT_LD_S__ */
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#include <arch.h>
+#include <common_def.h>
+#include <rk3328_def.h>
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL1
+#define PLATFORM_STACK_SIZE 0x440
+#elif IMAGE_BL2
+#define PLATFORM_STACK_SIZE 0x400
+#elif IMAGE_BL31
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL32
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
+
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#define PLATFORM_SYSTEM_COUNT 1
+#define PLATFORM_CLUSTER_COUNT 1
+#define PLATFORM_CLUSTER0_CORE_COUNT 4
+#define PLATFORM_CLUSTER1_CORE_COUNT 0
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
+ PLATFORM_CLUSTER0_CORE_COUNT)
+
+#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
+ PLATFORM_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+
+#define PLAT_MAX_PWR_LVL MPIDR_AFFLVL2
+
+#define PLAT_RK_CLST_TO_CPUID_SHIFT 6
+
+/*
+ * This macro defines the deepest retention state possible. A higher state
+ * id will represent an invalid or a power down state.
+ */
+#define PLAT_MAX_RET_STATE 1
+
+/*
+ * This macro defines the deepest power down states possible. Any state ID
+ * higher than this is invalid.
+ */
+#define PLAT_MAX_OFF_STATE 2
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* TF text, ro, rw, Size: 512KB */
+#define TZRAM_BASE (0x0)
+#define TZRAM_SIZE (0x80000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted RAM
+ */
+#define BL31_BASE (TZRAM_BASE + 0x10000)
+#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE (1ull << 32)
+#define MAX_XLAT_TABLES 9
+#define MAX_MMAP_REGIONS 33
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+/*
+ * Define GICD and GICC and GICR base
+ */
+#define PLAT_RK_GICD_BASE RK3328_GICD_BASE
+#define PLAT_RK_GICC_BASE RK3328_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_RK_G1S_IRQS RK_G1S_IRQS
+
+#define PLAT_RK_UART_BASE RK3328_UART2_BASE
+#define PLAT_RK_UART_CLOCK RK3328_UART_CLOCK
+#define PLAT_RK_UART_BAUDRATE RK3328_BAUDRATE
+
+#define PLAT_RK_PRIMARY_CPU 0x0
+
+#endif /* __PLATFORM_DEF_H__ */
--- /dev/null
+#
+# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+RK_PLAT := plat/rockchip
+RK_PLAT_SOC := ${RK_PLAT}/${PLAT}
+RK_PLAT_COMMON := ${RK_PLAT}/common
+
+PLAT_INCLUDES := -Idrivers/arm/gic/common/ \
+ -Idrivers/arm/gic/v2/ \
+ -Iinclude/plat/common/ \
+ -I${RK_PLAT_COMMON}/ \
+ -I${RK_PLAT_COMMON}/include/ \
+ -I${RK_PLAT_COMMON}/pmusram \
+ -I${RK_PLAT_COMMON}/drivers/pmu/ \
+ -I${RK_PLAT_COMMON}/drivers/parameter/ \
+ -I${RK_PLAT_SOC}/ \
+ -I${RK_PLAT_SOC}/drivers/pmu/ \
+ -I${RK_PLAT_SOC}/drivers/soc/ \
+ -I${RK_PLAT_SOC}/include/
+
+RK_GIC_SOURCES := drivers/arm/gic/common/gic_common.c \
+ drivers/arm/gic/v2/gicv2_main.c \
+ drivers/arm/gic/v2/gicv2_helpers.c \
+ plat/common/plat_gicv2.c \
+ ${RK_PLAT}/common/rockchip_gicv2.c
+
+PLAT_BL_COMMON_SOURCES := lib/aarch64/xlat_tables.c \
+ plat/common/aarch64/plat_psci_common.c
+
+BL31_SOURCES += ${RK_GIC_SOURCES} \
+ drivers/arm/cci/cci.c \
+ drivers/console/console.S \
+ drivers/ti/uart/16550_console.S \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ lib/cpus/aarch64/aem_generic.S \
+ lib/cpus/aarch64/cortex_a53.S \
+ ${RK_PLAT_COMMON}/drivers/parameter/ddr_parameter.c \
+ ${RK_PLAT_COMMON}/aarch64/plat_helpers.S \
+ ${RK_PLAT_COMMON}/bl31_plat_setup.c \
+ ${RK_PLAT_COMMON}/pmusram/pmu_sram.c \
+ ${RK_PLAT_COMMON}/pmusram/pmu_sram_cpus_on.S \
+ ${RK_PLAT_COMMON}/plat_pm.c \
+ ${RK_PLAT_COMMON}/plat_topology.c \
+ ${RK_PLAT_COMMON}/aarch64/platform_common.c \
+ ${RK_PLAT_SOC}/drivers/pmu/pmu.c \
+ ${RK_PLAT_SOC}/drivers/soc/soc.c
+
+ENABLE_PLAT_COMPAT := 0
+
+$(eval $(call add_define,PLAT_EXTRA_LD_SCRIPT))
+$(eval $(call add_define,PLAT_SKIP_OPTEE_S_EL1_INT_REGISTER))
--- /dev/null
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLAT_DEF_H__
+#define __PLAT_DEF_H__
+
+#define MAJOR_VERSION (1)
+#define MINOR_VERSION (2)
+
+#define SIZE_K(n) ((n) * 1024)
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define RK_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
+
+#define UART2_BASE 0xff130000
+#define UART2_SIZE SIZE_K(64)
+
+#define PMU_BASE 0xff140000
+#define PMU_SIZE SIZE_K(64)
+
+#define SGRF_BASE 0xff0d0000
+#define SGRF_SIZE SIZE_K(64)
+
+#define CRU_BASE 0xff440000
+#define CRU_SIZE SIZE_K(64)
+
+#define GRF_BASE 0xff100000
+#define GRF_SIZE SIZE_K(64)
+
+#define GPIO0_BASE 0xff210000
+#define GPIO0_SIZE SIZE_K(32)
+
+#define GPIO1_BASE 0xff220000
+#define GPIO1_SIZE SIZE_K(32)
+
+#define GPIO2_BASE 0xff230000
+#define GPIO2_SIZE SIZE_K(64)
+
+#define GPIO3_BASE 0xff240000
+#define GPIO3_SIZE SIZE_K(64)
+
+#define STIME_BASE 0xff1d0000
+#define STIME_SIZE SIZE_K(64)
+
+#define INTMEM_BASE 0xff090000
+#define INTMEM_SIZE SIZE_K(32)
+
+#define SRAM_LDS_BASE (INTMEM_BASE + SIZE_K(4))
+#define SRAM_LDS_SIZE (INTMEM_SIZE - SIZE_K(4))
+
+#define PMUSRAM_BASE INTMEM_BASE
+#define PMUSRAM_SIZE SIZE_K(4)
+#define PMUSRAM_RSIZE SIZE_K(4)
+
+#define VOP_BASE 0xff370000
+#define VOP_SIZE SIZE_K(16)
+
+#define DDR_PHY_BASE 0xff400000
+#define DDR_PHY_SIZE SIZE_K(4)
+
+#define SERVER_MSCH_BASE 0xff720000
+#define SERVER_MSCH_SIZE SIZE_K(4)
+
+#define DDR_UPCTL_BASE 0xff780000
+#define DDR_UPCTL_SIZE SIZE_K(12)
+
+#define DDR_MONITOR_BASE 0xff790000
+#define DDR_MONITOR_SIZE SIZE_K(4)
+
+#define FIREWALL_DDR_BASE 0xff7c0000
+#define FIREWALL_DDR_SIZE SIZE_K(64)
+
+#define FIREWALL_CFG_BASE 0xff7d0000
+#define FIREWALL_CFG_SIZE SIZE_K(64)
+
+#define GIC400_BASE 0xff810000
+#define GIC400_SIZE SIZE_K(64)
+
+#define DDR_GRF_BASE 0xff798000
+#define DDR_GRF_SIZE SIZE_K(16)
+
+#define PWM_BASE 0xff1b0000
+#define PWM_SIZE SIZE_K(64)
+
+#define DDR_PARAM_BASE 0x02000000
+#define DDR_PARAM_SIZE SIZE_K(4)
+
+#define EFUSE8_BASE 0xff260000
+#define EFUSE8_SIZE SIZE_K(4)
+
+#define EFUSE32_BASE 0xff0b0000
+#define EFUSE32_SIZE SIZE_K(4)
+
+/**************************************************************************
+ * UART related constants
+ **************************************************************************/
+#define RK3328_UART2_BASE UART2_BASE
+#define RK3328_BAUDRATE 1500000
+#define RK3328_UART_CLOCK 24000000
+
+/******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS 24000000U
+#define SYS_COUNTER_FREQ_IN_MHZ 24
+
+/******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base rk_platform compatible GIC memory map */
+#define RK3328_GICD_BASE (GIC400_BASE + 0x1000)
+#define RK3328_GICC_BASE (GIC400_BASE + 0x2000)
+#define RK3328_GICR_BASE 0 /* no GICR in GIC-400 */
+
+/******************************************************************************
+ * sgi, ppi
+ ******************************************************************************/
+#define RK_IRQ_SEC_PHY_TIMER 29
+
+#define RK_IRQ_SEC_SGI_0 8
+#define RK_IRQ_SEC_SGI_1 9
+#define RK_IRQ_SEC_SGI_2 10
+#define RK_IRQ_SEC_SGI_3 11
+#define RK_IRQ_SEC_SGI_4 12
+#define RK_IRQ_SEC_SGI_5 13
+#define RK_IRQ_SEC_SGI_6 14
+#define RK_IRQ_SEC_SGI_7 15
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define RK_G1S_IRQS RK_IRQ_SEC_PHY_TIMER, RK_IRQ_SEC_SGI_6
+
+#define SHARE_MEM_BASE 0x100000 /* [1MB, 1MB+60K] */
+#define SHARE_MEM_PAGE_NUM 15
+#define SHARE_MEM_SIZE SIZE_K(SHARE_MEM_PAGE_NUM * 4)
+
+#endif /* __PLAT_DEF_H__ */