Intel SocFPGA platform ports
----------------------------
:M: Tien Hock Loh <tien.hock.loh@intel.com>
-:G: `thloh85-intel`
+:G: `thloh85-intel`_
+:M: Hadi Asyrafi <muhammad.hadi.asyrafi.abdul.halim@intel.com>
+:G: `mabdulha`_
:F: plat/intel/soc
+:F: drivers/intel/soc/
MediaTek platform ports
-----------------------
--- /dev/null
+Intel Agilex SoCFPGA
+========================
+
+Agilex SoCFPGA is an FPGA with an integrated quad-core 64-bit Arm Cortex-A53 processor.
+
+Upon boot, the Boot ROM loads BL2 into OCRAM. BL2 subsequently initializes
+the hardware, then loads BL31 and BL33 (UEFI) into DDR and boots BL33.
+
+::
+
+ Boot ROM --> Trusted Firmware-A --> UEFI
+
+How to build
+------------
+
+Code Locations
+~~~~~~~~~~~~~~
+
+- Trusted Firmware-A:
+ `link <https://github.com/ARM-software/arm-trusted-firmware>`__
+
+- UEFI (to be updated with new upstreamed UEFI):
+ `link <https://github.com/altera-opensource/uefi-socfpga>`__
+
+Build Procedure
+~~~~~~~~~~~~~~~
+
+- Fetch both repositories above onto the local host and keep them under the
+  same ${BUILD_PATH}.
+
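+One possible way to do this (the checkout location is only an example):
+
+.. code:: bash
+
+ # any working directory can serve as ${BUILD_PATH}
+ mkdir -p ${BUILD_PATH} && cd ${BUILD_PATH}
+ git clone https://github.com/ARM-software/arm-trusted-firmware
+ git clone https://github.com/altera-opensource/uefi-socfpga
+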
+- Prepare the AArch64 toolchain.
+
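+For example, with a GNU ``aarch64-linux-gnu`` cross toolchain already installed
+on the host (the install path below is illustrative):
+
+.. code:: bash
+
+ # adjust the path to the local toolchain installation
+ export PATH=/path/to/aarch64/toolchain/bin:$PATH
+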
+- Build UEFI using the Agilex platform configuration.
+  (This will be updated to a UEFI built from the latest upstream EDK2 source.)
+
+.. code:: bash
+
+ make CROSS_COMPILE=aarch64-linux-gnu- device=agx
+
+- Build TF-A, providing the previously generated UEFI as the BL33 image
+
+.. code:: bash
+
+ make CROSS_COMPILE=aarch64-linux-gnu- bl2 fip PLAT=agilex \
+ BL33=PEI.ROM
+
+Install Procedure
+~~~~~~~~~~~~~~~~~
+
+- dd fip.bin to an A2 partition on the MMC drive used to boot the Agilex
+  board, for example:
+
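+A minimal sketch, assuming the MMC device enumerates as ``/dev/sdX`` and the A2
+partition is partition 3 (both are assumptions; adjust for the actual board):
+
+.. code:: bash
+
+ # /dev/sdX3 is a placeholder for the A2 partition device node
+ sudo dd if=fip.bin of=/dev/sdX3
+ sync
+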
+- Generate a SOF containing BL2
+
+.. code:: bash
+
+ aarch64-linux-gnu-objcopy -I binary -O ihex --change-addresses 0xffe00000 bl2.bin bl2.hex
+ quartus_cpf --bootloader bl2.hex <quartus_generated_sof> <output_sof_with_bl2>
+
+- Configure the SOF onto the board
+
+.. code:: bash
+
+ nios2-configure-sof <output_sof_with_bl2>
+
+Boot trace
+----------
+
+::
+
+ INFO: DDR: DRAM calibration success.
+ INFO: ECC is disabled.
+ NOTICE: BL2: v2.1(debug)
+ NOTICE: BL2: Built
+ INFO: BL2: Doing platform setup
+ NOTICE: BL2: Booting BL31
+ INFO: Entry point address = 0xffe1c000
+ INFO: SPSR = 0x3cd
+ NOTICE: BL31: v2.1(debug)
+ NOTICE: BL31: Built
+ INFO: ARM GICv2 driver initialized
+ INFO: BL31: Initializing runtime services
+ WARNING: BL31: cortex_a53
+ INFO: BL31: Preparing for EL3 exit to normal world
+ INFO: Entry point address = 0x50000
+ INFO: SPSR = 0x3c9
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <cpu_macros.S>
+#include <platform_def.h>
+
+ .globl plat_secondary_cold_boot_setup
+ .globl platform_is_primary_cpu
+ .globl plat_is_my_cpu_primary
+ .globl plat_my_core_pos
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl plat_crash_console_flush
+ .globl platform_mem_init
+
+ .globl plat_get_my_entrypoint
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+ * needed for a secondary cpu after a cold reset e.g
+ * mark the cpu's presence, mechanism to place it in a
+ * holding pen etc.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+ /* Poll the mailbox until rstmgr releases this CPU, then jump to its entry point */
+poll_mailbox:
+ wfi
+
+ mov_imm x0, PLAT_AGX_SEC_ENTRY
+ ldr x1, [x0]
+ mov_imm x2, PLAT_CPUID_RELEASE
+ ldr x3, [x2]
+ mrs x4, mpidr_el1
+ and x4, x4, #0xff
+ cmp x3, x4
+ b.ne poll_mailbox
+ br x1
+endfunc plat_secondary_cold_boot_setup
+
+func platform_is_primary_cpu
+ and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+ cmp x0, #PLAT_PRIMARY_CPU
+ cset x0, eq
+ ret
+endfunc platform_is_primary_cpu
+
+func plat_is_my_cpu_primary
+ mrs x0, mpidr_el1
+ b platform_is_primary_cpu
+endfunc plat_is_my_cpu_primary
+
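+ /* -----------------------------------------------------
+ * unsigned int plat_my_core_pos(void);
+ *
+ * Returns the linear core index: (ClusterId * 4) + CoreId.
+ * The MPIDR cluster field (bits [15:8]) shifted right by
+ * 6 yields ClusterId * 4, to which the core id is added.
+ * -----------------------------------------------------
+ */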
+func plat_my_core_pos
+ mrs x0, mpidr_el1
+ and x1, x0, #MPIDR_CPU_MASK
+ and x0, x0, #MPIDR_CLUSTER_MASK
+ add x0, x1, x0, LSR #6
+ ret
+endfunc plat_my_core_pos
+
+func plat_get_my_entrypoint
+ mov_imm x1, PLAT_AGX_SEC_ENTRY
+ ldr x0, [x1]
+ ret
+endfunc plat_get_my_entrypoint
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, PLAT_UART0_BASE
+ mov_imm x1, PLAT_UART_CLOCK
+ mov_imm x2, PLAT_BAUDRATE
+ b console_16550_core_init
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+ * int plat_crash_console_putc(void)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, PLAT_UART0_BASE
+ b console_16550_core_putc
+endfunc plat_crash_console_putc
+
+func plat_crash_console_flush
+ mov_imm x0, CRASH_CONSOLE_BASE
+ b console_16550_core_flush
+endfunc plat_crash_console_flush
+
+
+ /* --------------------------------------------------------
+ * void platform_mem_init (void);
+ *
+ * Any memory init, relocation to be done before the
+ * platform boots. Called very early in the boot process.
+ * --------------------------------------------------------
+ */
+func platform_mem_init
+ mov x0, #0
+ ret
+endfunc platform_mem_init
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+#include <socfpga_private.h>
+
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return PLAT_SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+unsigned long socfpga_get_ns_image_entrypoint(void)
+{
+ return PLAT_NS_IMAGE_OFFSET;
+}
+
+/******************************************************************************
+ * Gets SPSR for BL32 entry
+ *****************************************************************************/
+uint32_t socfpga_get_spsr_for_bl32_entry(void)
+{
+ /*
+ * The Secure Payload Dispatcher service is responsible for
+ * setting the SPSR prior to entry into the BL32 image.
+ */
+ return 0;
+}
+
+/******************************************************************************
+ * Gets SPSR for BL33 entry
+ *****************************************************************************/
+uint32_t socfpga_get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/bl_common.h>
+#include <common/desc_image_load.h>
+#include <platform_def.h>
+#include <plat/common/platform.h>
+
+
+/*******************************************************************************
+ * Following descriptor provides BL image/ep information that gets used
+ * by BL2 to load the images and also subset of this information is
+ * passed to next BL image. The image loading sequence is managed by
+ * populating the images in required loading order. The image execution
+ * sequence is managed by populating the `next_handoff_image_id` with
+ * the next executable image id.
+ ******************************************************************************/
+static bl_mem_params_node_t bl2_mem_params_descs[] = {
+#ifdef SCP_BL2_BASE
+ /* Fill SCP_BL2 related information if it exists */
+ {
+ .image_id = SCP_BL2_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_IMAGE_BINARY,
+ VERSION_2, entry_point_info_t, SECURE | NON_EXECUTABLE),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_IMAGE_BINARY,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = SCP_BL2_BASE,
+ .image_info.image_max_size = SCP_BL2_SIZE,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+#endif /* SCP_BL2_BASE */
+
+#ifdef EL3_PAYLOAD_BASE
+ /* Fill EL3 payload related information (BL31 is EL3 payload)*/
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = EL3_PAYLOAD_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t,
+ IMAGE_ATTRIB_PLAT_SETUP | IMAGE_ATTRIB_SKIP_LOADING),
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+
+#else /* EL3_PAYLOAD_BASE */
+
+ /* Fill BL31 related information */
+ {
+ .image_id = BL31_IMAGE_ID,
+
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t,
+ SECURE | EXECUTABLE | EP_FIRST_EXE),
+ .ep_info.pc = BL31_BASE,
+ .ep_info.spsr = SPSR_64(MODE_EL3, MODE_SP_ELX,
+ DISABLE_ALL_EXCEPTIONS),
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, IMAGE_ATTRIB_PLAT_SETUP),
+ .image_info.image_base = BL31_BASE,
+ .image_info.image_max_size = BL31_LIMIT - BL31_BASE,
+
+ .next_handoff_image_id = BL33_IMAGE_ID,
+ },
+#endif /* EL3_PAYLOAD_BASE */
+
+ {
+ .image_id = BL33_IMAGE_ID,
+ SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP,
+ VERSION_2, entry_point_info_t, NON_SECURE | EXECUTABLE),
+ .ep_info.pc = PLAT_NS_IMAGE_OFFSET,
+
+ SET_STATIC_PARAM_HEAD(image_info, PARAM_EP,
+ VERSION_2, image_info_t, 0),
+ .image_info.image_base = PLAT_NS_IMAGE_OFFSET,
+ .image_info.image_max_size =
+ 0x0 + 0x40000000 - PLAT_NS_IMAGE_OFFSET,
+
+ .next_handoff_image_id = INVALID_IMAGE_ID,
+ },
+};
+
+REGISTER_BL_IMAGE_DESCS(bl2_mem_params_descs)
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <common/bl_common.h>
+#include <common/debug.h>
+#include <common/desc_image_load.h>
+#include <drivers/generic_delay_timer.h>
+#include <drivers/synopsys/dw_mmc.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/xlat_tables/xlat_tables.h>
+#include <platform_def.h>
+#include <socfpga_private.h>
+
+#include "agilex_clock_manager.h"
+#include "agilex_handoff.h"
+#include "agilex_mailbox.h"
+#include "agilex_memory_controller.h"
+#include "agilex_pinmux.h"
+#include "agilex_private.h"
+#include "agilex_reset_manager.h"
+#include "agilex_system_manager.h"
+
+#include "ccu/ncore_ccu.h"
+#include "qspi/cadence_qspi.h"
+#include "wdt/watchdog.h"
+
+
+const mmap_region_t agilex_plat_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE,
+ MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ {0},
+};
+
+boot_source_type boot_source;
+
+void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1,
+ u_register_t x2, u_register_t x4)
+{
+ static console_16550_t console;
+ handoff reverse_handoff_ptr;
+
+ generic_delay_timer_init();
+
+ if (agilex_get_handoff(&reverse_handoff_ptr))
+ return;
+ config_pinmux(&reverse_handoff_ptr);
+ boot_source = reverse_handoff_ptr.boot_source;
+ config_clkmgr_handoff(&reverse_handoff_ptr);
+
+ enable_nonsecure_access();
+ deassert_peripheral_reset();
+ config_hps_hs_before_warm_reset();
+
+ watchdog_init(get_wdt_clk(&reverse_handoff_ptr));
+
+ console_16550_register(PLAT_UART0_BASE, PLAT_UART_CLOCK, PLAT_BAUDRATE,
+ &console);
+
+ socfpga_delay_timer_init();
+ init_ncore_ccu();
+ init_hard_memory_controller();
+ enable_ns_bridge_access();
+}
+
+
+void bl2_el3_plat_arch_setup(void)
+{
+
+ struct mmc_device_info info;
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL2_BASE, BL2_END - BL2_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM_BAR
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0},
+ };
+
+ setup_page_tables(bl_regions, agilex_plat_mmap);
+
+ enable_mmu_el3(0);
+
+ dw_mmc_params_t params = EMMC_INIT_PARAMS(0x100000);
+
+ info.mmc_dev_type = MMC_IS_SD;
+ info.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3;
+
+ mailbox_init();
+
+ switch (boot_source) {
+ case BOOT_SOURCE_SDMMC:
+ dw_mmc_init(&params, &info);
+ socfpga_io_setup(boot_source);
+ break;
+
+ case BOOT_SOURCE_QSPI:
+ mailbox_set_qspi_open();
+ mailbox_set_qspi_direct();
+ cad_qspi_init(0, QSPI_CONFIG_CPHA, QSPI_CONFIG_CPOL,
+ QSPI_CONFIG_CSDA, QSPI_CONFIG_CSDADS,
+ QSPI_CONFIG_CSEOT, QSPI_CONFIG_CSSOT, 0);
+ socfpga_io_setup(boot_source);
+ break;
+
+ default:
+ ERROR("Unsupported boot source\n");
+ panic();
+ break;
+ }
+}
+
+uint32_t get_spsr_for_bl33_entry(void)
+{
+ unsigned long el_status;
+ unsigned int mode;
+ uint32_t spsr;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ mode = (el_status) ? MODE_EL2 : MODE_EL1;
+
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+ spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ return spsr;
+}
+
+
+int bl2_plat_handle_post_image_load(unsigned int image_id)
+{
+ bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
+
+ switch (image_id) {
+ case BL33_IMAGE_ID:
+ bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
+ bl_mem_params->ep_info.spsr = get_spsr_for_bl33_entry();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Perform any BL2 platform setup code
+ ******************************************************************************/
+void bl2_platform_setup(void)
+{
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/bl_common.h>
+#include <drivers/arm/gicv2.h>
+#include <drivers/ti/uart/uart_16550.h>
+#include <lib/xlat_tables/xlat_tables.h>
+#include <platform_def.h>
+
+
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ next_image_info = (type == NON_SECURE) ?
+ &bl33_image_ep_info : &bl32_image_ep_info;
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
+ u_register_t arg2, u_register_t arg3)
+{
+ static console_16550_t console;
+
+ console_16550_register(PLAT_UART0_BASE, PLAT_UART_CLOCK, PLAT_BAUDRATE,
+ &console);
+ /*
+ * Check that the parameters passed from BL2 are not NULL.
+ */
+ void *from_bl2 = (void *) arg0;
+
+ bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
+
+ assert(params_from_bl2 != NULL);
+ assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
+ assert(params_from_bl2->h.version >= VERSION_2);
+
+ /*
+ * Copy BL32 (if populated by BL2) and BL33 entry point information.
+ * They are stored in Secure RAM, in BL31's address space.
+ */
+
+ bl_params_node_t *bl_params = params_from_bl2->head;
+
+ while (bl_params) {
+ if (bl_params->image_id == BL33_IMAGE_ID)
+ bl33_image_ep_info = *bl_params->ep_info;
+
+ bl_params = bl_params->next_params_info;
+ }
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+
+static const interrupt_prop_t s10_interrupt_props[] = {
+ PLAT_INTEL_AGX_G1S_IRQ_PROPS(GICV2_INTR_GROUP0),
+ PLAT_INTEL_AGX_G0_IRQ_PROPS(GICV2_INTR_GROUP0)
+};
+
+static unsigned int target_mask_array[PLATFORM_CORE_COUNT];
+
+static const gicv2_driver_data_t plat_gicv2_gic_data = {
+ .gicd_base = PLAT_INTEL_AGX_GICD_BASE,
+ .gicc_base = PLAT_INTEL_AGX_GICC_BASE,
+ .interrupt_props = s10_interrupt_props,
+ .interrupt_props_num = ARRAY_SIZE(s10_interrupt_props),
+ .target_masks = target_mask_array,
+ .target_masks_num = ARRAY_SIZE(target_mask_array),
+};
+
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+void bl31_platform_setup(void)
+{
+ /* Initialize the gic cpu and distributor interfaces */
+ gicv2_driver_init(&plat_gicv2_gic_data);
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+}
+
+const mmap_region_t plat_agilex_mmap[] = {
+ MAP_REGION_FLAT(DRAM_BASE, DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE1_BASE, DEVICE1_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE2_BASE, DEVICE2_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(OCRAM_BASE, OCRAM_SIZE,
+ MT_NON_CACHEABLE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(DEVICE3_BASE, DEVICE3_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MEM64_BASE, MEM64_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ MAP_REGION_FLAT(DEVICE4_BASE, DEVICE4_SIZE, MT_DEVICE | MT_RW | MT_NS),
+ {0},
+};
+
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ const mmap_region_t bl_regions[] = {
+ MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
+ MT_MEMORY | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
+ MT_CODE | MT_SECURE),
+ MAP_REGION_FLAT(BL_RO_DATA_BASE,
+ BL_RO_DATA_END - BL_RO_DATA_BASE,
+ MT_RO_DATA | MT_SECURE),
+#if USE_COHERENT_MEM
+ MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
+ BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+#endif
+ {0},
+ };
+
+ setup_page_tables(bl_regions, plat_agilex_mmap);
+ enable_mmu_el3(0);
+}
+
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef CLOCKMANAGER_H
+#define CLOCKMANAGER_H
+
+#include "agilex_handoff.h"
+
+/* Clock Manager Registers */
+#define CLKMGR_OFFSET 0xffd10000
+
+#define CLKMGR_CTRL 0x0
+#define CLKMGR_STAT 0x4
+#define CLKMGR_INTRCLR 0x14
+
+/* Main PLL Group */
+#define CLKMGR_MAINPLL 0xffd10024
+#define CLKMGR_MAINPLL_EN 0x0
+#define CLKMGR_MAINPLL_BYPASS 0xc
+#define CLKMGR_MAINPLL_MPUCLK 0x18
+#define CLKMGR_MAINPLL_NOCCLK 0x1c
+#define CLKMGR_MAINPLL_NOCDIV 0x20
+#define CLKMGR_MAINPLL_PLLGLOB 0x24
+#define CLKMGR_MAINPLL_FDBCK 0x28
+#define CLKMGR_MAINPLL_MEM 0x2c
+#define CLKMGR_MAINPLL_MEMSTAT 0x30
+#define CLKMGR_MAINPLL_PLLC0 0x34
+#define CLKMGR_MAINPLL_PLLC1 0x38
+#define CLKMGR_MAINPLL_VCOCALIB 0x3c
+#define CLKMGR_MAINPLL_PLLC2 0x40
+#define CLKMGR_MAINPLL_PLLC3 0x44
+#define CLKMGR_MAINPLL_PLLM 0x48
+
+/* Peripheral PLL Group */
+#define CLKMGR_PERPLL 0xffd1007c
+#define CLKMGR_PERPLL_EN 0x0
+#define CLKMGR_PERPLL_BYPASS 0xc
+#define CLKMGR_PERPLL_EMACCTL 0x18
+#define CLKMGR_PERPLL_GPIODIV 0x1c
+#define CLKMGR_PERPLL_PLLGLOB 0x20
+#define CLKMGR_PERPLL_FDBCK 0x24
+#define CLKMGR_PERPLL_MEM 0x28
+#define CLKMGR_PERPLL_MEMSTAT 0x2c
+#define CLKMGR_PERPLL_PLLC0 0x30
+#define CLKMGR_PERPLL_PLLC1 0x34
+#define CLKMGR_PERPLL_VCOCALIB 0x38
+#define CLKMGR_PERPLL_PLLC2 0x3c
+#define CLKMGR_PERPLL_PLLC3 0x40
+#define CLKMGR_PERPLL_PLLM 0x44
+
+/* Altera Group */
+#define CLKMGR_ALTERA 0xffd100d0
+#define CLKMGR_ALTERA_JTAG 0x0
+#define CLKMGR_ALTERA_EMACACTR 0x4
+#define CLKMGR_ALTERA_EMACBCTR 0x8
+#define CLKMGR_ALTERA_EMACPTPCTR 0xc
+#define CLKMGR_ALTERA_GPIODBCTR 0x10
+#define CLKMGR_ALTERA_SDMMCCTR 0x14
+#define CLKMGR_ALTERA_S2FUSER0CTR 0x18
+#define CLKMGR_ALTERA_S2FUSER1CTR 0x1c
+#define CLKMGR_ALTERA_PSIREFCTR 0x20
+#define CLKMGR_ALTERA_EXTCNTRST 0x24
+
+/* Membus */
+#define CLKMGR_MEM_REQ BIT(24)
+#define CLKMGR_MEM_WR BIT(25)
+#define CLKMGR_MEM_ERR BIT(26)
+#define CLKMGR_MEM_WDAT_OFFSET 16
+#define CLKMGR_MEM_ADDR 0x4027
+#define CLKMGR_MEM_WDAT 0x80
+
+/* Clock Manager Macros */
+#define CLKMGR_CTRL_BOOTMODE_SET_MSK 0x00000001
+#define CLKMGR_STAT_BUSY_E_BUSY 0x1
+#define CLKMGR_STAT_BUSY(x) (((x) & 0x00000001) >> 0)
+#define CLKMGR_STAT_MAINPLLLOCKED(x) (((x) & 0x00000100) >> 8)
+#define CLKMGR_STAT_PERPLLLOCKED(x) (((x) & 0x00010000) >> 16)
+#define CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK 0x00000004
+#define CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK 0x00000008
+
+/* Main PLL Macros */
+#define CLKMGR_MAINPLL_EN_RESET 0x000000ff
+#define CLKMGR_MAINPLL_PLLM_MDIV(x) ((x) & 0x000003ff)
+#define CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK 0x00000001
+#define CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK 0x00000002
+
+#define CLKMGR_MAINPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8)
+#define CLKMGR_MAINPLL_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000f00) >> 8)
+#define CLKMGR_MAINPLL_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12)
+
+#define CLKMGR_MAINPLL_PLLGLOB_PSRC(x) (((x) & 0x00030000) >> 16)
+#define CLKMGR_MAINPLL_PLLGLOB_PSRC_EOSC1 0x0
+#define CLKMGR_MAINPLL_PLLGLOB_PSRC_INTOSC 0x1
+#define CLKMGR_MAINPLL_PLLGLOB_PSRC_F2S 0x2
+#define CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003ff)
+#define CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00ff0000)
+
+/* Peripheral PLL Macros */
+#define CLKMGR_PERPLL_EN_RESET 0x00000fff
+#define CLKMGR_PERPLL_PLLM_MDIV(x) ((x) & 0x000003ff)
+#define CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(x) (((x) << 0) & 0x0000ffff)
+#define CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK 0x00000001
+
+#define CLKMGR_PERPLL_PLLGLOB_REFCLKDIV(x) (((x) & 0x00003f00) >> 8)
+#define CLKMGR_PERPLL_PLLGLOB_AREFCLKDIV(x) (((x) & 0x00000f00) >> 8)
+#define CLKMGR_PERPLL_PLLGLOB_DREFCLKDIV(x) (((x) & 0x00003000) >> 12)
+
+#define CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK 0x00000002
+#define CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(x) (((x) << 0) & 0x000003ff)
+#define CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(x) (((x) << 16) & 0x00ff0000)
+
+/* Altera Macros */
+#define CLKMGR_ALTERA_EXTCNTRST_RESET 0xff
+
+
+typedef struct {
+ uint32_t clk_freq_of_eosc1;
+ uint32_t clk_freq_of_f2h_free;
+ uint32_t clk_freq_of_cb_intosc_ls;
+} CLOCK_SOURCE_CONFIG;
+
+void config_clkmgr_handoff(handoff *hoff_ptr);
+int get_wdt_clk(handoff *hoff_ptr);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef HANDOFF_H
+#define HANDOFF_H
+
+#define HANDOFF_MAGIC_HEADER 0x424f4f54 /* BOOT */
+#define HANDOFF_MAGIC_PINMUX_SEL 0x504d5558 /* PMUX */
+#define HANDOFF_MAGIC_IOCTLR 0x494f4354 /* IOCT */
+#define HANDOFF_MAGIC_FPGA 0x46504741 /* FPGA */
+#define HANDOFF_MAGIC_IODELAY 0x444c4159 /* DLAY */
+#define HANDOFF_MAGIC_CLOCK 0x434c4b53 /* CLKS */
+#define HANDOFF_MAGIC_MISC 0x4d495343 /* MISC */
+
+typedef struct handoff_t {
+ /* header */
+ uint32_t header_magic;
+ uint32_t header_device;
+ uint32_t _pad_0x08_0x10[2];
+
+ /* pinmux configuration - select */
+ uint32_t pinmux_sel_magic;
+ uint32_t pinmux_sel_length;
+ uint32_t _pad_0x18_0x20[2];
+ uint32_t pinmux_sel_array[96]; /* offset, value */
+
+ /* pinmux configuration - io control */
+ uint32_t pinmux_io_magic;
+ uint32_t pinmux_io_length;
+ uint32_t _pad_0x1a8_0x1b0[2];
+ uint32_t pinmux_io_array[96]; /* offset, value */
+
+ /* pinmux configuration - use fpga switch */
+ uint32_t pinmux_fpga_magic;
+ uint32_t pinmux_fpga_length;
+ uint32_t _pad_0x338_0x340[2];
+ uint32_t pinmux_fpga_array[42]; /* offset, value */
+ uint32_t _pad_0x3e8_0x3f0[2];
+
+ /* pinmux configuration - io delay */
+ uint32_t pinmux_delay_magic;
+ uint32_t pinmux_delay_length;
+ uint32_t _pad_0x3f8_0x400[2];
+ uint32_t pinmux_iodelay_array[96]; /* offset, value */
+
+ /* clock configuration */
+ uint32_t clock_magic;
+ uint32_t clock_length;
+ uint32_t _pad_0x588_0x590[2];
+ uint32_t main_pll_mpuclk;
+ uint32_t main_pll_nocclk;
+ uint32_t main_pll_nocdiv;
+ uint32_t main_pll_pllglob;
+ uint32_t main_pll_fdbck;
+ uint32_t main_pll_pllc0;
+ uint32_t main_pll_pllc1;
+ uint32_t main_pll_pllc2;
+ uint32_t main_pll_pllc3;
+ uint32_t main_pll_pllm;
+ uint32_t per_pll_emacctl;
+ uint32_t per_pll_gpiodiv;
+ uint32_t per_pll_pllglob;
+ uint32_t per_pll_fdbck;
+ uint32_t per_pll_pllc0;
+ uint32_t per_pll_pllc1;
+ uint32_t per_pll_pllc2;
+ uint32_t per_pll_pllc3;
+ uint32_t per_pll_pllm;
+ uint32_t alt_emacactr;
+ uint32_t alt_emacbctr;
+ uint32_t alt_emacptpctr;
+ uint32_t alt_gpiodbctr;
+ uint32_t alt_sdmmcctr;
+ uint32_t alt_s2fuser0ctr;
+ uint32_t alt_s2fuser1ctr;
+ uint32_t alt_psirefctr;
+ uint32_t hps_osc_clk_h;
+ uint32_t fpga_clk_hz;
+ uint32_t _pad_0x604_0x610[3];
+
+ /* misc configuration */
+ uint32_t misc_magic;
+ uint32_t misc_length;
+ uint32_t _pad_0x618_0x620[2];
+ uint32_t boot_source;
+} handoff;
+
+int verify_handoff_image(handoff *hoff_ptr, handoff *reverse_hoff_ptr);
+int agilex_get_handoff(handoff *hoff_ptr);
+
+#endif
+
+
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_MBOX_H
+#define AGX_MBOX_H
+
+#define MBOX_OFFSET 0xffa30000
+
+#define MBOX_ATF_CLIENT_ID 0x1
+#define MBOX_JOB_ID 0x1
+
+/* Mailbox interrupt flags and masks */
+#define MBOX_INT_FLAG_COE 0x1
+#define MBOX_INT_FLAG_RIE 0x2
+#define MBOX_INT_FLAG_UAE 0x100
+#define MBOX_COE_BIT(INTERRUPT) ((INTERRUPT) & 0x3)
+#define MBOX_UAE_BIT(INTERRUPT) (((INTERRUPT) & (1<<8)))
+
+/* Mailbox response and status */
+#define MBOX_RESP_BUFFER_SIZE 16
+#define MBOX_RESP_ERR(BUFFER) ((BUFFER) & 0x00000fff)
+#define MBOX_RESP_LEN(BUFFER) (((BUFFER) & 0x007ff000) >> 12)
+#define MBOX_RESP_CLIENT_ID(BUFFER) (((BUFFER) & 0xf0000000) >> 28)
+#define MBOX_RESP_JOB_ID(BUFFER) (((BUFFER) & 0x0f000000) >> 24)
+#define MBOX_STATUS_UA_MASK (1<<8)
+
+/* Mailbox command and response */
+#define MBOX_CMD_FREE_OFFSET 0x14
+#define MBOX_CMD_BUFFER_SIZE 32
+#define MBOX_CLIENT_ID_CMD(CLIENT_ID) ((CLIENT_ID) << 28)
+#define MBOX_JOB_ID_CMD(JOB_ID) (JOB_ID<<24)
+#define MBOX_CMD_LEN_CMD(CMD_LEN) ((CMD_LEN) << 12)
+#define MBOX_INDIRECT (1 << 11)
+#define MBOX_INSUFFICIENT_BUFFER -2
+#define MBOX_CIN 0x00
+#define MBOX_ROUT 0x04
+#define MBOX_URG 0x08
+#define MBOX_INT 0x0C
+#define MBOX_COUT 0x20
+#define MBOX_RIN 0x24
+#define MBOX_STATUS 0x2C
+#define MBOX_CMD_BUFFER 0x40
+#define MBOX_RESP_BUFFER 0xC0
+
+#define MBOX_RESP_BUFFER_SIZE 16
+#define MBOX_RESP_OK 0
+#define MBOX_RESP_INVALID_CMD 1
+#define MBOX_RESP_UNKNOWN_BR 2
+#define MBOX_RESP_UNKNOWN 3
+#define MBOX_RESP_NOT_CONFIGURED 256
+
+/* Mailbox SDM doorbell */
+#define MBOX_DOORBELL_TO_SDM 0x400
+#define MBOX_DOORBELL_FROM_SDM 0x480
+
+/* Mailbox QSPI commands */
+#define MBOX_CMD_RESTART 2
+#define MBOX_CMD_QSPI_OPEN 50
+#define MBOX_CMD_QSPI_CLOSE 51
+#define MBOX_CMD_QSPI_DIRECT 59
+#define MBOX_CMD_GET_IDCODE 16
+#define MBOX_CMD_QSPI_SET_CS 52
+
+/* Mailbox REBOOT commands */
+#define MBOX_CMD_REBOOT_HPS 71
+
+/* Generic error handling */
+#define MBOX_TIMEOUT -2047
+#define MBOX_NO_RESPONSE -2
+#define MBOX_WRONG_ID -3
+
+/* Mailbox status */
+#define RECONFIG_STATUS_STATE 0
+#define RECONFIG_STATUS_PIN_STATUS 2
+#define RECONFIG_STATUS_SOFTFUNC_STATUS 3
+#define PIN_STATUS_NSTATUS (1 << 31)
+#define SOFTFUNC_STATUS_SEU_ERROR (1 << 3)
+#define SOFTFUNC_STATUS_INIT_DONE (1 << 1)
+#define SOFTFUNC_STATUS_CONF_DONE (1 << 0)
+#define MBOX_CFGSTAT_STATE_CONFIG 0x10000000
+
+/* SMC function IDs for SiP Service queries */
+#define SIP_SVC_CALL_COUNT 0x8200ff00
+#define SIP_SVC_UID 0x8200ff01
+#define SIP_SVC_VERSION 0x8200ff03
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR 0
+#define SIP_SVC_VERSION_MINOR 1
+
+/* Mailbox reconfiguration commands */
+#define MBOX_RECONFIG 6
+#define MBOX_RECONFIG_DATA 8
+#define MBOX_RECONFIG_STATUS 9
+
+/* Sip get memory */
+#define INTEL_SIP_SMC_FPGA_CONFIG_START 0xC2000001
+#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM 0xC2000005
+#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE 0xC2000004
+#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE 0x42000002
+#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE 0xC2000003
+#define INTEL_SIP_SMC_STATUS_OK 0
+#define INTEL_SIP_SMC_STATUS_ERROR 0x4
+#define INTEL_SIP_SMC_STATUS_BUSY 0x1
+#define INTEL_SIP_SMC_STATUS_REJECTED 0x2
+#define INTEL_SIP_SMC_FPGA_CONFIG_ADDR 0x1000
+#define INTEL_SIP_SMC_FPGA_CONFIG_SIZE 16777216
+
+void mailbox_set_int(int interrupt_input);
+int mailbox_init(void);
+void mailbox_set_qspi_close(void);
+void mailbox_set_qspi_open(void);
+void mailbox_set_qspi_direct(void);
+int mailbox_send_cmd(int job_id, unsigned int cmd, uint32_t *args,
+ int len, int urgent, uint32_t *response);
+void mailbox_send_cmd_async(int job_id, unsigned int cmd, uint32_t *args,
+ int len, int urgent);
+int mailbox_read_response(int job_id, uint32_t *response);
+int mailbox_get_qspi_clock(void);
+void mailbox_reset_cold(void);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_MEMORYCONTROLLER_H
+#define AGX_MEMORYCONTROLLER_H
+
+#define AGX_MPFE_IOHMC_REG_DRAMADDRW 0xf80100a8
+#define AGX_MPFE_IOHMC_CTRLCFG0 0xf8010028
+#define AGX_MPFE_IOHMC_CTRLCFG1 0xf801002c
+#define AGX_MPFE_IOHMC_DRAMADDRW 0xf80100a8
+#define AGX_MPFE_IOHMC_DRAMTIMING0 0xf8010050
+#define AGX_MPFE_IOHMC_CALTIMING0 0xf801007c
+#define AGX_MPFE_IOHMC_CALTIMING1 0xf8010080
+#define AGX_MPFE_IOHMC_CALTIMING2 0xf8010084
+#define AGX_MPFE_IOHMC_CALTIMING3 0xf8010088
+#define AGX_MPFE_IOHMC_CALTIMING4 0xf801008c
+#define AGX_MPFE_IOHMC_CALTIMING9 0xf80100a0
+#define AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(x) (((x) & 0x000000ff) >> 0)
+#define AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(value) \
+ (((value) & 0x00000060) >> 5)
+
+#define AGX_RSTMGR_BRGMODRST 0xffd1102c
+#define AGX_RSTMGR_BRGMODRST_DDRSCH 0x00000040
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1 0xf8011100
+#define AGX_MPFE_HMC_ADP_ECCCTRL2 0xf8011104
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT 0xf8011218
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE 0x000000ff
+#define AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL 0xf8011214
+
+
+#define AGX_MPFE_IOHMC_REG_CTRLCFG1 0xf801002c
+
+#define AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST 0xf8010110
+
+#define IOHMC_DRAMADDRW_COL_ADDR_WIDTH(x) (((x) & 0x0000001f) >> 0)
+#define IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(x) (((x) & 0x000003e0) >> 5)
+#define IOHMC_DRAMADDRW_CS_ADDR_WIDTH(x) (((x) & 0x00070000) >> 16)
+#define IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(x) (((x) & 0x0000c000) >> 14)
+#define IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(x) (((x) & 0x00003c00) >> 10)
+
+#define AGX_MPFE_DDR(x) (0xf8000000 + x)
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT 0xf801100c
+#define AGX_MPFE_DDR_MAIN_SCHED 0xf8000400
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF 0xf8000408
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING 0xf800040c
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK 0x0000001f
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRMODE 0xf8000410
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV 0xf800043c
+#define AGX_MPFE_DDR_MAIN_SCHED_READLATENCY 0xf8000414
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE 0xf8000438
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST 10
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST 4
+#define AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST 0
+#define AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(x) (((x) << 0) & 0x0000001f)
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST 0
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK (BIT(0) | BIT(1))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST 2
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK (BIT(2) | BIT(3))
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST 4
+#define AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK (BIT(4) | BIT(5))
+
+#define AGX_MPFE_HMC_ADP(x) (0xf8011000 + (x))
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL 0xf8011210
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL 0xf8011008
+#define HMC_ADP_DDRIOCTRL 0x8
+#define HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00000003) >> 0)
+#define HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(x) (((x) & 0x00003e00) >> 9)
+#define ADP_DRAMADDRWIDTH 0xe0
+
+#define ACT_TO_ACT_DIFF_BANK(value) (((value) & 0x00fc0000) >> 18)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+#define ACT_TO_RDWR(value) (((value) & 0x0000003f) >> 0)
+#define ACT_TO_ACT(value) (((value) & 0x0003f000) >> 12)
+
+/* timing 2 */
+#define RD_TO_RD_DIFF_CHIP(value) (((value) & 0x00000fc0) >> 6)
+#define RD_TO_WR_DIFF_CHIP(value) (((value) & 0x3f000000) >> 24)
+#define RD_TO_WR(value) (((value) & 0x00fc0000) >> 18)
+#define RD_TO_PCH(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 3 */
+#define CALTIMING3_WR_TO_RD_DIFF_CHIP(value) (((value) & 0x0003f000) >> 12)
+#define CALTIMING3_WR_TO_RD(value) (((value) & 0x00000fc0) >> 6)
+
+/* timing 4 */
+#define PCH_TO_VALID(value) (((value) & 0x00000fc0) >> 6)
+
+#define DDRTIMING_BWRATIO_OFST 31
+#define DDRTIMING_WRTORD_OFST 26
+#define DDRTIMING_RDTOWR_OFST 21
+#define DDRTIMING_BURSTLEN_OFST 18
+#define DDRTIMING_WRTOMISS_OFST 12
+#define DDRTIMING_RDTOMISS_OFST 6
+#define DDRTIMING_ACTTOACT_OFST 0
+
+#define ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x3) >> 0)
+
+#define DDRMODE_AUTOPRECHARGE_OFST 1
+#define DDRMODE_BWRATIOEXTENDED_OFST 0
+
+
+#define AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(x) (((x) & 0x7f) >> 0)
+#define AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(x) (((x) & 0x0f) >> 0)
+
+#define AGX_CCU_CPU0_MPRT_DDR 0xf7004400
+#define AGX_CCU_CPU0_MPRT_MEM0 0xf70045c0
+#define AGX_CCU_CPU0_MPRT_MEM1A 0xf70045e0
+#define AGX_CCU_CPU0_MPRT_MEM1B 0xf7004600
+#define AGX_CCU_CPU0_MPRT_MEM1C 0xf7004620
+#define AGX_CCU_CPU0_MPRT_MEM1D 0xf7004640
+#define AGX_CCU_CPU0_MPRT_MEM1E 0xf7004660
+#define AGX_CCU_IOM_MPRT_MEM0 0xf7018560
+#define AGX_CCU_IOM_MPRT_MEM1A 0xf7018580
+#define AGX_CCU_IOM_MPRT_MEM1B 0xf70185a0
+#define AGX_CCU_IOM_MPRT_MEM1C 0xf70185c0
+#define AGX_CCU_IOM_MPRT_MEM1D 0xf70185e0
+#define AGX_CCU_IOM_MPRT_MEM1E 0xf7018600
+
+#define AGX_NOC_FW_DDR_SCR 0xf8020200
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMITEXT 0xf802021c
+#define AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT 0xf8020218
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT 0xf802029c
+#define AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT 0xf8020298
+
+#define AGX_SOC_NOC_FW_DDR_SCR_ENABLE 0xf8020200
+#define AGX_CCU_NOC_DI_SET_MSK 0x10
+
+#define AGX_SYSMGR_CORE_HMC_CLK 0xffd120b4
+#define AGX_SYSMGR_CORE_HMC_CLK_STATUS 0x00000001
+
+#define AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(x) (((x) & 0xffff) >> 0)
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK 0x00000003
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST 0
+#define AGX_MPFE_HMC_ADP_HPSINTFCSEL_ENABLE 0x001f1f1f
+#define AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST 7
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK 0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK 0x00000100
+#define AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK 0x00000001
+
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK 0x00000001
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK 0x00010000
+#define AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK 0x00000100
+#define AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(value) (((value) & 0x1) >> 0)
+
+
+#define AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(x) (((x) & 0x00003) >> 0)
+#define IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) (((x) & 0x03c00) >> 10)
+#define IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(x) (((x) & 0x0c000) >> 14)
+#define IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(x) (((x) & 0x0001f) >> 0)
+#define IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(x) (((x) & 0x70000) >> 16)
+#define IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) (((x) & 0x003e0) >> 5)
+
+#define AGX_SDRAM_0_LB_ADDR 0x0
+
+int init_hard_memory_controller(void);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_NOC_H
+#define AGX_NOC_H
+
+
+#define AXI_AP (1<<0)
+#define FPGA2SOC (1<<16)
+#define MPU (1<<24)
+#define AGX_NOC_PER_SCR_NAND 0xffd21000
+#define AGX_NOC_PER_SCR_NAND_DATA 0xffd21004
+#define AGX_NOC_PER_SCR_USB0 0xffd2100c
+#define AGX_NOC_PER_SCR_USB1 0xffd21010
+#define AGX_NOC_PER_SCR_SPI_M0 0xffd2101c
+#define AGX_NOC_PER_SCR_SPI_M1 0xffd21020
+#define AGX_NOC_PER_SCR_SPI_S0 0xffd21024
+#define AGX_NOC_PER_SCR_SPI_S1 0xffd21028
+#define AGX_NOC_PER_SCR_EMAC0 0xffd2102c
+#define AGX_NOC_PER_SCR_EMAC1 0xffd21030
+#define AGX_NOC_PER_SCR_EMAC2 0xffd21034
+#define AGX_NOC_PER_SCR_SDMMC 0xffd21040
+#define AGX_NOC_PER_SCR_GPIO0 0xffd21044
+#define AGX_NOC_PER_SCR_GPIO1 0xffd21048
+#define AGX_NOC_PER_SCR_I2C0 0xffd21050
+#define AGX_NOC_PER_SCR_I2C1 0xffd21058
+#define AGX_NOC_PER_SCR_I2C2 0xffd2105c
+#define AGX_NOC_PER_SCR_I2C3 0xffd21060
+#define AGX_NOC_PER_SCR_SP_TIMER0 0xffd21064
+#define AGX_NOC_PER_SCR_SP_TIMER1 0xffd21068
+#define AGX_NOC_PER_SCR_UART0 0xffd2106c
+#define AGX_NOC_PER_SCR_UART1 0xffd21070
+
+
+#define AGX_NOC_SYS_SCR_DMA_ECC 0xffd21108
+#define AGX_NOC_SYS_SCR_EMAC0RX_ECC 0xffd2110c
+#define AGX_NOC_SYS_SCR_EMAC0TX_ECC 0xffd21110
+#define AGX_NOC_SYS_SCR_EMAC1RX_ECC 0xffd21114
+#define AGX_NOC_SYS_SCR_EMAC1TX_ECC 0xffd21118
+#define AGX_NOC_SYS_SCR_EMAC2RX_ECC 0xffd2111c
+#define AGX_NOC_SYS_SCR_EMAC2TX_ECC 0xffd21120
+#define AGX_NOC_SYS_SCR_NAND_ECC 0xffd2112c
+#define AGX_NOC_SYS_SCR_NAND_READ_ECC 0xffd21130
+#define AGX_NOC_SYS_SCR_NAND_WRITE_ECC 0xffd21134
+#define AGX_NOC_SYS_SCR_OCRAM_ECC 0xffd21138
+#define AGX_NOC_SYS_SCR_SDMMC_ECC 0xffd21140
+#define AGX_NOC_SYS_SCR_USB0_ECC 0xffd21144
+#define AGX_NOC_SYS_SCR_USB1_ECC 0xffd21148
+#define AGX_NOC_SYS_SCR_CLK_MGR 0xffd2114c
+#define AGX_NOC_SYS_SCR_IO_MGR 0xffd21154
+#define AGX_NOC_SYS_SCR_RST_MGR 0xffd21158
+#define AGX_NOC_SYS_SCR_SYS_MGR 0xffd2115c
+#define AGX_NOC_SYS_SCR_OSC0_TIMER 0xffd21160
+#define AGX_NOC_SYS_SCR_OSC1_TIMER 0xffd21164
+#define AGX_NOC_SYS_SCR_WATCHDOG0 0xffd21168
+#define AGX_NOC_SYS_SCR_WATCHDOG1 0xffd2116c
+#define AGX_NOC_SYS_SCR_WATCHDOG2 0xffd21170
+#define AGX_NOC_SYS_SCR_WATCHDOG3 0xffd21174
+#define AGX_NOC_SYS_SCR_DAP 0xffd21178
+#define AGX_NOC_SYS_SCR_L4_NOC_PROBES 0xffd21190
+#define AGX_NOC_SYS_SCR_L4_NOC_QOS 0xffd21194
+
+#define AGX_CCU_NOC_BRIDGE_CPU0_RAM 0xf7004688
+#define AGX_CCU_NOC_BRIDGE_IOM_RAM 0xf7004688
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_PINMUX_H
+#define AGX_PINMUX_H
+
+#define AGX_PINMUX_PIN0SEL 0xffd13000
+#define AGX_PINMUX_IO0CTRL 0xffd13130
+#define AGX_PINMUX_PINMUX_EMAC0_USEFPGA 0xffd13300
+#define AGX_PINMUX_IO0_DELAY 0xffd13400
+
+#include "agilex_handoff.h"
+
+void config_pinmux(handoff *handoff);
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_PRIVATE_H
+#define AGX_PRIVATE_H
+
+#define AGX_MMC_REG_BASE 0xff808000
+
+#define EMMC_DESC_SIZE (1<<20)
+#define EMMC_INIT_PARAMS(base) \
+ { .bus_width = MMC_BUS_WIDTH_4, \
+ .clk_rate = 50000000, \
+ .desc_base = (base), \
+ .desc_size = EMMC_DESC_SIZE, \
+ .flags = 0, \
+ .reg_base = AGX_MMC_REG_BASE, \
+ \
+ }
+
+typedef enum {
+ BOOT_SOURCE_FPGA = 0,
+ BOOT_SOURCE_SDMMC,
+ BOOT_SOURCE_NAND,
+ BOOT_SOURCE_RSVD,
+ BOOT_SOURCE_QSPI,
+} boot_source_type;
+
+void enable_nonsecure_access(void);
+void socfpga_io_setup(int boot_source);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_RESETMANAGER_H
+#define AGX_RESETMANAGER_H
+
+#define AGX_RSTMGR_HDSKEN 0xffd11010
+#define AGX_RSTMGR_PER0MODRST 0xffd11024
+#define AGX_RSTMGR_PER1MODRST 0xffd11028
+#define AGX_RSTMGR_BRGMODRST 0xffd1102c
+
+#define AGX_RSTMGR_PER0MODRST_EMAC0 0x00000001
+#define AGX_RSTMGR_PER0MODRST_EMAC1 0x00000002
+#define AGX_RSTMGR_PER0MODRST_EMAC2 0x00000004
+#define AGX_RSTMGR_PER0MODRST_USB0 0x00000008
+#define AGX_RSTMGR_PER0MODRST_USB1 0x00000010
+#define AGX_RSTMGR_PER0MODRST_NAND 0x00000020
+#define AGX_RSTMGR_PER0MODRST_SDMMC 0x00000080
+#define AGX_RSTMGR_PER0MODRST_EMAC0OCP 0x00000100
+#define AGX_RSTMGR_PER0MODRST_EMAC1OCP 0x00000200
+#define AGX_RSTMGR_PER0MODRST_EMAC2OCP 0x00000400
+#define AGX_RSTMGR_PER0MODRST_USB0OCP 0x00000800
+#define AGX_RSTMGR_PER0MODRST_USB1OCP 0x00001000
+#define AGX_RSTMGR_PER0MODRST_NANDOCP 0x00002000
+#define AGX_RSTMGR_PER0MODRST_SDMMCOCP 0x00008000
+#define AGX_RSTMGR_PER0MODRST_DMA 0x00010000
+#define AGX_RSTMGR_PER0MODRST_SPIM0 0x00020000
+#define AGX_RSTMGR_PER0MODRST_SPIM1 0x00040000
+#define AGX_RSTMGR_PER0MODRST_SPIS0 0x00080000
+#define AGX_RSTMGR_PER0MODRST_SPIS1 0x00100000
+#define AGX_RSTMGR_PER0MODRST_DMAOCP 0x00200000
+#define AGX_RSTMGR_PER0MODRST_EMACPTP 0x00400000
+#define AGX_RSTMGR_PER0MODRST_DMAIF0 0x01000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF1 0x02000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF2 0x04000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF3 0x08000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF4 0x10000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF5 0x20000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF6 0x40000000
+#define AGX_RSTMGR_PER0MODRST_DMAIF7 0x80000000
+
+#define AGX_RSTMGR_PER1MODRST_WATCHDOG0 0x1
+#define AGX_RSTMGR_PER1MODRST_WATCHDOG1 0x2
+#define AGX_RSTMGR_PER1MODRST_WATCHDOG2 0x4
+#define AGX_RSTMGR_PER1MODRST_WATCHDOG3 0x8
+#define AGX_RSTMGR_PER1MODRST_L4SYSTIMER0 0x00000010
+#define AGX_RSTMGR_PER1MODRST_L4SYSTIMER1 0x00000020
+#define AGX_RSTMGR_PER1MODRST_SPTIMER0 0x00000040
+#define AGX_RSTMGR_PER1MODRST_SPTIMER1 0x00000080
+#define AGX_RSTMGR_PER1MODRST_I2C0 0x00000100
+#define AGX_RSTMGR_PER1MODRST_I2C1 0x00000200
+#define AGX_RSTMGR_PER1MODRST_I2C2 0x00000400
+#define AGX_RSTMGR_PER1MODRST_I2C3 0x00000800
+#define AGX_RSTMGR_PER1MODRST_I2C4 0x00001000
+#define AGX_RSTMGR_PER1MODRST_UART0 0x00010000
+#define AGX_RSTMGR_PER1MODRST_UART1 0x00020000
+#define AGX_RSTMGR_PER1MODRST_GPIO0 0x01000000
+#define AGX_RSTMGR_PER1MODRST_GPIO1 0x02000000
+
+#define AGX_RSTMGR_HDSKEN_FPGAHSEN 0x00000004
+#define AGX_RSTMGR_HDSKEN_ETRSTALLEN 0x00000008
+#define AGX_RSTMGR_HDSKEN_L2FLUSHEN 0x00000100
+#define AGX_RSTMGR_HDSKEN_L3NOC_DBG 0x00010000
+#define AGX_RSTMGR_HDSKEN_DEBUG_L3NOC 0x00020000
+#define AGX_RSTMGR_HDSKEN_SDRSELFREFEN 0x00000001
+
+#define AGX_RSTMGR_BRGMODRST_SOC2FPGA 0x1
+#define AGX_RSTMGR_BRGMODRST_LWHPS2FPGA 0x2
+#define AGX_RSTMGR_BRGMODRST_FPGA2SOC 0x4
+#define AGX_RSTMGR_BRGMODRST_MPFE 0x40
+
+void deassert_peripheral_reset(void);
+void config_hps_hs_before_warm_reset(void);
+
+#endif
+
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef AGX_SYSTEMMANAGER_H
+#define AGX_SYSTEMMANAGER_H
+
+#define AGX_FIREWALL_SOC2FPGA 0xffd21200
+#define AGX_FIREWALL_LWSOC2FPGA 0xffd21300
+
+#define AGX_NOC_FW_L4_PER_SCR_NAND_REGISTER 0xffd21000
+#define AGX_NOC_FW_L4_PER_SCR_NAND_DATA 0xffd21004
+#define AGX_NOC_FW_L4_PER_SCR_USB0_REGISTER 0xffd2100c
+#define AGX_NOC_FW_L4_PER_SCR_USB1_REGISTER 0xffd21010
+#define AGX_NOC_FW_L4_PER_SCR_SPI_MASTER0 0xffd2101c
+#define AGX_NOC_FW_L4_PER_SCR_SPI_MASTER1 0xffd21020
+#define AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE0 0xffd21024
+#define AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE1 0xffd21028
+#define AGX_NOC_FW_L4_PER_SCR_EMAC0 0xffd2102c
+#define AGX_NOC_FW_L4_PER_SCR_EMAC1 0xffd21030
+#define AGX_NOC_FW_L4_PER_SCR_EMAC2 0xffd21034
+#define AGX_NOC_FW_L4_PER_SCR_SDMMC 0xffd21040
+#define AGX_NOC_FW_L4_PER_SCR_GPIO0 0xffd21044
+#define AGX_NOC_FW_L4_PER_SCR_GPIO1 0xffd21048
+#define AGX_NOC_FW_L4_PER_SCR_I2C0 0xffd21050
+#define AGX_NOC_FW_L4_PER_SCR_I2C1 0xffd21054
+#define AGX_NOC_FW_L4_PER_SCR_I2C2 0xffd21058
+#define AGX_NOC_FW_L4_PER_SCR_I2C3 0xffd2105c
+#define AGX_NOC_FW_L4_PER_SCR_I2C4 0xffd21060
+#define AGX_NOC_FW_L4_PER_SCR_SP_TIMER0 0xffd21064
+#define AGX_NOC_FW_L4_PER_SCR_SP_TIMER1 0xffd21068
+#define AGX_NOC_FW_L4_PER_SCR_UART0 0xffd2106c
+#define AGX_NOC_FW_L4_PER_SCR_UART1 0xffd21070
+
+#define AGX_NOC_FW_L4_SYS_SCR_DMA_ECC 0xffd21108
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC0RX_ECC 0xffd2110c
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC0TX_ECC 0xffd21110
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC1RX_ECC 0xffd21114
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC1TX_ECC 0xffd21118
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC2RX_ECC 0xffd2111c
+#define AGX_NOC_FW_L4_SYS_SCR_EMAC2TX_ECC 0xffd21120
+#define AGX_NOC_FW_L4_SYS_SCR_NAND_ECC 0xffd2112c
+#define AGX_NOC_FW_L4_SYS_SCR_NAND_READ_ECC 0xffd21130
+#define AGX_NOC_FW_L4_SYS_SCR_NAND_WRITE_ECC 0xffd21134
+#define AGX_NOC_FW_L4_SYS_SCR_OCRAM_ECC 0xffd21138
+#define AGX_NOC_FW_L4_SYS_SCR_SDMMC_ECC 0xffd21140
+#define AGX_NOC_FW_L4_SYS_SCR_USB0_ECC 0xffd21144
+#define AGX_NOC_FW_L4_SYS_SCR_USB1_ECC 0xffd21148
+#define AGX_NOC_FW_L4_SYS_SCR_CLK_MGR 0xffd2114c
+#define AGX_NOC_FW_L4_SYS_SCR_IO_MGR 0xffd21154
+#define AGX_NOC_FW_L4_SYS_SCR_RST_MGR 0xffd21158
+#define AGX_NOC_FW_L4_SYS_SCR_SYS_MGR 0xffd2115c
+#define AGX_NOC_FW_L4_SYS_SCR_OSC0_TIMER 0xffd21160
+#define AGX_NOC_FW_L4_SYS_SCR_OSC1_TIMER 0xffd21164
+#define AGX_NOC_FW_L4_SYS_SCR_WATCHDOG0 0xffd21168
+#define AGX_NOC_FW_L4_SYS_SCR_WATCHDOG1 0xffd2116c
+#define AGX_NOC_FW_L4_SYS_SCR_WATCHDOG2 0xffd21170
+#define AGX_NOC_FW_L4_SYS_SCR_WATCHDOG3 0xffd21174
+#define AGX_NOC_FW_L4_SYS_SCR_DAP 0xffd21178
+#define AGX_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES 0xffd21190
+#define AGX_NOC_FW_L4_SYS_SCR_L4_NOC_QOS 0xffd21194
+
+#define AGX_CCU_NOC_CPU0_RAMSPACE0_0 0xf7004688
+#define AGX_CCU_NOC_IOM_RAMSPACE0_0 0xf7018628
+
+#define DISABLE_BRIDGE_FIREWALL 0x0ffe0101
+#define DISABLE_L4_FIREWALL (BIT(0) | BIT(16) | BIT(24))
+
+void enable_nonsecure_access(void);
+void enable_ns_bridge_access(void);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLAT_MACROS_S
+#define PLAT_MACROS_S
+
+#include <platform_def.h>
+
+ /* ---------------------------------------------
+ * The below required platform porting macro
+ * prints out relevant platform registers
+ * whenever an unhandled exception is taken in
+ * BL31.
+ * ---------------------------------------------
+ */
+ .macro plat_crash_print_regs
+ .endm
+
+#endif /* PLAT_MACROS_S */
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_DEF_H
+#define PLATFORM_DEF_H
+
+#include <arch.h>
+#include <common/interrupt_props.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <plat/common/common_def.h>
+
+
+#define PLAT_CPUID_RELEASE 0xffe1b000
+#define PLAT_AGX_SEC_ENTRY 0xffe1b008
+
+/* Define next boot image name and offset */
+#define PLAT_NS_IMAGE_OFFSET 0x50000
+#define PLAT_HANDOFF_OFFSET 0xFFE3F000
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+/* Agilex supports up to 124GB RAM */
+#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 39)
+#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 39)
+
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+#define PLAT_PRIMARY_CPU 0
+#define PLAT_SECONDARY_ENTRY_BASE 0x01f78bf0
+
+/* Size of cacheable stacks */
+#define PLATFORM_STACK_SIZE 0x2000
+
+/* PSCI related constant */
+#define PLAT_NUM_POWER_DOMAINS 5
+#define PLAT_MAX_PWR_LVL 1
+#define PLAT_MAX_RET_STATE 1
+#define PLAT_MAX_OFF_STATE 2
+#define PLATFORM_SYSTEM_COUNT 1
+#define PLATFORM_CLUSTER_COUNT 1
+#define PLATFORM_CLUSTER0_CORE_COUNT 4
+#define PLATFORM_CLUSTER1_CORE_COUNT 0
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
+ PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
+
+/* Interrupt related constant */
+
+#define INTEL_AGX_IRQ_SEC_PHY_TIMER 29
+
+#define INTEL_AGX_IRQ_SEC_SGI_0 8
+#define INTEL_AGX_IRQ_SEC_SGI_1 9
+#define INTEL_AGX_IRQ_SEC_SGI_2 10
+#define INTEL_AGX_IRQ_SEC_SGI_3 11
+#define INTEL_AGX_IRQ_SEC_SGI_4 12
+#define INTEL_AGX_IRQ_SEC_SGI_5 13
+#define INTEL_AGX_IRQ_SEC_SGI_6 14
+#define INTEL_AGX_IRQ_SEC_SGI_7 15
+
+#define TSP_IRQ_SEC_PHY_TIMER INTEL_AGX_IRQ_SEC_PHY_TIMER
+#define TSP_SEC_MEM_BASE BL32_BASE
+#define TSP_SEC_MEM_SIZE (BL32_LIMIT - BL32_BASE + 1)
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+#define DRAM_BASE (0x0)
+#define DRAM_SIZE (0x80000000)
+
+#define OCRAM_BASE (0xFFE00000)
+#define OCRAM_SIZE (0x00040000)
+
+#define MEM64_BASE (0x0100000000)
+#define MEM64_SIZE (0x1F00000000)
+
+#define DEVICE1_BASE (0x80000000)
+#define DEVICE1_SIZE (0x60000000)
+
+#define DEVICE2_BASE (0xF7000000)
+#define DEVICE2_SIZE (0x08E00000)
+
+#define DEVICE3_BASE (0xFFFC0000)
+#define DEVICE3_SIZE (0x00008000)
+
+#define DEVICE4_BASE (0x2000000000)
+#define DEVICE4_SIZE (0x0100000000)
+
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL3-1 debug size plus a
+ * little space for growth.
+ */
+
+
+#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
+
+#define BL1_RO_BASE (0xffe00000)
+#define BL1_RO_LIMIT (0xffe0f000)
+#define BL1_RW_BASE (0xffe10000)
+#define BL1_RW_LIMIT (0xffe1ffff)
+#define BL1_RW_SIZE (0x14000)
+
+#define BL2_BASE (0xffe00000)
+#define BL2_LIMIT (0xffe1b000)
+
+#define BL31_BASE (0xffe1c000)
+#define BL31_LIMIT (0xffe3bfff)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define MAX_XLAT_TABLES 8
+#define MAX_MMAP_REGIONS 16
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same cache
+ * line at any cache level. They could belong to different cpus/clusters &
+ * get written while being protected by different locks causing corruption of
+ * a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
+
+#define PLAT_GIC_BASE (0xFFFC0000)
+#define PLAT_GICC_BASE (PLAT_GIC_BASE + 0x2000)
+#define PLAT_GICD_BASE (PLAT_GIC_BASE + 0x1000)
+#define PLAT_GICR_BASE 0
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define PLAT_UART0_BASE (0xFFC02000)
+#define PLAT_UART1_BASE (0xFFC02100)
+
+#define CRASH_CONSOLE_BASE PLAT_UART0_BASE
+
+#define PLAT_BAUDRATE (115200)
+#define PLAT_UART_CLOCK (100000000)
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define PLAT_SYS_COUNTER_FREQ_IN_TICKS (400000000)
+#define PLAT_SYS_COUNTER_FREQ_IN_MHZ (400)
+
+#define PLAT_INTEL_AGX_GICD_BASE PLAT_GICD_BASE
+#define PLAT_INTEL_AGX_GICC_BASE PLAT_GICC_BASE
+
+/*
+ * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
+ * terminology. On a GICv2 system or mode, the lists will be merged and treated
+ * as Group 0 interrupts.
+ */
+#define PLAT_INTEL_AGX_G1S_IRQ_PROPS(grp) \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_PHY_TIMER, GIC_HIGHEST_SEC_PRIORITY, \
+ grp, GIC_INTR_CFG_LEVEL), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_1, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_2, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_3, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_4, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_5, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE), \
+ INTR_PROP_DESC(INTEL_AGX_IRQ_SEC_SGI_7, GIC_HIGHEST_SEC_PRIORITY, grp, \
+ GIC_INTR_CFG_EDGE)
+
+#define PLAT_INTEL_AGX_G0_IRQ_PROPS(grp)
+
+#define MAX_IO_HANDLES 4
+#define MAX_IO_DEVICES 4
+#define MAX_IO_BLOCK_DEVICES 2
+
+#endif /* PLATFORM_DEF_H */
+
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_PRIVATE_H
+#define PLATFORM_PRIVATE_H
+
+/*******************************************************************************
+ * Function and variable prototypes
+ ******************************************************************************/
+void socfpga_configure_mmu_el3(unsigned long total_base,
+ unsigned long total_size,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
+
+
+void socfpga_configure_mmu_el1(unsigned long total_base,
+ unsigned long total_size,
+ unsigned long ro_start,
+ unsigned long ro_limit,
+ unsigned long coh_start,
+ unsigned long coh_limit);
+
+void socfpga_delay_timer_init(void);
+
+void socfpga_gic_driver_init(void);
+
+uint32_t socfpga_get_spsr_for_bl32_entry(void);
+
+uint32_t socfpga_get_spsr_for_bl33_entry(void);
+
+unsigned long socfpga_get_ns_image_entrypoint(void);
+
+
+#endif /* PLATFORM_PRIVATE_H */
--- /dev/null
+#
+# Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2019, Intel Corporation. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#
+PLAT_INCLUDES := \
+ -Iplat/intel/soc/agilex/include/ \
+ -Iplat/intel/soc/common/drivers/
+
+PLAT_BL_COMMON_SOURCES := \
+ drivers/arm/gic/common/gic_common.c \
+ drivers/arm/gic/v2/gicv2_main.c \
+ drivers/arm/gic/v2/gicv2_helpers.c \
+ drivers/delay_timer/delay_timer.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/ti/uart/aarch64/16550_console.S \
+ lib/xlat_tables/aarch64/xlat_tables.c \
+ lib/xlat_tables/xlat_tables_common.c \
+ plat/common/plat_gicv2.c \
+ plat/intel/soc/agilex/aarch64/platform_common.c \
+ plat/intel/soc/agilex/aarch64/plat_helpers.S \
+
+BL2_SOURCES += \
+ common/desc_image_load.c \
+ drivers/partition/partition.c \
+ drivers/partition/gpt.c \
+ drivers/arm/pl061/pl061_gpio.c \
+ drivers/mmc/mmc.c \
+ drivers/synopsys/emmc/dw_mmc.c \
+ drivers/io/io_storage.c \
+ drivers/io/io_block.c \
+ drivers/io/io_fip.c \
+ drivers/gpio/gpio.c \
+ drivers/intel/soc/stratix10/io/s10_memmap_qspi.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/intel/soc/agilex/bl2_plat_setup.c \
+ plat/intel/soc/agilex/socfpga_storage.c \
+ plat/intel/soc/agilex/bl2_plat_mem_params_desc.c \
+ plat/intel/soc/agilex/soc/agilex_reset_manager.c \
+ plat/intel/soc/agilex/soc/agilex_handoff.c \
+ plat/intel/soc/agilex/soc/agilex_clock_manager.c \
+ plat/intel/soc/agilex/soc/agilex_pinmux.c \
+ plat/intel/soc/agilex/soc/agilex_memory_controller.c \
+ plat/intel/soc/agilex/socfpga_delay_timer.c \
+ plat/intel/soc/agilex/socfpga_image_load.c \
+ plat/intel/soc/agilex/soc/agilex_system_manager.c \
+ plat/intel/soc/agilex/soc/agilex_mailbox.c \
+ plat/intel/soc/common/drivers/qspi/cadence_qspi.c \
+ plat/intel/soc/common/drivers/wdt/watchdog.c \
+ plat/intel/soc/common/drivers/ccu/ncore_ccu.c
+
+BL31_SOURCES += \
+ drivers/arm/cci/cci.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ lib/cpus/aarch64/aem_generic.S \
+ plat/common/plat_psci_common.c \
+ plat/intel/soc/agilex/socfpga_sip_svc.c \
+ plat/intel/soc/agilex/bl31_plat_setup.c \
+ plat/intel/soc/agilex/socfpga_psci.c \
+ plat/intel/soc/agilex/socfpga_topology.c \
+ plat/intel/soc/agilex/socfpga_delay_timer.c \
+ plat/intel/soc/agilex/soc/agilex_reset_manager.c \
+ plat/intel/soc/agilex/soc/agilex_pinmux.c \
+ plat/intel/soc/agilex/soc/agilex_clock_manager.c \
+ plat/intel/soc/agilex/soc/agilex_handoff.c \
+ plat/intel/soc/agilex/soc/agilex_mailbox.c
+
+PROGRAMMABLE_RESET_ADDRESS := 0
+BL2_AT_EL3 := 1
+MULTI_CONSOLE_API := 1
+USE_COHERENT_MEM := 1
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <errno.h>
+#include <lib/mmio.h>
+
+#include "agilex_clock_manager.h"
+#include "agilex_handoff.h"
+
+static const CLOCK_SOURCE_CONFIG clk_source = {
+ /* clk_freq_of_eosc1 */
+ (uint32_t) 25000000,
+ /* clk_freq_of_f2h_free */
+ (uint32_t) 400000000,
+ /* clk_freq_of_cb_intosc_ls */
+ (uint32_t) 50000000,
+};
+
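+/*
+ * Poll the clock manager status register until both the main and
+ * peripheral PLLs report lock. Gives up after 1000 reads.
+ */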
+uint32_t wait_pll_lock(void)
+{
+ uint32_t data;
+ uint32_t count = 0;
+
+ do {
+ data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+ count++;
+ if (count >= 1000)
+ return -ETIMEDOUT;
+
+ } while ((CLKMGR_STAT_MAINPLLLOCKED(data) == 0) ||
+ (CLKMGR_STAT_PERPLLLOCKED(data) == 0));
+ return 0;
+}
+
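+/* Poll until the clock manager state machine is no longer busy. */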
+uint32_t wait_fsm(void)
+{
+ uint32_t data;
+ uint32_t count = 0;
+
+ do {
+ data = mmio_read_32(CLKMGR_OFFSET + CLKMGR_STAT);
+ count++;
+ if (count >= 1000)
+ return -ETIMEDOUT;
+
+ } while (CLKMGR_STAT_BUSY(data) == CLKMGR_STAT_BUSY_E_BUSY);
+
+ return 0;
+}
+
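+/*
+ * Issue a write request on the PLL memory interface and wait for the
+ * request bit to clear.
+ */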
+uint32_t pll_source_sync_config(uint32_t pll_mem_offset)
+{
+ uint32_t val = 0;
+ uint32_t count = 0;
+ uint32_t req_status = 0;
+
+ val = (CLKMGR_MEM_WR | CLKMGR_MEM_REQ |
+ CLKMGR_MEM_WDAT << CLKMGR_MEM_WDAT_OFFSET | CLKMGR_MEM_ADDR);
+ mmio_write_32(pll_mem_offset, val);
+
+ do {
+ req_status = mmio_read_32(pll_mem_offset);
+ count++;
+ } while ((req_status & CLKMGR_MEM_REQ) && (count < 100));
+
+ if (count >= 100)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
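+/*
+ * Issue a read request on the PLL memory interface, wait for it to
+ * complete and log the data returned.
+ */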
+uint32_t pll_source_sync_read(uint32_t pll_mem_offset)
+{
+ uint32_t val = 0;
+ uint32_t rdata = 0;
+ uint32_t count = 0;
+ uint32_t req_status = 0;
+
+ val = (CLKMGR_MEM_REQ | CLKMGR_MEM_ADDR);
+ mmio_write_32(pll_mem_offset, val);
+
+ do {
+ req_status = mmio_read_32(pll_mem_offset);
+ count++;
+ } while ((req_status & CLKMGR_MEM_REQ) && (count < 100));
+
+ if (count >= 100)
+ return -ETIMEDOUT;
+
+ rdata = mmio_read_32(pll_mem_offset + 0x4);
+ INFO("rdata (%x) = %x\n", pll_mem_offset + 0x4, rdata);
+
+ return 0;
+}
+
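+/*
+ * Program the main and peripheral PLLs, the ping pong counters and the
+ * module clock enables from the values captured in the handoff data.
+ */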
+void config_clkmgr_handoff(handoff *hoff_ptr)
+{
+ uint32_t mdiv, mscnt, hscnt;
+ uint32_t arefclk_div, drefclk_div;
+
+ /* Bypass all mainpllgrp's clocks */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0x7);
+ wait_fsm();
+
+ /* Bypass all perpllgrp's clocks */
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0x7f);
+ wait_fsm();
+
+ /* Put both PLL in reset and power down */
+ mmio_clrbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK |
+ CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK);
+ mmio_clrbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK);
+
+ /* Setup main PLL dividers */
+ mdiv = CLKMGR_MAINPLL_PLLM_MDIV(hoff_ptr->main_pll_pllm);
+
+ arefclk_div = CLKMGR_MAINPLL_PLLGLOB_AREFCLKDIV(
+ hoff_ptr->main_pll_pllglob);
+ drefclk_div = CLKMGR_MAINPLL_PLLGLOB_DREFCLKDIV(
+ hoff_ptr->main_pll_pllglob);
+
+ mscnt = 100 / (mdiv / BIT(drefclk_div));
+ if (!mscnt)
+ mscnt = 1;
+ hscnt = (mdiv * mscnt * BIT(drefclk_div) / arefclk_div) - 4;
+
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_VCOCALIB,
+ CLKMGR_MAINPLL_VCOCALIB_HSCNT_SET(hscnt) |
+ CLKMGR_MAINPLL_VCOCALIB_MSCNT_SET(mscnt));
+
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCDIV,
+ hoff_ptr->main_pll_nocdiv);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ hoff_ptr->main_pll_pllglob);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_FDBCK,
+ hoff_ptr->main_pll_fdbck);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC0,
+ hoff_ptr->main_pll_pllc0);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC1,
+ hoff_ptr->main_pll_pllc1);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC2,
+ hoff_ptr->main_pll_pllc2);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLC3,
+ hoff_ptr->main_pll_pllc3);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLM,
+ hoff_ptr->main_pll_pllm);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MPUCLK,
+ hoff_ptr->main_pll_mpuclk);
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_NOCCLK,
+ hoff_ptr->main_pll_nocclk);
+
+ /* Setup peripheral PLL dividers */
+ mdiv = CLKMGR_PERPLL_PLLM_MDIV(hoff_ptr->per_pll_pllm);
+
+ arefclk_div = CLKMGR_PERPLL_PLLGLOB_AREFCLKDIV(
+ hoff_ptr->per_pll_pllglob);
+ drefclk_div = CLKMGR_PERPLL_PLLGLOB_DREFCLKDIV(
+ hoff_ptr->per_pll_pllglob);
+
+ mscnt = 100 / (mdiv / BIT(drefclk_div));
+ if (!mscnt)
+ mscnt = 1;
+ hscnt = (mdiv * mscnt * BIT(drefclk_div) / arefclk_div) - 4;
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_VCOCALIB,
+ CLKMGR_PERPLL_VCOCALIB_HSCNT_SET(hscnt) |
+ CLKMGR_PERPLL_VCOCALIB_MSCNT_SET(mscnt));
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EMACCTL,
+ hoff_ptr->per_pll_emacctl);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_GPIODIV,
+ CLKMGR_PERPLL_GPIODIV_GPIODBCLK_SET(
+ hoff_ptr->per_pll_gpiodiv));
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ hoff_ptr->per_pll_pllglob);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_FDBCK,
+ hoff_ptr->per_pll_fdbck);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC0,
+ hoff_ptr->per_pll_pllc0);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC1,
+ hoff_ptr->per_pll_pllc1);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC2,
+ hoff_ptr->per_pll_pllc2);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLC3,
+ hoff_ptr->per_pll_pllc3);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLM,
+ hoff_ptr->per_pll_pllm);
+
+ /* Take both PLL out of reset and power up */
+ mmio_setbits_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB,
+ CLKMGR_MAINPLL_PLLGLOB_PD_SET_MSK |
+ CLKMGR_MAINPLL_PLLGLOB_RST_SET_MSK);
+ mmio_setbits_32(CLKMGR_PERPLL + CLKMGR_PERPLL_PLLGLOB,
+ CLKMGR_PERPLL_PLLGLOB_PD_SET_MSK |
+ CLKMGR_PERPLL_PLLGLOB_RST_SET_MSK);
+
+ wait_pll_lock();
+
+ pll_source_sync_config(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM);
+ pll_source_sync_read(CLKMGR_MAINPLL + CLKMGR_MAINPLL_MEM);
+
+ pll_source_sync_config(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM);
+ pll_source_sync_read(CLKMGR_PERPLL + CLKMGR_PERPLL_MEM);
+
+ /* Configure ping pong counters in Altera group */
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACACTR,
+ hoff_ptr->alt_emacactr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACBCTR,
+ hoff_ptr->alt_emacbctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EMACPTPCTR,
+ hoff_ptr->alt_emacptpctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_GPIODBCTR,
+ hoff_ptr->alt_gpiodbctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_SDMMCCTR,
+ hoff_ptr->alt_sdmmcctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER0CTR,
+ hoff_ptr->alt_s2fuser0ctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_S2FUSER1CTR,
+ hoff_ptr->alt_s2fuser1ctr);
+ mmio_write_32(CLKMGR_ALTERA + CLKMGR_ALTERA_PSIREFCTR,
+ hoff_ptr->alt_psirefctr);
+
+ /* Take all PLLs out of bypass */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_BYPASS, 0);
+ wait_fsm();
+
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_BYPASS, 0);
+ wait_fsm();
+
+ /*
+ * Clear the loss of lock interrupt status register that
+ * might be set during configuration.
+ */
+ mmio_setbits_32(CLKMGR_OFFSET + CLKMGR_INTRCLR,
+ CLKMGR_INTRCLR_MAINLOCKLOST_SET_MSK |
+ CLKMGR_INTRCLR_PERLOCKLOST_SET_MSK);
+
+ /* Take all ping pong counters out of reset */
+ mmio_clrbits_32(CLKMGR_ALTERA + CLKMGR_ALTERA_EXTCNTRST,
+ CLKMGR_ALTERA_EXTCNTRST_RESET);
+
+ /* Set safe mode / out of boot mode */
+ mmio_clrbits_32(CLKMGR_OFFSET + CLKMGR_CTRL,
+ CLKMGR_CTRL_BOOTMODE_SET_MSK);
+ wait_fsm();
+
+ /* Enable mainpllgrp's software-managed clock */
+ mmio_write_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_EN,
+ CLKMGR_MAINPLL_EN_RESET);
+ mmio_write_32(CLKMGR_PERPLL + CLKMGR_PERPLL_EN,
+ CLKMGR_PERPLL_EN_RESET);
+}
+
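+/*
+ * Derive the l4_sys_free clock (used by the watchdog timers) from the
+ * main PLL settings in the handoff data.
+ */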
+int get_wdt_clk(handoff *hoff_ptr)
+{
+ int main_noc_base_clk, l3_main_free_clk, l4_sys_free_clk;
+ int data32, mdiv, arefclkdiv, ref_clk;
+
+ data32 = mmio_read_32(CLKMGR_MAINPLL + CLKMGR_MAINPLL_PLLGLOB);
+
+ switch (CLKMGR_MAINPLL_PLLGLOB_PSRC(data32)) {
+ case CLKMGR_MAINPLL_PLLGLOB_PSRC_EOSC1:
+ ref_clk = clk_source.clk_freq_of_eosc1;
+ break;
+ case CLKMGR_MAINPLL_PLLGLOB_PSRC_INTOSC:
+ ref_clk = clk_source.clk_freq_of_cb_intosc_ls;
+ break;
+ case CLKMGR_MAINPLL_PLLGLOB_PSRC_F2S:
+ ref_clk = clk_source.clk_freq_of_f2h_free;
+ break;
+ default:
+ ref_clk = 0;
+ assert(0);
+ break;
+ }
+
+ arefclkdiv = CLKMGR_MAINPLL_PLLGLOB_AREFCLKDIV(data32);
+ mdiv = CLKMGR_MAINPLL_PLLM_MDIV(hoff_ptr->main_pll_pllm);
+
+ ref_clk = (ref_clk / arefclkdiv) * mdiv;
+ main_noc_base_clk = ref_clk / (hoff_ptr->main_pll_pllc1 & 0x7ff);
+ l3_main_free_clk = main_noc_base_clk / (hoff_ptr->main_pll_nocclk + 1);
+ l4_sys_free_clk = l3_main_free_clk / 4;
+
+ return l4_sys_free_clk;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <platform_def.h>
+#include <string.h>
+
+#include "agilex_handoff.h"
+
+#define SWAP_UINT32(x) (((x) >> 24) | (((x) & 0x00FF0000) >> 8) | \
+ (((x) & 0x0000FF00) << 8) | ((x) << 24))
+
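+/*
+ * Copy the handoff data from PLAT_HANDOFF_OFFSET, convert each word from
+ * big endian to little endian and sanity check the section magic values.
+ */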
+int agilex_get_handoff(handoff *reverse_hoff_ptr)
+{
+ int i;
+ uint32_t *buffer;
+ handoff *handoff_ptr = (handoff *) PLAT_HANDOFF_OFFSET;
+
+ memcpy(reverse_hoff_ptr, handoff_ptr, sizeof(handoff));
+ buffer = (uint32_t *)reverse_hoff_ptr;
+
+ /* convert big endian to little endian */
+ for (i = 0; i < sizeof(handoff) / 4; i++)
+ buffer[i] = SWAP_UINT32(buffer[i]);
+
+ if (reverse_hoff_ptr->header_magic != HANDOFF_MAGIC_HEADER)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_sel_magic != HANDOFF_MAGIC_PINMUX_SEL)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_io_magic != HANDOFF_MAGIC_IOCTLR)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_fpga_magic != HANDOFF_MAGIC_FPGA)
+ return -1;
+ if (reverse_hoff_ptr->pinmux_delay_magic != HANDOFF_MAGIC_IODELAY)
+ return -1;
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <common/debug.h>
+
+#include "agilex_mailbox.h"
+
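+/*
+ * Push a command header and its arguments into the mailbox command
+ * circular buffer and advance the command-in (CIN) pointer.
+ */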
+static int fill_mailbox_circular_buffer(uint32_t header_cmd, uint32_t *args,
+ int len)
+{
+ uint32_t cmd_free_offset;
+ int i;
+
+ cmd_free_offset = mmio_read_32(MBOX_OFFSET + MBOX_CIN);
+
+ if (cmd_free_offset >= MBOX_CMD_BUFFER_SIZE) {
+ INFO("Insufficient buffer in mailbox\n");
+ return MBOX_INSUFFICIENT_BUFFER;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_CMD_BUFFER + (cmd_free_offset++ * 4),
+ header_cmd);
+
+ for (i = 0; i < len; i++) {
+ cmd_free_offset %= MBOX_CMD_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_CMD_BUFFER +
+ (cmd_free_offset++ * 4), args[i]);
+ }
+
+ cmd_free_offset %= MBOX_CMD_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_CIN, cmd_free_offset);
+
+ return 0;
+}
+
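+/*
+ * Ring the SDM doorbell and read back a single response for the given
+ * job id. Returns the number of response data words copied, or a
+ * mailbox error code.
+ */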
+int mailbox_read_response(int job_id, uint32_t *response)
+{
+ int rin = 0;
+ int rout = 0;
+ int response_length = 0;
+ int resp = 0;
+ int total_resp_len = 0;
+ int timeout = 100000;
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1);
+
+ while (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) != 1) {
+ if (timeout-- < 0)
+ return MBOX_NO_RESPONSE;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);
+
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ while (rout != rin) {
+ resp = mmio_read_32(MBOX_OFFSET +
+ MBOX_RESP_BUFFER + ((rout++)*4));
+
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+
+ if (MBOX_RESP_CLIENT_ID(resp) != MBOX_ATF_CLIENT_ID ||
+ MBOX_RESP_JOB_ID(resp) != job_id) {
+ return MBOX_WRONG_ID;
+ }
+
+ if (MBOX_RESP_ERR(resp) > 0) {
+ INFO("Error in response: %x\n", resp);
+ return -resp;
+ }
+ response_length = MBOX_RESP_LEN(resp);
+
+ while (response_length) {
+
+ response_length--;
+ resp = mmio_read_32(MBOX_OFFSET +
+ MBOX_RESP_BUFFER +
+ (rout)*4);
+ if (response) {
+ *(response + total_resp_len) = resp;
+ total_resp_len++;
+ }
+ rout++;
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+ }
+ return total_resp_len;
+ }
+
+ return MBOX_NO_RESPONSE;
+}
+
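+/*
+ * Ring the SDM doorbell and poll for a response matching the given job
+ * id. For urgent commands only the urgent-acknowledge status is checked.
+ */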
+int mailbox_poll_response(int job_id, int urgent, uint32_t *response)
+{
+ int timeout = 80000;
+ int rin = 0;
+ int rout = 0;
+ int response_length = 0;
+ int resp = 0;
+ int total_resp_len = 0;
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_TO_SDM, 1);
+
+ while (1) {
+ while (timeout > 0 &&
+ mmio_read_32(MBOX_OFFSET +
+ MBOX_DOORBELL_FROM_SDM) != 1) {
+ timeout--;
+ }
+
+ if (mmio_read_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM) != 1) {
+ INFO("Timed out waiting for SDM");
+ return MBOX_TIMEOUT;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);
+
+ if (urgent & 1) {
+ if ((mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
+ MBOX_STATUS_UA_MASK) ^
+ (urgent & MBOX_STATUS_UA_MASK)) {
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
+ return 0;
+ }
+
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
+ INFO("Error: Mailbox did not get UA");
+ return -1;
+ }
+
+ rin = mmio_read_32(MBOX_OFFSET + MBOX_RIN);
+ rout = mmio_read_32(MBOX_OFFSET + MBOX_ROUT);
+
+ while (rout != rin) {
+ resp = mmio_read_32(MBOX_OFFSET +
+ MBOX_RESP_BUFFER + ((rout++)*4));
+
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+
+ if (MBOX_RESP_CLIENT_ID(resp) != MBOX_ATF_CLIENT_ID ||
+ MBOX_RESP_JOB_ID(resp) != job_id)
+ continue;
+
+ if (MBOX_RESP_ERR(resp) > 0) {
+ INFO("Error in response: %x\n", resp);
+ return -MBOX_RESP_ERR(resp);
+ }
+ response_length = MBOX_RESP_LEN(resp);
+
+ while (response_length) {
+
+ response_length--;
+ resp = mmio_read_32(MBOX_OFFSET +
+ MBOX_RESP_BUFFER +
+ (rout)*4);
+ if (response) {
+ *(response + total_resp_len) = resp;
+ total_resp_len++;
+ }
+ rout++;
+ rout %= MBOX_RESP_BUFFER_SIZE;
+ mmio_write_32(MBOX_OFFSET + MBOX_ROUT, rout);
+ }
+ return total_resp_len;
+ }
+ }
+}
+
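+/* Queue a command in the mailbox without waiting for the SDM response. */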
+void mailbox_send_cmd_async(int job_id, unsigned int cmd, uint32_t *args,
+ int len, int urgent)
+{
+ if (urgent)
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 1);
+
+ fill_mailbox_circular_buffer(MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
+ MBOX_JOB_ID_CMD(job_id) |
+ MBOX_CMD_LEN_CMD(len) |
+ MBOX_INDIRECT |
+ cmd, args, len);
+}
+
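+/*
+ * Send a command (urgent commands go through the URG register) and wait
+ * for the SDM response.
+ */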
+int mailbox_send_cmd(int job_id, unsigned int cmd, uint32_t *args,
+ int len, int urgent, uint32_t *response)
+{
+ int status;
+
+ if (urgent) {
+ urgent |= mmio_read_32(MBOX_OFFSET + MBOX_STATUS) &
+ MBOX_STATUS_UA_MASK;
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, cmd);
+ status = 0;
+ } else {
+ status = fill_mailbox_circular_buffer(
+ MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
+ MBOX_JOB_ID_CMD(job_id) |
+ cmd, args, len);
+ }
+
+ if (status)
+ return status;
+
+ return mailbox_poll_response(job_id, urgent, response);
+}
+
+void mailbox_set_int(int interrupt)
+{
+ mmio_write_32(MBOX_OFFSET + MBOX_INT, MBOX_COE_BIT(interrupt) |
+ MBOX_UAE_BIT(interrupt));
+}
+
+
+void mailbox_set_qspi_open(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_OPEN, 0, 0, 0, 0);
+}
+
+void mailbox_set_qspi_direct(void)
+{
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, 0, 0, 0, 0);
+}
+
+void mailbox_set_qspi_close(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_CLOSE, 0, 0, 0, 0);
+}
+
+int mailbox_get_qspi_clock(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ return mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_DIRECT, 0, 0, 0, 0);
+}
+
+void mailbox_qspi_set_cs(int device_select)
+{
+ uint32_t cs_setting = device_select;
+
+ /* QSPI device select settings at 31:28 */
+ cs_setting = (cs_setting << 28);
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_QSPI_SET_CS, &cs_setting,
+ 1, 0, 0);
+}
+
+void mailbox_reset_cold(void)
+{
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+ mailbox_send_cmd(MBOX_JOB_ID, MBOX_CMD_REBOOT_HPS, 0, 0, 0, 0);
+}
+
+int mailbox_init(void)
+{
+ int status = 0;
+
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE |
+ MBOX_INT_FLAG_UAE);
+ mmio_write_32(MBOX_OFFSET + MBOX_URG, 0);
+ mmio_write_32(MBOX_OFFSET + MBOX_DOORBELL_FROM_SDM, 0);
+ status = mailbox_send_cmd(0, MBOX_CMD_RESTART, 0, 0, 1, 0);
+
+ if (status)
+ return status;
+
+ mailbox_set_int(MBOX_INT_FLAG_COE | MBOX_INT_FLAG_RIE);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <lib/mmio.h>
+#include <lib/utils.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <platform_def.h>
+
+#include "agilex_memory_controller.h"
+
+#define ALT_CCU_NOC_DI_SET_MSK 0x10
+
+#define DDR_READ_LATENCY_DELAY 40
+#define MAX_MEM_CAL_RETRY 3
+#define PRE_CALIBRATION_DELAY 1
+#define POST_CALIBRATION_DELAY 1
+#define TIMEOUT_EMIF_CALIBRATION 100
+#define CLEAR_EMIF_DELAY 50000
+#define CLEAR_EMIF_TIMEOUT 0x100000
+#define TIMEOUT_INT_RESP 10000
+
+#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
+#define DDR_CONFIG_ELEMENTS (sizeof(ddr_config)/sizeof(uint32_t))
+
+/* tWR = Min. 15ns constant, see JEDEC standard eg. DDR4 is JESD79-4.pdf */
+#define tWR_IN_NS 15
+
+void configure_hmc_adaptor_regs(void);
+void configure_ddr_sched_ctrl_regs(void);
+
+/* The following are the supported configurations */
+uint32_t ddr_config[] = {
+ /* DDR_CONFIG(Address order,Bank,Column,Row) */
+ /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
+ DDR_CONFIG(0, 3, 10, 12),
+ DDR_CONFIG(0, 3, 9, 13),
+ DDR_CONFIG(0, 3, 10, 13),
+ DDR_CONFIG(0, 3, 9, 14),
+ DDR_CONFIG(0, 3, 10, 14),
+ DDR_CONFIG(0, 3, 10, 15),
+ DDR_CONFIG(0, 3, 11, 14),
+ DDR_CONFIG(0, 3, 11, 15),
+ DDR_CONFIG(0, 3, 10, 16),
+ DDR_CONFIG(0, 3, 11, 16),
+ DDR_CONFIG(0, 3, 12, 15), /* 0xa */
+ /* List for DDR4 only (pinout order > chip, bank, row, column) */
+ DDR_CONFIG(1, 3, 10, 14),
+ DDR_CONFIG(1, 4, 10, 14),
+ DDR_CONFIG(1, 3, 10, 15),
+ DDR_CONFIG(1, 4, 10, 15),
+ DDR_CONFIG(1, 3, 10, 16),
+ DDR_CONFIG(1, 4, 10, 16),
+ DDR_CONFIG(1, 3, 10, 17),
+ DDR_CONFIG(1, 4, 10, 17),
+};
+
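+/*
+ * Return the index of the matching entry in ddr_config[], or 0 if no
+ * match is found.
+ */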
+static int match_ddr_conf(uint32_t ddr_conf)
+{
+ int i;
+
+ for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) {
+ if (ddr_conf == ddr_config[i])
+ return i;
+ }
+ return 0;
+}
+
+static int check_hmc_clk(void)
+{
+ unsigned long timeout = 0;
+ uint32_t hmc_clk;
+
+ do {
+ hmc_clk = mmio_read_32(AGX_SYSMGR_CORE_HMC_CLK);
+ if (hmc_clk & AGX_SYSMGR_CORE_HMC_CLK_STATUS)
+ break;
+ udelay(1);
+ } while (++timeout < 1000);
+ if (timeout >= 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int clear_emif(void)
+{
+ uint32_t data;
+ unsigned long timeout;
+
+ mmio_write_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKECTRL, 0);
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT);
+ if ((data & AGX_MPFE_HMC_ADP_RSTHANDSHAKESTAT_SEQ2CORE) == 0)
+ break;
+ udelay(CLEAR_EMIF_DELAY);
+ } while (++timeout < CLEAR_EMIF_TIMEOUT);
+ if (timeout >= CLEAR_EMIF_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
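+/*
+ * Wait for the hard memory controller to report DRAM calibration success,
+ * retrying up to MAX_MEM_CAL_RETRY times after clearing the EMIF.
+ */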
+static int mem_calibration(void)
+{
+ int status = 0;
+ uint32_t data;
+ unsigned long timeout;
+ unsigned long retry = 0;
+
+ udelay(PRE_CALIBRATION_DELAY);
+
+ do {
+ if (retry != 0)
+ INFO("DDR: Retrying DRAM calibration\n");
+
+ timeout = 0;
+ do {
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRCALSTAT);
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 1)
+ break;
+ udelay(1);
+ } while (++timeout < TIMEOUT_EMIF_CALIBRATION);
+
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ status = clear_emif();
+ if (status)
+ ERROR("Failed to clear Emif\n");
+ } else {
+ break;
+ }
+ } while (++retry < MAX_MEM_CAL_RETRY);
+
+ if (AGX_MPFE_HMC_ADP_DDRCALSTAT_CAL(data) == 0) {
+ ERROR("DDR: DRAM calibration failed.\n");
+ status = -EIO;
+ } else {
+ INFO("DDR: DRAM calibration success.\n");
+ status = 0;
+ }
+
+ udelay(POST_CALIBRATION_DELAY);
+
+ return status;
+}
+
+int init_hard_memory_controller(void)
+{
+ int status;
+
+ status = check_hmc_clk();
+ if (status) {
+ ERROR("DDR: Error, HMC clock not running\n");
+ return status;
+ }
+
+/* mmio_clrbits_32(AGX_RSTMGR_BRGMODRST, AGX_RSTMGR_BRGMODRST_DDRSCH);*/
+
+ status = mem_calibration();
+ if (status) {
+ ERROR("DDR: Memory Calibration Failed\n");
+ return status;
+ }
+
+ configure_hmc_adaptor_regs();
+/* configure_ddr_sched_ctrl_regs();*/
+
+ return 0;
+}
+
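+/*
+ * Program the HPS NOC DDR scheduler timing registers from the IOHMC
+ * calibration and configuration registers.
+ */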
+void configure_ddr_sched_ctrl_regs(void)
+{
+ uint32_t data, dram_addr_order, ddr_conf, bank, row, col,
+ rd_to_miss, wr_to_miss, burst_len, burst_len_ddr_clk,
+ burst_len_sched_clk, act_to_act, rd_to_wr, wr_to_rd, bw_ratio,
+ t_rtp, t_rp, t_rcd, rd_latency, tw_rin_clk_cycles,
+ bw_ratio_extended, auto_precharge = 0, act_to_act_bank, faw,
+ faw_bank, bus_rd_to_rd, bus_rd_to_wr, bus_wr_to_rd;
+
+ INFO("Init HPS NOC's DDR Scheduler.\n");
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG1);
+ dram_addr_order = AGX_MPFE_IOHMC_CTRLCFG1_CFG_ADDR_ORDER(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+
+ col = IOHMC_DRAMADDRW_COL_ADDR_WIDTH(data);
+ row = IOHMC_DRAMADDRW_ROW_ADDR_WIDTH(data);
+ bank = IOHMC_DRAMADDRW_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_BANK_GRP_ADDR_WIDTH(data);
+
+ ddr_conf = match_ddr_conf(DDR_CONFIG(dram_addr_order, bank, col, row));
+
+ if (ddr_conf) {
+ mmio_clrsetbits_32(
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF,
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET_MSK,
+ AGX_MPFE_DDR_MAIN_SCHED_DDRCONF_SET(ddr_conf));
+ } else {
+ ERROR("DDR: Cannot find predefined ddrConf configuration.\n");
+ }
+
+ mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMTIMING0);
+ rd_latency = AGX_MPFE_IOHMC_REG_DRAMTIMING0_CFG_TCL(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING0);
+ act_to_act = ACT_TO_ACT(data);
+ t_rcd = ACT_TO_RDWR(data);
+ act_to_act_bank = ACT_TO_ACT_DIFF_BANK(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING1);
+ rd_to_wr = RD_TO_WR(data);
+ bus_rd_to_rd = RD_TO_RD_DIFF_CHIP(data);
+ bus_rd_to_wr = RD_TO_WR_DIFF_CHIP(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING2);
+ t_rtp = RD_TO_PCH(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING3);
+ wr_to_rd = CALTIMING3_WR_TO_RD(data);
+ bus_wr_to_rd = CALTIMING3_WR_TO_RD_DIFF_CHIP(data);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING4);
+ t_rp = PCH_TO_VALID(data);
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio = ((HMC_ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 0 : 1);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+ burst_len = HMC_ADP_DDRIOCTRL_CTRL_BURST_LENGTH(data);
+ burst_len_ddr_clk = burst_len / 2;
+ burst_len_sched_clk = ((burst_len/2) / 2);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CTRLCFG0);
+ switch (AGX_MPFE_IOHMC_REG_CTRLCFG0_CFG_MEM_TYPE(data)) {
+ case 1:
+ /* DDR4 - 1333MHz */
+ /* 20 (19.995) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1333) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1333) / 1000) + 1 :
+ ((tWR_IN_NS * 1333) / 1000);
+ break;
+ default:
+ /* Others - 1066MHz or slower */
+ /* 16 (15.990) clock cycles = 15ns */
+ /* Calculate with rounding */
+ tw_rin_clk_cycles = (((tWR_IN_NS * 1066) % 1000) >= 500) ?
+ ((tWR_IN_NS * 1066) / 1000) + 1 :
+ ((tWR_IN_NS * 1066) / 1000);
+ break;
+ }
+
+ rd_to_miss = t_rtp + t_rp + t_rcd - burst_len_sched_clk;
+ wr_to_miss = ((rd_latency + burst_len_ddr_clk + 2 + tw_rin_clk_cycles)
+ / 2) - rd_to_wr + t_rp + t_rcd;
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRTIMING,
+ bw_ratio << DDRTIMING_BWRATIO_OFST |
+ wr_to_rd << DDRTIMING_WRTORD_OFST|
+ rd_to_wr << DDRTIMING_RDTOWR_OFST |
+ burst_len_sched_clk << DDRTIMING_BURSTLEN_OFST |
+ wr_to_miss << DDRTIMING_WRTOMISS_OFST |
+ rd_to_miss << DDRTIMING_RDTOMISS_OFST |
+ act_to_act << DDRTIMING_ACTTOACT_OFST);
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP(HMC_ADP_DDRIOCTRL));
+ bw_ratio_extended = ((ADP_DDRIOCTRL_IO_SIZE(data) == 0) ? 1 : 0);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DDRMODE,
+ bw_ratio_extended << DDRMODE_BWRATIOEXTENDED_OFST |
+ auto_precharge << DDRMODE_AUTOPRECHARGE_OFST);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_READLATENCY,
+ (rd_latency / 2) + DDR_READ_LATENCY_DELAY);
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_CALTIMING9);
+ faw = AGX_MPFE_IOHMC_CALTIMING9_ACT_TO_ACT(data);
+
+ faw_bank = 1; /* Always 1 because we always have 4 bank DDR */
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE,
+ faw_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAWBANK_OFST |
+ faw << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_FAW_OFST |
+ act_to_act_bank << AGX_MPFE_DDR_MAIN_SCHED_ACTIVATE_RRD_OFST);
+
+ mmio_write_32(AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV,
+ ((bus_rd_to_rd
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTORD_MSK) |
+ ((bus_rd_to_wr
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSRDTOWR_MSK) |
+ ((bus_wr_to_rd
+ << AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_OFST)
+ & AGX_MPFE_DDR_MAIN_SCHED_DEVTODEV_BUSWRTORD_MSK));
+}
+
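+/*
+ * Compute the physical DRAM size from the IOHMC address widths and the
+ * external interface I/O width.
+ */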
+unsigned long get_physical_dram_size(void)
+{
+ uint32_t data;
+ unsigned long ram_addr_width, ram_ext_if_io_width;
+
+ data = mmio_read_32(AGX_MPFE_HMC_ADP_DDRIOCTRL);
+ switch (AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE(data)) {
+ case 0:
+ ram_ext_if_io_width = 16;
+ break;
+ case 1:
+ ram_ext_if_io_width = 32;
+ break;
+ case 2:
+ ram_ext_if_io_width = 64;
+ break;
+ default:
+ ram_ext_if_io_width = 0;
+ break;
+ }
+
+ data = mmio_read_32(AGX_MPFE_IOHMC_REG_DRAMADDRW);
+ ram_addr_width = IOHMC_DRAMADDRW_CFG_COL_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_ROW_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH(data) +
+ IOHMC_DRAMADDRW_CFG_CS_ADDR_WIDTH(data);
+
+ return (1 << ram_addr_width) * (ram_ext_if_io_width / 8);
+}
+
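+/*
+ * Configure the HMC adaptor: set the DDR I/O size, open the DDR firewall
+ * for non-secure accesses and, if the IOHMC reports ECC enabled, turn on
+ * ECC and scrub the DRAM.
+ */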
+void configure_hmc_adaptor_regs(void)
+{
+ uint32_t data;
+ uint32_t dram_io_width;
+
+ /* Configure DDR data rate */
+ dram_io_width = AGX_MPFE_IOHMC_NIOSRESERVE0_NIOS_RESERVE0(
+ mmio_read_32(AGX_MPFE_IOHMC_REG_NIOSRESERVE0_OFST));
+ dram_io_width = (dram_io_width & 0xFF) >> 5;
+
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_DDRIOCTRL,
+ AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_MSK,
+ dram_io_width << AGX_MPFE_HMC_ADP_DDRIOCTRL_IO_SIZE_OFST);
+
+ /* Copy dram addr width from IOHMC to HMC ADP */
+ data = mmio_read_32(AGX_MPFE_IOHMC_DRAMADDRW);
+ mmio_write_32(AGX_MPFE_HMC_ADP(ADP_DRAMADDRWIDTH), data);
+
+ /* Enable nonsecure access to DDR */
+ mmio_write_32(AGX_NOC_FW_DDR_SCR_MPUREGION0ADDR_LIMIT,
+ 0x4000000 - 1);
+ mmio_write_32(AGX_NOC_FW_DDR_SCR_NONMPUREGION0ADDR_LIMIT,
+ 0x4000000 - 1);
+ mmio_write_32(AGX_SOC_NOC_FW_DDR_SCR_ENABLE, BIT(0) | BIT(8));
+
+ /* ECC enablement */
+ data = mmio_read_32(AGX_MPFE_IOHMC_REG_CTRLCFG1);
+ if (data & (1 << AGX_IOHMC_CTRLCFG1_ENABLE_ECC_OFST)) {
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK);
+
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL2,
+ AGX_MPFE_HMC_ADP_ECCCTRL2_OVRW_RB_ECC_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL2_RMW_EN_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL2_AUTOWB_EN_SET_MSK);
+
+ mmio_clrsetbits_32(AGX_MPFE_HMC_ADP_ECCCTRL1,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_AUTOWB_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_CNT_RST_SET_MSK |
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK,
+ AGX_MPFE_HMC_ADP_ECCCTRL1_ECC_EN_SET_MSK);
+ INFO("Scrubbing ECC\n");
+
+ /* ECC Scrubbing */
+ zeromem(DRAM_BASE, DRAM_SIZE);
+ } else {
+ INFO("ECC is disabled.\n");
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include "agilex_pinmux.h"
+
+const uint32_t sysmgr_pinmux_array_sel[] = {
+ 0x00000000, 0x00000001, /* usb */
+ 0x00000004, 0x00000001,
+ 0x00000008, 0x00000001,
+ 0x0000000c, 0x00000001,
+ 0x00000010, 0x00000001,
+ 0x00000014, 0x00000001,
+ 0x00000018, 0x00000001,
+ 0x0000001c, 0x00000001,
+ 0x00000020, 0x00000001,
+ 0x00000024, 0x00000001,
+ 0x00000028, 0x00000001,
+ 0x0000002c, 0x00000001,
+ 0x00000030, 0x00000000, /* emac0 */
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000008, /* gpio1 */
+ 0x00000064, 0x00000008,
+ 0x00000068, 0x00000005, /* uart0 tx */
+ 0x0000006c, 0x00000005, /* uart 0 rx */
+ 0x00000070, 0x00000008, /* gpio */
+ 0x00000074, 0x00000008,
+ 0x00000078, 0x00000004, /* i2c1 */
+ 0x0000007c, 0x00000004,
+ 0x00000080, 0x00000007, /* jtag */
+ 0x00000084, 0x00000007,
+ 0x00000088, 0x00000007,
+ 0x0000008c, 0x00000007,
+ 0x00000090, 0x00000001, /* sdmmc data0 */
+ 0x00000094, 0x00000001,
+ 0x00000098, 0x00000001,
+ 0x0000009c, 0x00000001,
+ 0x00000100, 0x00000001,
+ 0x00000104, 0x00000001, /* sdmmc.data3 */
+ 0x00000108, 0x00000008, /* loan */
+ 0x0000010c, 0x00000008, /* gpio */
+ 0x00000110, 0x00000008,
+ 0x00000114, 0x00000008, /* gpio1.io21 */
+ 0x00000118, 0x00000005, /* mdio0.mdio */
+ 0x0000011c, 0x00000005 /* mdio0.mdc */
+};
+
+const uint32_t sysmgr_pinmux_array_ctrl[] = {
+ 0x00000000, 0x00502c38, /* Q1_1 */
+ 0x00000004, 0x00102c38,
+ 0x00000008, 0x00502c38,
+ 0x0000000c, 0x00502c38,
+ 0x00000010, 0x00502c38,
+ 0x00000014, 0x00502c38,
+ 0x00000018, 0x00502c38,
+ 0x0000001c, 0x00502c38,
+ 0x00000020, 0x00502c38,
+ 0x00000024, 0x00502c38,
+ 0x00000028, 0x00502c38,
+ 0x0000002c, 0x00502c38,
+ 0x00000030, 0x00102c38, /* Q2_1 */
+ 0x00000034, 0x00102c38,
+ 0x00000038, 0x00502c38,
+ 0x0000003c, 0x00502c38,
+ 0x00000040, 0x00102c38,
+ 0x00000044, 0x00102c38,
+ 0x00000048, 0x00502c38,
+ 0x0000004c, 0x00502c38,
+ 0x00000050, 0x00102c38,
+ 0x00000054, 0x00102c38,
+ 0x00000058, 0x00502c38,
+ 0x0000005c, 0x00502c38,
+ 0x00000060, 0x00502c38, /* Q3_1 */
+ 0x00000064, 0x00502c38,
+ 0x00000068, 0x00102c38,
+ 0x0000006c, 0x00502c38,
+ 0x000000d0, 0x00502c38,
+ 0x000000d4, 0x00502c38,
+ 0x000000d8, 0x00542c38,
+ 0x000000dc, 0x00542c38,
+ 0x000000e0, 0x00502c38,
+ 0x000000e4, 0x00502c38,
+ 0x000000e8, 0x00102c38,
+ 0x000000ec, 0x00502c38,
+ 0x000000f0, 0x00502c38, /* Q4_1 */
+ 0x000000f4, 0x00502c38,
+ 0x000000f8, 0x00102c38,
+ 0x000000fc, 0x00502c38,
+ 0x00000100, 0x00502c38,
+ 0x00000104, 0x00502c38,
+ 0x00000108, 0x00102c38,
+ 0x0000010c, 0x00502c38,
+ 0x00000110, 0x00502c38,
+ 0x00000114, 0x00502c38,
+ 0x00000118, 0x00542c38,
+ 0x0000011c, 0x00102c38
+};
+
+const uint32_t sysmgr_pinmux_array_fpga[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x0000002a
+};
+
+const uint32_t sysmgr_pinmux_array_iodelay[] = {
+ 0x00000000, 0x00000000,
+ 0x00000004, 0x00000000,
+ 0x00000008, 0x00000000,
+ 0x0000000c, 0x00000000,
+ 0x00000010, 0x00000000,
+ 0x00000014, 0x00000000,
+ 0x00000018, 0x00000000,
+ 0x0000001c, 0x00000000,
+ 0x00000020, 0x00000000,
+ 0x00000024, 0x00000000,
+ 0x00000028, 0x00000000,
+ 0x0000002c, 0x00000000,
+ 0x00000030, 0x00000000,
+ 0x00000034, 0x00000000,
+ 0x00000038, 0x00000000,
+ 0x0000003c, 0x00000000,
+ 0x00000040, 0x00000000,
+ 0x00000044, 0x00000000,
+ 0x00000048, 0x00000000,
+ 0x0000004c, 0x00000000,
+ 0x00000050, 0x00000000,
+ 0x00000054, 0x00000000,
+ 0x00000058, 0x00000000,
+ 0x0000005c, 0x00000000,
+ 0x00000060, 0x00000000,
+ 0x00000064, 0x00000000,
+ 0x00000068, 0x00000000,
+ 0x0000006c, 0x00000000,
+ 0x00000070, 0x00000000,
+ 0x00000074, 0x00000000,
+ 0x00000078, 0x00000000,
+ 0x0000007c, 0x00000000,
+ 0x00000080, 0x00000000,
+ 0x00000084, 0x00000000,
+ 0x00000088, 0x00000000,
+ 0x0000008c, 0x00000000,
+ 0x00000090, 0x00000000,
+ 0x00000094, 0x00000000,
+ 0x00000098, 0x00000000,
+ 0x0000009c, 0x00000000,
+ 0x00000100, 0x00000000,
+ 0x00000104, 0x00000000,
+ 0x00000108, 0x00000000,
+ 0x0000010c, 0x00000000,
+ 0x00000110, 0x00000000,
+ 0x00000114, 0x00000000,
+ 0x00000118, 0x00000000,
+ 0x0000011c, 0x00000000
+};
+
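+/*
+ * Apply the pin mux select, I/O control, FPGA routing and I/O delay
+ * settings from the handoff data.
+ */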
+void config_pinmux(handoff *hoff_ptr)
+{
+ unsigned int i;
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_PIN0SEL +
+ hoff_ptr->pinmux_sel_array[i],
+ hoff_ptr->pinmux_sel_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_IO0CTRL +
+ hoff_ptr->pinmux_io_array[i],
+ hoff_ptr->pinmux_io_array[i+1]);
+ }
+
+ for (i = 0; i < 42; i += 2) {
+ mmio_write_32(AGX_PINMUX_PINMUX_EMAC0_USEFPGA +
+ hoff_ptr->pinmux_fpga_array[i],
+ hoff_ptr->pinmux_fpga_array[i+1]);
+ }
+
+ for (i = 0; i < 96; i += 2) {
+ mmio_write_32(AGX_PINMUX_IO0_DELAY +
+ hoff_ptr->pinmux_iodelay_array[i],
+ hoff_ptr->pinmux_iodelay_array[i+1]);
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+
+#include "agilex_reset_manager.h"
+
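+/* Release the HPS peripherals and the MPFE bridge from reset. */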
+void deassert_peripheral_reset(void)
+{
+ mmio_clrbits_32(AGX_RSTMGR_PER1MODRST,
+ AGX_RSTMGR_PER1MODRST_WATCHDOG0 |
+ AGX_RSTMGR_PER1MODRST_WATCHDOG1 |
+ AGX_RSTMGR_PER1MODRST_WATCHDOG2 |
+ AGX_RSTMGR_PER1MODRST_WATCHDOG3 |
+ AGX_RSTMGR_PER1MODRST_L4SYSTIMER0 |
+ AGX_RSTMGR_PER1MODRST_L4SYSTIMER1 |
+ AGX_RSTMGR_PER1MODRST_SPTIMER0 |
+ AGX_RSTMGR_PER1MODRST_SPTIMER1 |
+ AGX_RSTMGR_PER1MODRST_I2C0 |
+ AGX_RSTMGR_PER1MODRST_I2C1 |
+ AGX_RSTMGR_PER1MODRST_I2C2 |
+ AGX_RSTMGR_PER1MODRST_I2C3 |
+ AGX_RSTMGR_PER1MODRST_I2C4 |
+ AGX_RSTMGR_PER1MODRST_UART0 |
+ AGX_RSTMGR_PER1MODRST_UART1 |
+ AGX_RSTMGR_PER1MODRST_GPIO0 |
+ AGX_RSTMGR_PER1MODRST_GPIO1);
+
+ mmio_clrbits_32(AGX_RSTMGR_PER0MODRST,
+ AGX_RSTMGR_PER0MODRST_EMAC0OCP |
+ AGX_RSTMGR_PER0MODRST_EMAC1OCP |
+ AGX_RSTMGR_PER0MODRST_EMAC2OCP |
+ AGX_RSTMGR_PER0MODRST_USB0OCP |
+ AGX_RSTMGR_PER0MODRST_USB1OCP |
+ AGX_RSTMGR_PER0MODRST_NANDOCP |
+ AGX_RSTMGR_PER0MODRST_SDMMCOCP |
+ AGX_RSTMGR_PER0MODRST_DMAOCP);
+
+ mmio_clrbits_32(AGX_RSTMGR_PER0MODRST,
+ AGX_RSTMGR_PER0MODRST_EMAC0 |
+ AGX_RSTMGR_PER0MODRST_EMAC1 |
+ AGX_RSTMGR_PER0MODRST_EMAC2 |
+ AGX_RSTMGR_PER0MODRST_USB0 |
+ AGX_RSTMGR_PER0MODRST_USB1 |
+ AGX_RSTMGR_PER0MODRST_NAND |
+ AGX_RSTMGR_PER0MODRST_SDMMC |
+ AGX_RSTMGR_PER0MODRST_DMA |
+ AGX_RSTMGR_PER0MODRST_SPIM0 |
+ AGX_RSTMGR_PER0MODRST_SPIM1 |
+ AGX_RSTMGR_PER0MODRST_SPIS0 |
+ AGX_RSTMGR_PER0MODRST_SPIS1 |
+ AGX_RSTMGR_PER0MODRST_EMACPTP |
+ AGX_RSTMGR_PER0MODRST_DMAIF0 |
+ AGX_RSTMGR_PER0MODRST_DMAIF1 |
+ AGX_RSTMGR_PER0MODRST_DMAIF2 |
+ AGX_RSTMGR_PER0MODRST_DMAIF3 |
+ AGX_RSTMGR_PER0MODRST_DMAIF4 |
+ AGX_RSTMGR_PER0MODRST_DMAIF5 |
+ AGX_RSTMGR_PER0MODRST_DMAIF6 |
+ AGX_RSTMGR_PER0MODRST_DMAIF7);
+
+ mmio_clrbits_32(AGX_RSTMGR_BRGMODRST,
+ AGX_RSTMGR_BRGMODRST_MPFE);
+}
+
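+/* Enable the reset manager handshakes required before a warm reset. */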
+void config_hps_hs_before_warm_reset(void)
+{
+ uint32_t or_mask = 0;
+
+ or_mask |= AGX_RSTMGR_HDSKEN_SDRSELFREFEN;
+ or_mask |= AGX_RSTMGR_HDSKEN_FPGAHSEN;
+ or_mask |= AGX_RSTMGR_HDSKEN_ETRSTALLEN;
+ or_mask |= AGX_RSTMGR_HDSKEN_L2FLUSHEN;
+ or_mask |= AGX_RSTMGR_HDSKEN_L3NOC_DBG;
+ or_mask |= AGX_RSTMGR_HDSKEN_DEBUG_L3NOC;
+
+ mmio_setbits_32(AGX_RSTMGR_HDSKEN, or_mask);
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <lib/mmio.h>
+#include <lib/utils_def.h>
+
+#include "agilex_system_manager.h"
+
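+/*
+ * Open the L4 peripheral and system firewalls so the listed peripherals
+ * are accessible from the non-secure world.
+ */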
+void enable_nonsecure_access(void)
+{
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_NAND_REGISTER, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_NAND_DATA, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_READ_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_NAND_WRITE_ECC,
+ DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_USB0_REGISTER, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_USB1_REGISTER, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_USB0_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_USB1_ECC, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_MASTER0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_MASTER1, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SPI_SLAVE1, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC1, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_EMAC2, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC0RX_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC0TX_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC1RX_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC1TX_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC2RX_ECC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_EMAC2TX_ECC, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SDMMC, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_SDMMC_ECC, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_GPIO0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_GPIO1, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C1, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C2, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C3, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_I2C4, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_SP_TIMER1, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_UART0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_PER_SCR_UART1, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_DMA_ECC, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OCRAM_ECC, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_CLK_MGR, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_IO_MGR, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_RST_MGR, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_SYS_MGR, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OSC0_TIMER, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_OSC1_TIMER, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG0, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG1, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG2, DISABLE_L4_FIREWALL);
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_WATCHDOG3, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_DAP, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_L4_NOC_PROBES, DISABLE_L4_FIREWALL);
+
+ mmio_write_32(AGX_NOC_FW_L4_SYS_SCR_L4_NOC_QOS, DISABLE_L4_FIREWALL);
+}
+
+void enable_ns_bridge_access(void)
+{
+ mmio_write_32(AGX_FIREWALL_SOC2FPGA, DISABLE_BRIDGE_FIREWALL);
+ mmio_write_32(AGX_FIREWALL_LWSOC2FPGA, DISABLE_BRIDGE_FIREWALL);
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <arch_helpers.h>
+#include <drivers/delay_timer.h>
+#include <lib/mmio.h>
+
+#define AGX_GLOBAL_TIMER 0xffd01000
+#define AGX_GLOBAL_TIMER_EN 0x3
+
+/********************************************************************
+ * The timer delay function
+ ********************************************************************/
+static uint32_t socfpga_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter. The value is
+ * clipped from 64 to 32 bits.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
+
+static const timer_ops_t plat_timer_ops = {
+ .get_timer_value = socfpga_get_timer_value,
+ .clk_mult = 1,
+ .clk_div = PLAT_SYS_COUNTER_FREQ_IN_MHZ,
+};
+
+void socfpga_delay_timer_init(void)
+{
+ timer_init(&plat_timer_ops);
+ mmio_write_32(AGX_GLOBAL_TIMER, AGX_GLOBAL_TIMER_EN);
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <common/desc_image_load.h>
+
+/*******************************************************************************
+ * This function flushes the data structures so that they are visible
+ * in memory for the next BL image.
+ ******************************************************************************/
+void plat_flush_next_bl_params(void)
+{
+ flush_bl_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of loadable images.
+ ******************************************************************************/
+bl_load_info_t *plat_get_bl_image_load_info(void)
+{
+ return get_bl_load_info_from_mem_params_desc();
+}
+
+/*******************************************************************************
+ * This function returns the list of executable images.
+ ******************************************************************************/
+bl_params_t *plat_get_next_bl_params(void)
+{
+ return get_next_bl_params_from_mem_params_desc();
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/arm/gicv2.h>
+#include <lib/mmio.h>
+#include <lib/psci/psci.h>
+#include <plat/common/platform.h>
+
+#include "agilex_reset_manager.h"
+#include "agilex_mailbox.h"
+
+#define AGX_RSTMGR_OFST 0xffd11000
+#define AGX_RSTMGR_MPUMODRST_OFST 0x20
+
+uintptr_t *agilex_sec_entry = (uintptr_t *) PLAT_AGX_SEC_ENTRY;
+uintptr_t *cpuid_release = (uintptr_t *) PLAT_CPUID_RELEASE;
+
+/*******************************************************************************
+ * plat handler called when a CPU is about to enter standby.
+ ******************************************************************************/
+void socfpga_cpu_standby(plat_local_state_t cpu_state)
+{
+ /*
+ * Enter standby state
+ * dsb is good practice before using wfi to enter low power states
+ */
+ VERBOSE("%s: cpu_state: 0x%x\n", __func__, cpu_state);
+ dsb();
+ wfi();
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be turned on. The
+ * mpidr determines the CPU to be turned on.
+ ******************************************************************************/
+int socfpga_pwr_domain_on(u_register_t mpidr)
+{
+ int cpu_id = plat_core_pos_by_mpidr(mpidr);
+
+ VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);
+
+ if (cpu_id == -1)
+ return PSCI_E_INTERN_FAIL;
+
+ *cpuid_release = cpu_id;
+
+ /* release core reset */
+ mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
+ 1 << cpu_id);
+ return PSCI_E_SUCCESS;
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be turned off. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void socfpga_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ unsigned int cpu_id = plat_my_core_pos();
+
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* TODO: Prevent interrupts from spuriously waking up this cpu */
+ /* gicv2_cpuif_disable(); */
+
+ /* assert core reset */
+ mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
+ 1 << cpu_id);
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain is about to be suspended. The
+ * target_state encodes the power state that each level should transition to.
+ ******************************************************************************/
+void socfpga_pwr_domain_suspend(const psci_power_state_t *target_state)
+{
+ unsigned int cpu_id = plat_my_core_pos();
+
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+ /* assert core reset */
+ mmio_setbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
+ 1 << cpu_id);
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain has just been powered on after
+ * being turned off earlier. The target_state encodes the low power state that
+ * each level has woken up from.
+ ******************************************************************************/
+void socfpga_pwr_domain_on_finish(const psci_power_state_t *target_state)
+{
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* Program the gic per-cpu distributor or re-distributor interface */
+ gicv2_pcpu_distif_init();
+ gicv2_set_pe_target_mask(plat_my_core_pos());
+
+ /* Enable the gic cpu interface */
+ gicv2_cpuif_enable();
+}
+
+/*******************************************************************************
+ * plat handler called when a power domain has just been powered on after
+ * having been suspended earlier. The target_state encodes the low power state
+ * that each level has woken up from.
+ * TODO: At the moment we reuse the on finisher and reinitialize the secure
+ * context. Need to implement a separate suspend finisher.
+ ******************************************************************************/
+void socfpga_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ unsigned int cpu_id = plat_my_core_pos();
+
+ for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++)
+ VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
+ __func__, i, target_state->pwr_domain_state[i]);
+
+ /* release core reset */
+ mmio_clrbits_32(AGX_RSTMGR_OFST + AGX_RSTMGR_MPUMODRST_OFST,
+ 1 << cpu_id);
+}
+
+/*******************************************************************************
+ * plat handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 socfpga_system_off(void)
+{
+ wfi();
+ ERROR("System Off: operation not handled.\n");
+ panic();
+}
+
+static void __dead2 socfpga_system_reset(void)
+{
+ INFO("assert Peripheral from Reset\r\n");
+
+ deassert_peripheral_reset();
+ mailbox_reset_cold();
+
+ while (1)
+ wfi();
+}
+
+int socfpga_validate_power_state(unsigned int power_state,
+ psci_power_state_t *req_state)
+{
+ VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);
+
+ return PSCI_E_SUCCESS;
+}
+
+int socfpga_validate_ns_entrypoint(unsigned long ns_entrypoint)
+{
+ VERBOSE("%s: ns_entrypoint: 0x%lx\n", __func__, ns_entrypoint);
+ return PSCI_E_SUCCESS;
+}
+
+void socfpga_get_sys_suspend_power_state(psci_power_state_t *req_state)
+{
+ req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_OFF_STATE;
+ req_state->pwr_domain_state[1] = PLAT_MAX_OFF_STATE;
+}
+
+/*******************************************************************************
+ * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
+ * platform layer will take care of registering the handlers with PSCI.
+ ******************************************************************************/
+const plat_psci_ops_t socfpga_psci_pm_ops = {
+ .cpu_standby = socfpga_cpu_standby,
+ .pwr_domain_on = socfpga_pwr_domain_on,
+ .pwr_domain_off = socfpga_pwr_domain_off,
+ .pwr_domain_suspend = socfpga_pwr_domain_suspend,
+ .pwr_domain_on_finish = socfpga_pwr_domain_on_finish,
+ .pwr_domain_suspend_finish = socfpga_pwr_domain_suspend_finish,
+ .system_off = socfpga_system_off,
+ .system_reset = socfpga_system_reset,
+ .validate_power_state = socfpga_validate_power_state,
+ .validate_ns_entrypoint = socfpga_validate_ns_entrypoint,
+ .get_sys_suspend_power_state = socfpga_get_sys_suspend_power_state
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops.
+ ******************************************************************************/
+int plat_setup_psci_ops(uintptr_t sec_entrypoint,
+ const struct plat_psci_ops **psci_ops)
+{
+ /* Save warm boot entrypoint.*/
+ *agilex_sec_entry = sec_entrypoint;
+
+ *psci_ops = &socfpga_psci_pm_ops;
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <common/debug.h>
+#include <common/runtime_svc.h>
+#include <tools_share/uuid.h>
+
+#include "agilex_mailbox.h"
+
+/* Number of SiP Calls implemented */
+#define SIP_NUM_CALLS 0x3
+
+/* Total buffer the driver can hold */
+#define FPGA_CONFIG_BUFFER_SIZE 4
+
+int current_block;
+int current_buffer;
+int current_id = 1;
+int max_blocks;
+uint32_t bytes_per_block;
+uint32_t blocks_submitted;
+uint32_t blocks_completed;
+
+struct fpga_config_info {
+ uint32_t addr;
+ int size;
+ int size_written;
+ uint32_t write_requested;
+ int subblocks_sent;
+ int block_number;
+};
+
+/* SiP Service UUID */
+DEFINE_SVC_UUID2(intl_svc_uid,
+ 0xa85273b0, 0xe85a, 0x4862, 0xa6, 0x2a,
+ 0xfa, 0x88, 0x88, 0x17, 0x68, 0x81);
+
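+/*
+ * Default handler for SiP SMC function IDs that are not implemented by
+ * this platform.
+ */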
+uint64_t socfpga_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ ERROR("%s: unhandled SMC (0x%x)\n", __func__, smc_fid);
+ SMC_RET1(handle, SMC_UNK);
+}
+
+struct fpga_config_info fpga_config_buffers[FPGA_CONFIG_BUFFER_SIZE];
+
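+/*
+ * Split a reconfiguration buffer into SDM-sized blocks and queue
+ * MBOX_RECONFIG_DATA commands while block credit (max_blocks) remains.
+ */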
+static void intel_fpga_sdm_write_buffer(struct fpga_config_info *buffer)
+{
+ uint32_t args[3];
+
+ while (max_blocks > 0 && buffer->size > buffer->size_written) {
+ if (buffer->size - buffer->size_written <=
+ bytes_per_block) {
+ args[0] = (1<<8);
+ args[1] = buffer->addr + buffer->size_written;
+ args[2] = buffer->size - buffer->size_written;
+ buffer->size_written +=
+ buffer->size - buffer->size_written;
+ buffer->subblocks_sent++;
+ mailbox_send_cmd_async(0x4,
+ MBOX_RECONFIG_DATA,
+ args, 3, 0);
+ current_buffer++;
+ current_buffer %= FPGA_CONFIG_BUFFER_SIZE;
+ } else {
+ args[0] = (1<<8);
+ args[1] = buffer->addr + buffer->size_written;
+ args[2] = bytes_per_block;
+ buffer->size_written += bytes_per_block;
+ mailbox_send_cmd_async(0x4,
+ MBOX_RECONFIG_DATA,
+ args, 3, 0);
+ buffer->subblocks_sent++;
+ }
+ max_blocks--;
+ }
+}
+
+static int intel_fpga_sdm_write_all(void)
+{
+ int i;
+
+ for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++)
+ intel_fpga_sdm_write_buffer(
+ &fpga_config_buffers[current_buffer]);
+
+ return 0;
+}
+
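+/*
+ * Query RECONFIG_STATUS from the SDM and report whether FPGA
+ * configuration has completed successfully.
+ */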
+uint32_t intel_mailbox_fpga_config_isdone(void)
+{
+ uint32_t args[2];
+ uint32_t response[6];
+ int status;
+
+ status = mailbox_send_cmd(1, MBOX_RECONFIG_STATUS, args, 0, 0,
+ response);
+
+ if (status < 0)
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ if (response[RECONFIG_STATUS_STATE] &&
+ response[RECONFIG_STATUS_STATE] != MBOX_CFGSTAT_STATE_CONFIG)
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ if (!(response[RECONFIG_STATUS_PIN_STATUS] & PIN_STATUS_NSTATUS))
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ if (response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
+ SOFTFUNC_STATUS_SEU_ERROR)
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ if ((response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
+ SOFTFUNC_STATUS_CONF_DONE) &&
+ (response[RECONFIG_STATUS_SOFTFUNC_STATUS] &
+ SOFTFUNC_STATUS_INIT_DONE))
+ return INTEL_SIP_SMC_STATUS_OK;
+
+ return INTEL_SIP_SMC_STATUS_ERROR;
+}
+
+static int mark_last_buffer_xfer_completed(uint32_t *buffer_addr_completed)
+{
+ int i;
+
+ for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (fpga_config_buffers[i].block_number == current_block) {
+ fpga_config_buffers[i].subblocks_sent--;
+ if (fpga_config_buffers[i].subblocks_sent == 0
+ && fpga_config_buffers[i].size <=
+ fpga_config_buffers[i].size_written) {
+ fpga_config_buffers[i].write_requested = 0;
+ current_block++;
+ *buffer_addr_completed =
+ fpga_config_buffers[i].addr;
+ return 0;
+ }
+ }
+ }
+
+ return -1;
+}
+
+int address_in_ddr(uint32_t *addr)
+{
+ if (((unsigned long long)addr > DRAM_BASE) &&
+ ((unsigned long long)addr < DRAM_BASE + DRAM_SIZE))
+ return 0;
+
+ return -1;
+}
+
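+/*
+ * Collect the addresses of buffers whose transfer to the SDM has
+ * completed (up to three per call) and kick off further pending writes.
+ */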
+int intel_fpga_config_completed_write(uint32_t *completed_addr,
+ uint32_t *count)
+{
+ uint32_t status = INTEL_SIP_SMC_STATUS_OK;
+ int resp_len = 0;
+ uint32_t resp[5];
+ int all_completed = 1;
+ int count_check = 0;
+
+ *count = 0;
+
+ if (address_in_ddr(completed_addr) != 0 || address_in_ddr(count) != 0)
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ for (count_check = 0; count_check < 3; count_check++)
+ if (address_in_ddr(&completed_addr[*count + count_check]) != 0)
+ return INTEL_SIP_SMC_STATUS_ERROR;
+
+ resp_len = mailbox_read_response(0x4, resp);
+
+ while (resp_len >= 0 && *count < 3) {
+ max_blocks++;
+ if (mark_last_buffer_xfer_completed(
+ &completed_addr[*count]) == 0)
+ *count = *count + 1;
+ else
+ break;
+ resp_len = mailbox_read_response(0x4, resp);
+ }
+
+ if (*count <= 0) {
+ if (resp_len != MBOX_NO_RESPONSE &&
+ resp_len != MBOX_TIMEOUT && resp_len != 0) {
+ return INTEL_SIP_SMC_STATUS_ERROR;
+ }
+
+ *count = 0;
+ }
+
+ intel_fpga_sdm_write_all();
+
+ if (*count > 0)
+ status = INTEL_SIP_SMC_STATUS_OK;
+ else if (*count == 0)
+ status = INTEL_SIP_SMC_STATUS_BUSY;
+
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (fpga_config_buffers[i].write_requested != 0) {
+ all_completed = 0;
+ break;
+ }
+ }
+
+ if (all_completed == 1)
+ return INTEL_SIP_SMC_STATUS_OK;
+
+ return status;
+}
+
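+/*
+ * Start FPGA reconfiguration: send MBOX_RECONFIG, record the block credit
+ * and block size returned by the SDM and reset the buffer bookkeeping.
+ */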
+int intel_fpga_config_start(uint32_t config_type)
+{
+ uint32_t response[3];
+ int status = 0;
+
+ status = mailbox_send_cmd(2, MBOX_RECONFIG, 0, 0, 0,
+ response);
+
+ if (status < 0)
+ return status;
+
+ max_blocks = response[0];
+ bytes_per_block = response[1];
+
+ for (int i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ fpga_config_buffers[i].size = 0;
+ fpga_config_buffers[i].size_written = 0;
+ fpga_config_buffers[i].addr = 0;
+ fpga_config_buffers[i].write_requested = 0;
+ fpga_config_buffers[i].block_number = 0;
+ fpga_config_buffers[i].subblocks_sent = 0;
+ }
+
+ blocks_submitted = 0;
+ current_block = 0;
+ current_buffer = 0;
+
+ return 0;
+}
+
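+/*
+ * Queue a bitstream chunk for transfer to the SDM. Returns BUSY when the
+ * last free descriptor slot is used and REJECTED when no slot is free.
+ */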
+uint32_t intel_fpga_config_write(uint64_t mem, uint64_t size)
+{
+ int i = 0;
+ uint32_t status = INTEL_SIP_SMC_STATUS_OK;
+
+ if (mem < DRAM_BASE || mem > DRAM_BASE + DRAM_SIZE)
+ status = INTEL_SIP_SMC_STATUS_REJECTED;
+
+ if (mem + size > DRAM_BASE + DRAM_SIZE)
+ status = INTEL_SIP_SMC_STATUS_REJECTED;
+
+ for (i = 0; i < FPGA_CONFIG_BUFFER_SIZE; i++) {
+ if (!fpga_config_buffers[i].write_requested) {
+ fpga_config_buffers[i].addr = mem;
+ fpga_config_buffers[i].size = size;
+ fpga_config_buffers[i].size_written = 0;
+ fpga_config_buffers[i].write_requested = 1;
+ fpga_config_buffers[i].block_number =
+ blocks_submitted++;
+ fpga_config_buffers[i].subblocks_sent = 0;
+ break;
+ }
+ }
+
+ if (i == FPGA_CONFIG_BUFFER_SIZE) {
+ status = INTEL_SIP_SMC_STATUS_REJECTED;
+ return status;
+ } else if (i == FPGA_CONFIG_BUFFER_SIZE - 1) {
+ status = INTEL_SIP_SMC_STATUS_BUSY;
+ }
+
+ intel_fpga_sdm_write_all();
+
+ return status;
+}
+
+/*
+ * This function is responsible for handling all SiP calls from the NS world
+ */
+
+uintptr_t sip_smc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3,
+ u_register_t x4,
+ void *cookie,
+ void *handle,
+ u_register_t flags)
+{
+ uint32_t status = INTEL_SIP_SMC_STATUS_OK;
+ uint32_t completed_addr[3];
+ uint32_t count = 0;
+
+ switch (smc_fid) {
+ case SIP_SVC_UID:
+ /* Return UID to the caller */
+ SMC_UUID_RET(handle, intl_svc_uid);
+ break;
+ case INTEL_SIP_SMC_FPGA_CONFIG_ISDONE:
+ status = intel_mailbox_fpga_config_isdone();
+ SMC_RET4(handle, status, 0, 0, 0);
+ break;
+ case INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM:
+ SMC_RET3(handle, INTEL_SIP_SMC_STATUS_OK,
+ INTEL_SIP_SMC_FPGA_CONFIG_ADDR,
+ INTEL_SIP_SMC_FPGA_CONFIG_SIZE -
+ INTEL_SIP_SMC_FPGA_CONFIG_ADDR);
+ break;
+ case INTEL_SIP_SMC_FPGA_CONFIG_START:
+ status = intel_fpga_config_start(x1);
+ SMC_RET4(handle, status, 0, 0, 0);
+ break;
+ case INTEL_SIP_SMC_FPGA_CONFIG_WRITE:
+ status = intel_fpga_config_write(x1, x2);
+ SMC_RET4(handle, status, 0, 0, 0);
+ break;
+ case INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE:
+ status = intel_fpga_config_completed_write(completed_addr,
+ &count);
+ switch (count) {
+ case 1:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0], 0, 0);
+ break;
+ case 2:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0],
+ completed_addr[1], 0);
+ break;
+ case 3:
+ SMC_RET4(handle, INTEL_SIP_SMC_STATUS_OK,
+ completed_addr[0],
+ completed_addr[1],
+ completed_addr[2]);
+ break;
+ case 0:
+ SMC_RET4(handle, status, 0, 0, 0);
+ break;
+ default:
+ SMC_RET1(handle, INTEL_SIP_SMC_STATUS_ERROR);
+ }
+ break;
+
+ default:
+ return socfpga_sip_handler(smc_fid, x1, x2, x3, x4,
+ cookie, handle, flags);
+ }
+}
+
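+/* Define a runtime service descriptor for fast SMC calls */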
+DECLARE_RT_SVC(
+ agilex_sip_svc,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_FAST,
+ NULL,
+ sip_smc_handler
+);
+
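+/* Define a runtime service descriptor for yielding SMC calls */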
+DECLARE_RT_SVC(
+ agilex_sip_svc_std,
+ OEN_SIP_START,
+ OEN_SIP_END,
+ SMC_TYPE_YIELD,
+ NULL,
+ sip_smc_handler
+);
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2019, Intel Corporation. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch_helpers.h>
+#include <assert.h>
+#include <common/debug.h>
+#include <common/tbbr/tbbr_img_def.h>
+#include <drivers/io/io_block.h>
+#include <drivers/io/io_driver.h>
+#include <drivers/io/io_fip.h>
+#include <drivers/io/io_memmap.h>
+#include <drivers/io/io_storage.h>
+#include <drivers/mmc.h>
+#include <drivers/partition/partition.h>
+#include <lib/mmio.h>
+#include <tools_share/firmware_image_package.h>
+
+#include "agilex_private.h"
+
+#define PLAT_FIP_BASE (0)
+#define PLAT_FIP_MAX_SIZE (0x1000000)
+#define PLAT_MMC_DATA_BASE (0xffe3c000)
+#define PLAT_MMC_DATA_SIZE (0x2000)
+#define PLAT_QSPI_DATA_BASE (0x3C00000)
+#define PLAT_QSPI_DATA_SIZE (0x1000000)
+
+static const io_dev_connector_t *fip_dev_con;
+static const io_dev_connector_t *boot_dev_con;
+
+static uintptr_t fip_dev_handle;
+static uintptr_t boot_dev_handle;
+
+static const io_uuid_spec_t bl2_uuid_spec = {
+ .uuid = UUID_TRUSTED_BOOT_FIRMWARE_BL2,
+};
+
+static const io_uuid_spec_t bl31_uuid_spec = {
+ .uuid = UUID_EL3_RUNTIME_FIRMWARE_BL31,
+};
+
+static const io_uuid_spec_t bl33_uuid_spec = {
+ .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33,
+};
+
+uintptr_t a2_lba_offset;
+const char a2[] = {0xa2, 0x0};
+
+static const io_block_spec_t gpt_block_spec = {
+ .offset = 0,
+ .length = MMC_BLOCK_SIZE
+};
+
+static int check_fip(const uintptr_t spec);
+static int check_dev(const uintptr_t spec);
+
+static io_block_dev_spec_t boot_dev_spec;
+static int (*register_io_dev)(const io_dev_connector_t **);
+
+static io_block_spec_t fip_spec = {
+ .offset = PLAT_FIP_BASE,
+ .length = PLAT_FIP_MAX_SIZE,
+};
+
+struct plat_io_policy {
+ uintptr_t *dev_handle;
+ uintptr_t image_spec;
+ int (*check)(const uintptr_t spec);
+};
+
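+/* Map each image ID to the device handle and I/O spec used to load it */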
+static const struct plat_io_policy policies[] = {
+ [FIP_IMAGE_ID] = {
+ &boot_dev_handle,
+ (uintptr_t)&fip_spec,
+ check_dev
+ },
+ [BL2_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl2_uuid_spec,
+ check_fip
+ },
+ [BL31_IMAGE_ID] = {
+ &fip_dev_handle,
+ (uintptr_t)&bl31_uuid_spec,
+ check_fip
+ },
+ [BL33_IMAGE_ID] = {
+ &fip_dev_handle,
+		(uintptr_t)&bl33_uuid_spec,
+ check_fip
+ },
+ [GPT_IMAGE_ID] = {
+ &boot_dev_handle,
+		(uintptr_t)&gpt_block_spec,
+ check_dev
+ },
+};
+
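+/* Check that the boot device can be initialized and the given spec opened */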
+static int check_dev(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_handle;
+
+ result = io_dev_init(boot_dev_handle, (uintptr_t)NULL);
+ if (result == 0) {
+ result = io_open(boot_dev_handle, spec, &local_handle);
+ if (result == 0)
+ io_close(local_handle);
+ }
+ return result;
+}
+
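+/* Check that the FIP contains the image identified by the given UUID spec */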
+static int check_fip(const uintptr_t spec)
+{
+ int result;
+ uintptr_t local_image_handle;
+
+ result = io_dev_init(fip_dev_handle, (uintptr_t)FIP_IMAGE_ID);
+ if (result == 0) {
+ result = io_open(fip_dev_handle, spec, &local_image_handle);
+ if (result == 0)
+ io_close(local_image_handle);
+ }
+ return result;
+}
+
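+/*
+ * Register and open the boot device and the FIP driver. For SD/MMC boot the
+ * FIP is located in the A2 partition; for QSPI boot it sits at a fixed
+ * offset in the flash.
+ */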
+void socfpga_io_setup(int boot_source)
+{
+ int result;
+
+ switch (boot_source) {
+ case BOOT_SOURCE_SDMMC:
+ register_io_dev = ®ister_io_dev_block;
+ boot_dev_spec.buffer.offset = PLAT_MMC_DATA_BASE;
+ boot_dev_spec.buffer.length = MMC_BLOCK_SIZE;
+ boot_dev_spec.ops.read = mmc_read_blocks;
+ boot_dev_spec.ops.write = mmc_write_blocks;
+ boot_dev_spec.block_size = MMC_BLOCK_SIZE;
+ break;
+
+ case BOOT_SOURCE_QSPI:
+ register_io_dev = ®ister_io_dev_memmap;
+ fip_spec.offset = fip_spec.offset + PLAT_QSPI_DATA_BASE;
+ break;
+
+ default:
+ ERROR("Unsupported boot source\n");
+ panic();
+ break;
+ }
+
+ result = (*register_io_dev)(&boot_dev_con);
+ assert(result == 0);
+
+ result = register_io_dev_fip(&fip_dev_con);
+ assert(result == 0);
+
+ result = io_dev_open(boot_dev_con, (uintptr_t)&boot_dev_spec,
+ &boot_dev_handle);
+ assert(result == 0);
+
+ result = io_dev_open(fip_dev_con, (uintptr_t)NULL, &fip_dev_handle);
+ assert(result == 0);
+
+ if (boot_source == BOOT_SOURCE_SDMMC) {
+ partition_init(GPT_IMAGE_ID);
+ fip_spec.offset = get_partition_entry(a2)->start;
+ }
+
+ (void)result;
+}
+
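+/*
+ * Return the device handle and image spec for the requested image ID, as
+ * defined in the policies table above.
+ */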
+int plat_get_image_source(unsigned int image_id, uintptr_t *dev_handle,
+ uintptr_t *image_spec)
+{
+ int result;
+ const struct plat_io_policy *policy;
+
+ assert(image_id < ARRAY_SIZE(policies));
+
+ policy = &policies[image_id];
+ result = policy->check(policy->image_spec);
+ assert(result == 0);
+
+ *image_spec = policy->image_spec;
+ *dev_handle = *(policy->dev_handle);
+
+ return result;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <lib/psci/psci.h>
+
+static const unsigned char plat_power_domain_tree_desc[] = {1, 4};
+
+/*******************************************************************************
+ * This function returns the default topology tree information.
+ ******************************************************************************/
+const unsigned char *plat_get_power_domain_tree_desc(void)
+{
+ return plat_power_domain_tree_desc;
+}
+
+/*******************************************************************************
+ * This function implements a part of the critical interface between the psci
+ * generic layer and the platform that allows the former to query the platform
+ * to convert an MPIDR to a unique linear index. An error code (-1) is returned
+ * in case the MPIDR is invalid.
+ ******************************************************************************/
+int plat_core_pos_by_mpidr(u_register_t mpidr)
+{
+ unsigned int cluster_id, cpu_id;
+
+ mpidr &= MPIDR_AFFINITY_MASK;
+
+ if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
+ return -1;
+
+ cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
+ cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
+
+ if (cluster_id >= PLATFORM_CLUSTER_COUNT)
+ return -1;
+
+	/*
+	 * Validate cpu_id by checking whether it represents a valid CPU
+	 * index within a cluster on this platform.
+	 */
+ if (cpu_id >= PLATFORM_MAX_CPUS_PER_CLUSTER)
+ return -1;
+
+	return (cpu_id + (cluster_id * PLATFORM_MAX_CPUS_PER_CLUSTER));
+}
+