DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(cntp_ctl_el0)
DEFINE_SYSREG_READ_FUNC(isr_el1)
--- /dev/null
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <asm_macros.S>
+#include <uart8250.h>
+
+ .globl console_core_init
+ .globl console_core_putc
+ .globl console_core_getc
+
+ /* -----------------------------------------------
+ * int console_core_init(unsigned long base_addr,
+ * unsigned int uart_clk, unsigned int baud_rate)
+ * Function to initialize the console without a
+ * C Runtime to print debug information. This
+ * function will be accessed by console_init and
+ * crash reporting.
+ * In: x0 - console base address
+	 *     w1 - UART clock in Hz
+	 *     w2 - Baud rate
+	 * Out: return 1 on success, 0 on error
+ * Clobber list : x1, x2, x3
+ * -----------------------------------------------
+ */
+func console_core_init
+ /* Check the input base address */
+ cbz x0, core_init_fail
+ /* Check baud rate and uart clock for sanity */
+ cbz w1, core_init_fail
+ cbz w2, core_init_fail
+
+ /* Disable interrupt */
+ str wzr, [x0, #UART_IER]
+
+ /* Force DTR and RTS to high */
+ mov w3, #(UART_MCR_DTR | UART_MCR_RTS)
+ str w3, [x0, #UART_MCR]
+
+ /* Check high speed */
+ movz w3, #:abs_g1:115200
+ movk w3, #:abs_g0_nc:115200
+ cmp w2, w3
+ b.hi 1f
+
+ /* Non high speed */
+ lsl w2, w2, #4
+ mov w3, wzr
+ b 2f
+
+ /* High speed */
+1: lsl w2, w2, #2
+ mov w3, #2
+
+ /* Set high speed UART register */
+2: str w3, [x0, #UART_HIGHSPEED]
+
+ /* Calculate divisor */
+ udiv w3, w1, w2 /* divisor = uartclk / (quot * baudrate) */
+ msub w1, w3, w2, w1 /* remainder = uartclk % (quot * baudrate) */
+ lsr w2, w2, #1
+ cmp w1, w2
+ cinc w3, w3, hs
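+
+	/*
+	 * Illustrative numbers only, using the platform defaults defined in
+	 * this patch (UART_CLOCK = 26000000, UART_BAUDRATE = 921600):
+	 * 921600 > 115200 selects the high speed path, so UART_HIGHSPEED is
+	 * set to 2 and the sample rate becomes 921600 * 4 = 3686400.
+	 * divisor = 26000000 / 3686400 = 7, remainder 195200 < 3686400 / 2,
+	 * so the divisor is not rounded up.
+	 */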
+
+ /* Set line configuration, access divisor latches */
+ mov w1, #(UART_LCR_DLAB | UART_LCR_WLS_8)
+ str w1, [x0, #UART_LCR]
+
+ /* Set the divisor */
+ and w1, w3, #0xff
+ str w1, [x0, #UART_DLL]
+ lsr w1, w3, #8
+ and w1, w1, #0xff
+ str w1, [x0, #UART_DLH]
+
+ /* Hide the divisor latches */
+ mov w1, #UART_LCR_WLS_8
+ str w1, [x0, #UART_LCR]
+
+ /* Enable FIFOs, and clear receive and transmit */
+ mov w1, #(UART_FCR_FIFO_EN | UART_FCR_CLEAR_RCVR | \
+ UART_FCR_CLEAR_XMIT)
+ str w1, [x0, #UART_FCR]
+
+ mov w0, #1
+ ret
+core_init_fail:
+ mov w0, wzr
+ ret
+endfunc console_core_init
+
+ /* --------------------------------------------------------
+ * int console_core_putc(int c, unsigned long base_addr)
+ * Function to output a character over the console. It
+ * returns the character printed on success or -1 on error.
+ * In : w0 - character to be printed
+ * x1 - console base address
+ * Out : return -1 on error else return character.
+ * Clobber list : x2
+ * --------------------------------------------------------
+ */
+func console_core_putc
+ /* Check the input parameter */
+ cbz x1, putc_error
+ /* Prepend '\r' to '\n' */
+ cmp w0, #0xA
+ b.ne 2f
+
+ /* Check if the transmit FIFO is full */
+1: ldr w2, [x1, #UART_LSR]
+ and w2, w2, #UART_LSR_THRE
+ cbz w2, 1b
+ mov w2, #0xD
+ str w2, [x1, #UART_THR]
+
+ /* Check if the transmit FIFO is full */
+2: ldr w2, [x1, #UART_LSR]
+ and w2, w2, #UART_LSR_THRE
+ cbz w2, 2b
+ str w0, [x1, #UART_THR]
+ ret
+putc_error:
+ mov w0, #-1
+ ret
+endfunc console_core_putc
+
+ /* ---------------------------------------------
+ * int console_core_getc(unsigned long base_addr)
+ * Function to get a character from the console.
+ * It returns the character grabbed on success
+ * or -1 on error.
+ * In : x0 - console base address
+ * Clobber list : x0, x1
+ * ---------------------------------------------
+ */
+func console_core_getc
+ cbz x0, getc_error
+
+ /* Check if the receive FIFO is empty */
+1: ldr w1, [x0, #UART_LSR]
+	/* UART_LSR_DR (0x01) is a mask; the data-ready flag is LSR bit 0 */
+	tbz	w1, #0, 1b
+ ldr w0, [x0, #UART_RBR]
+ ret
+getc_error:
+ mov w0, #-1
+ ret
+endfunc console_core_getc
--- /dev/null
+/*
+ * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __UART8250_H__
+#define __UART8250_H__
+
+/* UART register */
+#define UART_RBR 0x00 /* Receive buffer register */
+#define UART_DLL 0x00 /* Divisor latch lsb */
+#define UART_THR 0x00 /* Transmit holding register */
+#define UART_DLH 0x04 /* Divisor latch msb */
+#define UART_IER 0x04 /* Interrupt enable register */
+#define UART_FCR 0x08 /* FIFO control register */
+#define UART_LCR 0x0c /* Line control register */
+#define UART_MCR 0x10 /* Modem control register */
+#define UART_LSR 0x14 /* Line status register */
+#define UART_HIGHSPEED 0x24 /* High speed UART */
+
+/* FCR */
+#define UART_FCR_FIFO_EN 0x01 /* enable FIFO */
+#define UART_FCR_CLEAR_RCVR 0x02 /* clear the RCVR FIFO */
+#define UART_FCR_CLEAR_XMIT 0x04 /* clear the XMIT FIFO */
+
+/* LCR */
+#define UART_LCR_WLS_8 0x03 /* 8 bit character length */
+#define UART_LCR_DLAB 0x80 /* divisor latch access bit */
+
+/* MCR */
+#define UART_MCR_DTR 0x01
+#define UART_MCR_RTS 0x02
+
+/* LSR */
+#define UART_LSR_DR 0x01 /* Data ready */
+#define UART_LSR_THRE 0x20 /* Xmit holding register empty */
+
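+/*
+ * Minimal C sketch (illustrative only, not part of this patch) of how the
+ * register map above is used for a polled transmit, mirroring the THR poll
+ * in console_core_putc. mmio_read_32()/mmio_write_32() are the usual TF-A
+ * MMIO accessors from mmio.h; uart8250_putc_poll is a hypothetical name.
+ *
+ *	static inline void uart8250_putc_poll(uintptr_t base, char c)
+ *	{
+ *		while (!(mmio_read_32(base + UART_LSR) & UART_LSR_THRE))
+ *			;
+ *		mmio_write_32(base + UART_THR, c);
+ *	}
+ */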
+#endif /* __UART8250_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <asm_macros.S>
+#include <platform_def.h>
+
+ .globl plat_secondary_cold_boot_setup
+ .globl plat_report_exception
+ .globl platform_is_primary_cpu
+ .globl plat_crash_console_init
+ .globl plat_crash_console_putc
+ .globl platform_mem_init
+
+
+ .macro crash_ram_log
+	/*
+	 * Check whether the crash log is available (teearg->atf_log_buf_size
+	 * non-zero); exit if ptr_atf_crash_flag is NULL.
+	 */
+ adr x2, ptr_atf_crash_flag
+ ldr x2, [x2]
+ /* exit if ptr_atf_crash_flag equals NULL */
+ cbz x2, exit_putc
+
+ /*
+ * set atf crash magic number
+ */
+1:
+ adr x2, ptr_atf_crash_flag
+ ldr x2, [x2]
+ mov_imm x1, 0xdead1abf
+ /* p_atf_log_ctrl->atf_crash_flag = 0xdead1abf */
+ str w1, [x2]
+	/*
+	 * w3 (return address) and w4 (start of buffer) are live in the crash
+	 * path, so only w1/w2 are used to verify the magic value below.
+	 */
+ ldr w2, [x2]
+ cmp w2, w1
+ b.ne 1b
+
+ /*
+ * get cpu id
+ */
+ mrs x1, mpidr_el1
+ /* refer to platform_get_core_pos */
+ and x2, x1, #MPIDR_CPU_MASK
+ and x1, x1, #MPIDR_CLUSTER_MASK
+	/* x1 = cpu id (cpu id = aff0 + aff1 * 4) */
+ add x1, x2, x1, LSR #6
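+	/*
+	 * For example, core 2 of cluster 1 has Aff1 = 1 (MPIDR bits [15:8])
+	 * and Aff0 = 2, so x1 = (0x100 >> 6) + 2 = 6, i.e. the linear
+	 * aff0 + aff1 * 4 id used to index the per-cpu write pointers below.
+	 */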
+
+ adr x2, ptr_atf_except_write_pos_per_cpu
+ ldr x2, [x2]
+	/*
+	 * Add (cpu_id * 8) so that
+	 * x2 = &p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id]
+	 */
+	add	x2, x2, x1, LSL #3
+	/* log write */
+	/* x1 = p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id] */
+	ldr	x1, [x2]
+	/*
+	 * *x1 = w0, i.e.
+	 * *(p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id]) = c
+	 */
+	strb	w0, [x1]
+	/* x1++ */
+	add	x1, x1, #1
+	/* p_atf_log_ctrl->atf_except_write_pos_per_cpu[cpu_id] = x1 */
+	str	x1, [x2]
+exit_putc:
+ .endm
+
+ /* -----------------------------------------------------
+ * void plat_secondary_cold_boot_setup (void);
+ *
+ * This function performs any platform specific actions
+	 * needed for a secondary cpu after a cold reset, e.g.
+ * mark the cpu's presence, mechanism to place it in a
+ * holding pen etc.
+ * -----------------------------------------------------
+ */
+func plat_secondary_cold_boot_setup
+ /* Do not do cold boot for secondary CPU */
+cb_panic:
+ b cb_panic
+endfunc plat_secondary_cold_boot_setup
+
+func platform_is_primary_cpu
+ and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
+ cmp x0, #PLAT_PRIMARY_CPU
+ cset x0, eq
+ ret
+endfunc platform_is_primary_cpu
+
+ /* ---------------------------------------------
+ * int plat_crash_console_init(void)
+ * Function to initialize the crash console
+ * without a C Runtime to print crash report.
+ * Clobber list : x0, x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_init
+ mov_imm x0, UART0_BASE
+ mov_imm x1, UART_CLOCK
+ mov_imm x2, UART_BAUDRATE
+ b console_init
+ ret
+endfunc plat_crash_console_init
+
+ /* ---------------------------------------------
+	 * int plat_crash_console_putc(int c)
+ * Function to print a character on the crash
+ * console without a C Runtime.
+ * Clobber list : x1, x2
+ * ---------------------------------------------
+ */
+func plat_crash_console_putc
+ mov_imm x1, UART0_BASE
+ b console_core_putc
+ ret
+endfunc plat_crash_console_putc
+
+ /* --------------------------------------------------------
+ * void platform_mem_init (void);
+ *
+ * Any memory init, relocation to be done before the
+ * platform boots. Called very early in the boot process.
+ * --------------------------------------------------------
+ */
+func platform_mem_init
+ ret
+endfunc platform_mem_init
+
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <platform_def.h>
+
+OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
+OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
+ENTRY(bl31_entrypoint)
+
+
+MEMORY {
+ RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_TZRAM_SIZE
+ RAM2 (rwx): ORIGIN = TZRAM2_BASE, LENGTH = TZRAM2_SIZE
+}
+
+
+SECTIONS
+{
+ . = BL31_BASE;
+
+ ASSERT(. == ALIGN(2048),
+ "vector base is not aligned on a 2K boundary.")
+
+ __RO_START__ = .;
+ vector . : {
+ *(.vectors)
+ } >RAM
+
+ ASSERT(. == ALIGN(4096),
+ "BL31_BASE address is not aligned on a page boundary.")
+
+ ro . : {
+ *bl31_entrypoint.o(.text*)
+ *(.text*)
+ *(.rodata*)
+
+ /* Ensure 8-byte alignment for descriptors and ensure inclusion */
+ . = ALIGN(8);
+ __RT_SVC_DESCS_START__ = .;
+ KEEP(*(rt_svc_descs))
+ __RT_SVC_DESCS_END__ = .;
+
+ /*
+ * Ensure 8-byte alignment for cpu_ops so that its fields are also
+ * aligned. Also ensure cpu_ops inclusion.
+ */
+ . = ALIGN(8);
+ __CPU_OPS_START__ = .;
+ KEEP(*(cpu_ops))
+ __CPU_OPS_END__ = .;
+
+ __RO_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked as read-only,
+ * executable. No RW data from the next section must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(4096);
+ __RO_END__ = .;
+ } >RAM
+
+ ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
+ "cpu_ops not defined for this platform.")
+
+ /*
+ * Define a linker symbol to mark start of the RW memory area for this
+ * image.
+ */
+ __RW_START__ = . ;
+
+ .data . : {
+ __DATA_START__ = .;
+ *(.data*)
+ __DATA_END__ = .;
+ } >RAM
+
+#ifdef BL31_PROGBITS_LIMIT
+ ASSERT(. <= BL31_PROGBITS_LIMIT, "BL3-1 progbits has exceeded its limit.")
+#endif
+
+ stacks (NOLOAD) : {
+ __STACKS_START__ = .;
+ *(tzfw_normal_stacks)
+ __STACKS_END__ = .;
+ } >RAM
+
+ /*
+ * The .bss section gets initialised to 0 at runtime.
+ * Its base address must be 16-byte aligned.
+ */
+ .bss (NOLOAD) : ALIGN(16) {
+ __BSS_START__ = .;
+ *(.bss*)
+ *(COMMON)
+#if !USE_COHERENT_MEM
+ /*
+ * Bakery locks are stored in normal .bss memory
+ *
+ * Each lock's data is spread across multiple cache lines, one per CPU,
+ * but multiple locks can share the same cache line.
+	 * The compiler will allocate enough memory for one CPU's bakery locks;
+	 * the remaining cache lines are allocated by the linker script.
+ */
+ . = ALIGN(CACHE_WRITEBACK_GRANULE);
+ __BAKERY_LOCK_START__ = .;
+ *(bakery_lock)
+ . = ALIGN(CACHE_WRITEBACK_GRANULE);
+ __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
+ . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
+ __BAKERY_LOCK_END__ = .;
+#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
+ ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
+ "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
+#endif
+#endif
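+	/*
+	 * Sizing example for the bakery lock region above (illustrative,
+	 * !USE_COHERENT_MEM case): if one CPU's bakery locks fit within a
+	 * single CACHE_WRITEBACK_GRANULE of 64 bytes and PLATFORM_CORE_COUNT
+	 * is 8, the region occupies 64 + 64 * (8 - 1) = 512 bytes, one cache
+	 * line per CPU.
+	 */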
+ __BSS_END__ = .;
+ __RW_END__ = .;
+ } >RAM
+
+ ASSERT(. <= BL31_LIMIT, "BL3-1 image has exceeded its limit.")
+
+ /*
+ * The xlat_table section is for full, aligned page tables (4K).
+ * Removing them from .bss avoids forcing 4K alignment on
+	 * the .bss section and eliminates the unnecessary zero init.
+ */
+ xlat_table (NOLOAD) : {
+ *(xlat_table)
+ } >RAM2
+
+#if USE_COHERENT_MEM
+ /*
+ * The base address of the coherent memory section must be page-aligned (4K)
+ * to guarantee that the coherent data are stored on their own pages and
+ * are not mixed with normal data. This is required to set up the correct
+ * memory attributes for the coherent data page tables.
+ */
+ coherent_ram (NOLOAD) : ALIGN(4096) {
+ __COHERENT_RAM_START__ = .;
+ /*
+ * Bakery locks are stored in coherent memory
+ *
+ * Each lock's data is contiguous and fully allocated by the compiler
+ */
+ *(bakery_lock)
+ *(tzfw_coherent_mem)
+ __COHERENT_RAM_END_UNALIGNED__ = .;
+ /*
+ * Memory page(s) mapped to this section will be marked
+ * as device memory. No other unexpected data must creep in.
+ * Ensure the rest of the current memory page is unused.
+ */
+ . = NEXT(4096);
+ __COHERENT_RAM_END__ = .;
+ } >RAM2
+#endif
+
+ /*
+ * Define a linker symbol to mark end of the RW memory area for this
+ * image.
+ */
+ __BL31_END__ = .;
+
+ __BSS_SIZE__ = SIZEOF(.bss);
+#if USE_COHERENT_MEM
+ __COHERENT_RAM_UNALIGNED_SIZE__ =
+ __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
+#endif
+
+ ASSERT(. <= TZRAM2_LIMIT, "TZRAM2 image has exceeded its limit.")
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arm_gic.h>
+#include <assert.h>
+#include <arch_helpers.h>
+#include <bl_common.h>
+#include <cci.h>
+#include <console.h>
+#include <context_mgmt.h>
+#include <debug.h>
+#include <generic_delay_timer.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <mtk_sip_svc.h>
+#include <mtk_plat_common.h>
+#include <mt_cpuxgpt.h>
+#include <platform.h>
+#include <plat_private.h>
+#include <string.h>
+#include <xlat_tables.h>
+/*******************************************************************************
+ * Declarations of linker defined symbols which will help us find the layout
+ * of trusted SRAM
+ ******************************************************************************/
+unsigned long __RO_START__;
+unsigned long __RO_END__;
+
+unsigned long __COHERENT_RAM_START__;
+unsigned long __COHERENT_RAM_END__;
+
+/*
+ * The next 2 constants identify the extents of the code & RO data region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned. It is the responsibility of the linker script to ensure that
+ * __RO_START__ and __RO_END__ linker symbols refer to page-aligned addresses.
+ */
+#define BL31_RO_BASE (unsigned long)(&__RO_START__)
+#define BL31_RO_LIMIT (unsigned long)(&__RO_END__)
+
+/*
+ * The next 2 constants identify the extents of the coherent memory region.
+ * These addresses are used by the MMU setup code and therefore they must be
+ * page-aligned. It is the responsibility of the linker script to ensure that
+ * __COHERENT_RAM_START__ and __COHERENT_RAM_END__ linker symbols
+ * refer to page-aligned addresses.
+ */
+#define BL31_COHERENT_RAM_BASE (unsigned long)(&__COHERENT_RAM_START__)
+#define BL31_COHERENT_RAM_LIMIT (unsigned long)(&__COHERENT_RAM_END__)
+
+/*
+ * Placeholder variables for copying the arguments that have been passed to
+ * BL3-1 from BL2.
+ */
+static entry_point_info_t bl32_image_ep_info;
+static entry_point_info_t bl33_image_ep_info;
+
+static const int cci_map[] = {
+ PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX,
+ PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX
+};
+
+static uint32_t cci_map_length = ARRAY_SIZE(cci_map);
+
+/* Table of regions to map using the MMU. */
+static const mmap_region_t plat_mmap[] = {
+ /* for TF text, RO, RW */
+ MAP_REGION_FLAT(MTK_DEV_RNG0_BASE, MTK_DEV_RNG0_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(MTK_DEV_RNG1_BASE, MTK_DEV_RNG1_SIZE,
+ MT_DEVICE | MT_RW | MT_SECURE),
+ MAP_REGION_FLAT(RAM_CONSOLE_BASE & ~(PAGE_SIZE_MASK), RAM_CONSOLE_SIZE,
+ MT_DEVICE | MT_RW | MT_NS),
+ { 0 }
+
+};
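+/*
+ * Note: MAP_REGION_FLAT creates identity (VA == PA) mappings, so the device
+ * ranges and the RAM console page above are mapped one-to-one when
+ * plat_configure_mmu_el3() adds plat_mmap to the translation tables.
+ */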
+
+/*******************************************************************************
+ * Macro generating the code for the function that sets up the page tables as
+ * per the platform memory map and initializes the MMU for the given exception
+ * level
+ ******************************************************************************/
+#define DEFINE_CONFIGURE_MMU_EL(_el) \
+ void plat_configure_mmu_el ## _el(unsigned long total_base, \
+ unsigned long total_size, \
+ unsigned long ro_start, \
+ unsigned long ro_limit, \
+ unsigned long coh_start, \
+ unsigned long coh_limit) \
+ { \
+ mmap_add_region(total_base, total_base, \
+ total_size, \
+ MT_MEMORY | MT_RW | MT_SECURE); \
+ mmap_add_region(ro_start, ro_start, \
+ ro_limit - ro_start, \
+ MT_MEMORY | MT_RO | MT_SECURE); \
+ mmap_add_region(coh_start, coh_start, \
+ coh_limit - coh_start, \
+ MT_DEVICE | MT_RW | MT_SECURE); \
+ mmap_add(plat_mmap); \
+ init_xlat_tables(); \
+ \
+ enable_mmu_el ## _el(0); \
+ }
+
+/* Define EL3 variants of the function initialising the MMU */
+DEFINE_CONFIGURE_MMU_EL(3)
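+/*
+ * For _el = 3 the macro above expands to plat_configure_mmu_el3(), which
+ * bl31_plat_arch_setup() below calls with the TZRAM extents to populate the
+ * translation tables and then enable the EL3 MMU via enable_mmu_el3(0).
+ */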
+
+unsigned int plat_get_syscnt_freq2(void)
+{
+ return SYS_COUNTER_FREQ_IN_TICKS;
+}
+
+void plat_cci_init(void)
+{
+ /* Initialize CCI driver */
+ cci_init(PLAT_MT_CCI_BASE, cci_map, cci_map_length);
+}
+
+void plat_cci_enable(void)
+{
+ /*
+ * Enable CCI coherency for this cluster.
+ * No need for locks as no other cpu is active at the moment.
+ */
+ cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
+
+void plat_cci_disable(void)
+{
+ cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
+}
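+
+/*
+ * Note: MPIDR_AFFLVL1_VAL() yields the cluster number (0 or 1), which the
+ * CCI driver uses as an index into cci_map above, i.e. cluster 0 uses slave
+ * interface 4 and cluster 1 uses slave interface 3 on this platform.
+ */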
+
+
+static void platform_setup_cpu(void)
+{
+ /* setup big cores */
+ mmio_write_32((uintptr_t)&mt6795_mcucfg->mp1_config_res,
+ MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK |
+ MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK |
+ MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK |
+ MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK |
+ MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK);
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg, MP1_AINACTS);
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_clkenm_div,
+ MP1_SW_CG_GEN);
+ mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp1_rst_ctl,
+ MP1_L2RSTDISABLE);
+
+ /* set big cores arm64 boot mode */
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_cpucfg,
+ MP1_CPUCFG_64BIT);
+
+ /* set LITTLE cores arm64 boot mode */
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp0_rv_addr[0].rv_addr_hw,
+ MP0_CPUCFG_64BIT);
+}
+
+/*******************************************************************************
+ * Return a pointer to the 'entry_point_info' structure of the next image for
+ * the security state specified. BL33 corresponds to the non-secure image type
+ * while BL32 corresponds to the secure image type. A NULL pointer is returned
+ * if the image does not exist.
+ ******************************************************************************/
+entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
+{
+ entry_point_info_t *next_image_info;
+
+ next_image_info = (type == NON_SECURE) ?
+ &bl33_image_ep_info : &bl32_image_ep_info;
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+/*******************************************************************************
+ * Perform any BL3-1 early platform setup. Here is an opportunity to copy
+ * parameters passed by the calling EL (S-EL1 in BL2 & S-EL3 in BL1) before they
+ * are lost (potentially). This needs to be done before the MMU is initialized
+ * so that the memory layout can be used while creating page tables.
+ * BL2 has flushed this information to memory, so we are guaranteed to pick up
+ * good data.
+ ******************************************************************************/
+void bl31_early_platform_setup(bl31_params_t *from_bl2,
+ void *plat_params_from_bl2)
+{
+ struct mtk_bl_param_t *pmtk_bl_param =
+ (struct mtk_bl_param_t *)from_bl2;
+ struct atf_arg_t *teearg;
+ unsigned long long normal_base;
+ unsigned long long atf_base;
+
+ assert(from_bl2 != NULL);
+ /*
+	 * The MediaTek preloader (i.e. BL2) runs in the 32-bit state, and the
+	 * high 32 bits of the 64-bit GP registers are UNKNOWN when the CPU
+	 * warm resets from the 32-bit into the 64-bit state. Clear the high
+	 * 32 bits, which may hold random values.
+ */
+ pmtk_bl_param =
+ (struct mtk_bl_param_t *)((uint64_t)pmtk_bl_param & 0x00000000ffffffff);
+ plat_params_from_bl2 =
+ (void *)((uint64_t)plat_params_from_bl2 & 0x00000000ffffffff);
+
+ teearg = (struct atf_arg_t *)pmtk_bl_param->tee_info_addr;
+
+ console_init(teearg->atf_log_port, UART_CLOCK, UART_BAUDRATE);
+	memcpy((void *)&gteearg, (void *)teearg, sizeof(struct atf_arg_t));
+
+ normal_base = 0;
+	/*
+	 * At ATF boot time, the timer backing cntpct_el0 is not initialized
+	 * yet, so it is not counting at this point.
+ */
+ atf_base = read_cntpct_el0();
+ sched_clock_init(normal_base, atf_base);
+
+ VERBOSE("bl31_setup\n");
+
+ /* Populate entry point information for BL3-2 and BL3-3 */
+ SET_PARAM_HEAD(&bl32_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
+ bl32_image_ep_info.pc = BL32_BASE;
+
+ SET_PARAM_HEAD(&bl33_image_ep_info,
+ PARAM_EP,
+ VERSION_1,
+ 0);
+ /*
+ * Tell BL3-1 where the non-trusted software image
+ * is located and the entry state information
+ */
+ /* BL33_START_ADDRESS */
+ bl33_image_ep_info.pc = pmtk_bl_param->bl33_start_addr;
+ bl33_image_ep_info.spsr = plat_get_spsr_for_bl33_entry();
+ bl33_image_ep_info.args.arg4 = pmtk_bl_param->bootarg_loc;
+ bl33_image_ep_info.args.arg5 = pmtk_bl_param->bootarg_size;
+ SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
+}
+/*******************************************************************************
+ * Perform any BL3-1 platform setup code
+ ******************************************************************************/
+
+void bl31_platform_setup(void)
+{
+ platform_setup_cpu();
+
+ generic_delay_timer_init();
+
+ plat_mt_gic_driver_init();
+ /* Initialize the gic cpu and distributor interfaces */
+ plat_mt_gic_init();
+
+ /* Topologies are best known to the platform. */
+ mt_setup_topology();
+}
+/*******************************************************************************
+ * Perform the very early platform specific architectural setup here. At the
+ * moment this only initializes the MMU in a quick and dirty way and sets up
+ * the MTK proprietary log buffer control fields.
+ ******************************************************************************/
+void bl31_plat_arch_setup(void)
+{
+ /* Enable non-secure access to CCI-400 registers */
+ mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET, 0x1);
+
+ plat_cci_init();
+ plat_cci_enable();
+
+ if (gteearg.atf_log_buf_size != 0) {
+ INFO("mmap atf buffer : 0x%x, 0x%x\n\r",
+ gteearg.atf_log_buf_start,
+ gteearg.atf_log_buf_size);
+
+ mmap_add_region(
+ gteearg.atf_log_buf_start &
+ ~(PAGE_SIZE_2MB_MASK),
+ gteearg.atf_log_buf_start &
+ ~(PAGE_SIZE_2MB_MASK),
+ PAGE_SIZE_2MB,
+ MT_DEVICE | MT_RW | MT_NS);
+
+ INFO("mmap atf buffer (force 2MB aligned):0x%x, 0x%x\n",
+ (gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
+ PAGE_SIZE_2MB);
+ }
+ /*
+	 * Add TZRAM_BASE to the memory map, then map the RO and COHERENT
+	 * regions with their own attributes
+ */
+ plat_configure_mmu_el3(
+ (TZRAM_BASE & ~(PAGE_SIZE_MASK)),
+ (TZRAM_SIZE & ~(PAGE_SIZE_MASK)),
+ (BL31_RO_BASE & ~(PAGE_SIZE_MASK)),
+ BL31_RO_LIMIT,
+ BL31_COHERENT_RAM_BASE,
+ BL31_COHERENT_RAM_LIMIT);
+ /* Initialize for ATF log buffer */
+ if (gteearg.atf_log_buf_size != 0) {
+ gteearg.atf_aee_debug_buf_size = ATF_AEE_BUFFER_SIZE;
+ gteearg.atf_aee_debug_buf_start =
+ gteearg.atf_log_buf_start +
+ gteearg.atf_log_buf_size - ATF_AEE_BUFFER_SIZE;
+ INFO("ATF log service is registered (0x%x, aee:0x%x)\n",
+ gteearg.atf_log_buf_start,
+ gteearg.atf_aee_debug_buf_start);
+	} else {
+ gteearg.atf_aee_debug_buf_size = 0;
+ gteearg.atf_aee_debug_buf_start = 0;
+ }
+
+	/* Platform code run before bl31_main, compatible with earlier chipsets */
+
+	/* Print version and build info to the ATF log buffer and UART */
+ INFO("BL3-1: %s\n", version_string);
+ INFO("BL3-1: %s\n", build_message);
+
+}
+#if 0
+/* MTK Define */
+#define ACTLR_CPUECTLR_BIT (1 << 1)
+
+void enable_ns_access_to_cpuectlr(void)
+{
+ unsigned int next_actlr;
+
+
+	/* ACTLR_EL1 does not implement the CPUECTLR bit */
+ next_actlr = read_actlr_el2();
+ next_actlr |= ACTLR_CPUECTLR_BIT;
+ write_actlr_el2(next_actlr);
+
+ next_actlr = read_actlr_el3();
+ next_actlr |= ACTLR_CPUECTLR_BIT;
+ write_actlr_el3(next_actlr);
+}
+#endif
+/*******************************************************************************
+ * This function prepares the boot arguments for a 64-bit kernel entry
+ ******************************************************************************/
+static entry_point_info_t *bl31_plat_get_next_kernel64_ep_info(void)
+{
+ entry_point_info_t *next_image_info;
+ unsigned long el_status;
+ unsigned int mode;
+
+ el_status = 0;
+ mode = 0;
+
+ /* Kernel image is always non-secured */
+ next_image_info = &bl33_image_ep_info;
+
+ /* Figure out what mode we enter the non-secure world in */
+ el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
+ el_status &= ID_AA64PFR0_ELX_MASK;
+
+ if (el_status) {
+ INFO("Kernel_EL2\n");
+ mode = MODE_EL2;
+	} else {
+ INFO("Kernel_EL1\n");
+ mode = MODE_EL1;
+ }
+
+ INFO("Kernel is 64Bit\n");
+ next_image_info->spsr =
+ SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+ next_image_info->pc = get_kernel_info_pc();
+ next_image_info->args.arg0 = get_kernel_info_r0();
+ next_image_info->args.arg1 = get_kernel_info_r1();
+
+ INFO("pc=0x%lx, r0=0x%lx, r1=0x%lx\n",
+ next_image_info->pc,
+ next_image_info->args.arg0,
+ next_image_info->args.arg1);
+
+
+ SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+/*******************************************************************************
+ * This function prepares the boot arguments for a 32-bit kernel entry
+ ******************************************************************************/
+static entry_point_info_t *bl31_plat_get_next_kernel32_ep_info(void)
+{
+ entry_point_info_t *next_image_info;
+ unsigned int mode;
+
+ mode = 0;
+
+ /* Kernel image is always non-secured */
+ next_image_info = &bl33_image_ep_info;
+
+ /* Figure out what mode we enter the non-secure world in */
+ mode = MODE32_hyp;
+ /*
+ * TODO: Consider the possibility of specifying the SPSR in
+ * the FIP ToC and allowing the platform to have a say as
+ * well.
+ */
+
+ INFO("Kernel is 32Bit\n");
+ next_image_info->spsr =
+ SPSR_MODE32(mode, SPSR_T_ARM, SPSR_E_LITTLE,
+ (DAIF_FIQ_BIT | DAIF_IRQ_BIT | DAIF_ABT_BIT));
+ next_image_info->pc = get_kernel_info_pc();
+ next_image_info->args.arg0 = get_kernel_info_r0();
+ next_image_info->args.arg1 = get_kernel_info_r1();
+ next_image_info->args.arg2 = get_kernel_info_r2();
+
+ INFO("pc=0x%lx, r0=0x%lx, r1=0x%lx, r2=0x%lx\n",
+ next_image_info->pc,
+ next_image_info->args.arg0,
+ next_image_info->args.arg1,
+ next_image_info->args.arg2);
+
+
+ SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);
+
+ /* None of the images on this platform can have 0x0 as the entrypoint */
+ if (next_image_info->pc)
+ return next_image_info;
+ else
+ return NULL;
+}
+
+/*******************************************************************************
+ * This function prepares the boot arguments for the kernel entry point
+ ******************************************************************************/
+void bl31_prepare_kernel_entry(uint64_t k32_64)
+{
+ entry_point_info_t *next_image_info;
+ uint32_t image_type;
+
+ /* Determine which image to execute next */
+ /* image_type = bl31_get_next_image_type(); */
+ image_type = NON_SECURE;
+
+ /* Program EL3 registers to enable entry into the next EL */
+ if (k32_64 == 0)
+ next_image_info = bl31_plat_get_next_kernel32_ep_info();
+ else
+ next_image_info = bl31_plat_get_next_kernel64_ep_info();
+
+ assert(next_image_info);
+ assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
+
+ INFO("BL3-1: Preparing for EL3 exit to %s world, Kernel\n",
+ (image_type == SECURE) ? "secure" : "normal");
+ INFO("BL3-1: Next image address = 0x%llx\n",
+ (unsigned long long) next_image_info->pc);
+ INFO("BL3-1: Next image spsr = 0x%x\n", next_image_info->spsr);
+ cm_init_context(read_mpidr_el1(), next_image_info);
+ cm_prepare_el3_exit(image_type);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <arch_helpers.h>
+#include <mmio.h>
+#include <mt_cpuxgpt.h>
+#include <stdint.h>
+#include <platform.h>
+#include <debug.h>
+#define CPUXGPT_BASE 0x10200000
+#define INDEX_BASE (CPUXGPT_BASE+0x0674)
+#define CTL_BASE (CPUXGPT_BASE+0x0670)
+
+uint64_t normal_time_base;
+uint64_t atf_time_base;
+
+void sched_clock_init(uint64_t normal_base, uint64_t atf_base)
+{
+ normal_time_base = normal_base;
+ atf_time_base = atf_base;
+}
+
+uint64_t sched_clock(void)
+{
+ uint64_t cval;
+
+ cval = (((read_cntpct_el0() - atf_time_base)*1000)/
+ SYS_COUNTER_FREQ_IN_MHZ) + normal_time_base;
+ return cval;
+}
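+
+/*
+ * Worked example (illustrative): with SYS_COUNTER_FREQ_IN_MHZ = 13, a delta
+ * of 13000 counter ticks gives (13000 * 1000) / 13 = 1000000, i.e. the value
+ * returned by sched_clock() is in nanoseconds (13000 ticks of the 13 MHz
+ * system counter are 1 ms).
+ */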
+
+/*
+ * Return: 0 - the write attempts to clear the CPUXGPT enable bit,
+ *             which is not allowed.
+ * Return: 1 - the write does not try to disable the control bit.
+ */
+unsigned char check_cpuxgpt_write_permission(unsigned int reg_addr,
+ unsigned int reg_value)
+{
+ unsigned int idx;
+ unsigned int ctl_val;
+
+ if (reg_addr == CTL_BASE) {
+ idx = mmio_read_32(INDEX_BASE);
+
+ /* idx 0: CPUXGPT system control */
+ if (idx == 0) {
+ ctl_val = mmio_read_32(CTL_BASE);
+ if (ctl_val & 1) {
+ /*
+				 * if the enable bit is already set,
+				 * bit 0 is not allowed to be cleared
+ */
+ if (!(reg_value & 1))
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
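+
+/*
+ * Illustrative usage (hypothetical caller, not part of this patch): a secure
+ * write service would gate CPUXGPT MMIO writes with this check, e.g.
+ *
+ *	if (check_cpuxgpt_write_permission(addr, val))
+ *		mmio_write_32(addr, val);
+ */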
+
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MT_CPUXGPT_H__
+#define __MT_CPUXGPT_H__
+
+/* REG */
+#define INDEX_CTL_REG 0x000
+#define INDEX_STA_REG 0x004
+#define INDEX_CNT_L_INIT 0x008
+#define INDEX_CNT_H_INIT 0x00C
+
+/* CTL_REG SET */
+#define EN_CPUXGPT 0x01
+#define EN_AHLT_DEBUG 0x02
+#define CLK_DIV1 (0x1 << 8)
+#define CLK_DIV2 (0x2 << 8)
+#define CLK_DIV4 (0x4 << 8)
+#define CLK_DIV_MASK (~(0x7<<8))
+
+void generic_timer_backup(void);
+void sched_clock_init(uint64_t normal_base, uint64_t atf_base);
+uint64_t sched_clock(void);
+
+#endif /* __MT_CPUXGPT_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MCUCFG_H__
+#define __MCUCFG_H__
+
+#include <platform_def.h>
+#include <stdint.h>
+
+struct mt6795_mcucfg_regs {
+ uint32_t mp0_ca7l_cache_config;
+ struct {
+ uint32_t mem_delsel0;
+ uint32_t mem_delsel1;
+ } mp0_cpu[4];
+ uint32_t mp0_cache_mem_delsel0;
+ uint32_t mp0_cache_mem_delsel1;
+ uint32_t mp0_axi_config;
+ uint32_t mp0_misc_config[2];
+ struct {
+ uint32_t rv_addr_lw;
+ uint32_t rv_addr_hw;
+ } mp0_rv_addr[4];
+ uint32_t mp0_ca7l_cfg_dis;
+ uint32_t mp0_ca7l_clken_ctrl;
+ uint32_t mp0_ca7l_rst_ctrl;
+ uint32_t mp0_ca7l_misc_config;
+ uint32_t mp0_ca7l_dbg_pwr_ctrl;
+ uint32_t mp0_rw_rsvd0;
+ uint32_t mp0_rw_rsvd1;
+ uint32_t mp0_ro_rsvd;
+ uint32_t reserved0_0[100];
+ uint32_t mp1_cpucfg;
+ uint32_t mp1_miscdbg;
+ uint32_t reserved0_1[13];
+ uint32_t mp1_rst_ctl;
+ uint32_t mp1_clkenm_div;
+ uint32_t reserved0_2[7];
+ uint32_t mp1_config_res;
+ uint32_t reserved0_3[13];
+ struct {
+ uint32_t rv_addr_lw;
+ uint32_t rv_addr_hw;
+ } mp1_rv_addr[2];
+ uint32_t reserved0_4[84];
+ uint32_t mp0_rst_status; /* 0x400 */
+ uint32_t mp0_dbg_ctrl;
+ uint32_t mp0_dbg_flag;
+ uint32_t mp0_ca7l_ir_mon;
+ struct {
+ uint32_t pc_lw;
+ uint32_t pc_hw;
+ uint32_t fp_arch32;
+ uint32_t sp_arch32;
+ uint32_t fp_arch64_lw;
+ uint32_t fp_arch64_hw;
+ uint32_t sp_arch64_lw;
+ uint32_t sp_arch64_hw;
+ } mp0_dbg_core[4];
+ uint32_t dfd_ctrl;
+ uint32_t dfd_cnt_l;
+ uint32_t dfd_cnt_h;
+ uint32_t misccfg_mp0_rw_rsvd;
+ uint32_t misccfg_sec_vio_status0;
+ uint32_t misccfg_sec_vio_status1;
+ uint32_t reserved1[22];
+ uint32_t misccfg_rw_rsvd; /* 0x500 */
+ uint32_t mcusys_dbg_mon_sel_a;
+ uint32_t mcusys_dbg_mon;
+ uint32_t reserved2[61];
+ uint32_t mcusys_config_a; /* 0x600 */
+ uint32_t mcusys_config1_a;
+ uint32_t mcusys_gic_peribase_a;
+ uint32_t reserved3;
+ uint32_t sec_range0_start; /* 0x610 */
+ uint32_t sec_range0_end;
+ uint32_t sec_range_enable;
+ uint32_t reserved4;
+ uint32_t int_pol_ctl[8]; /* 0x620 */
+ uint32_t aclken_div; /* 0x640 */
+ uint32_t pclken_div;
+ uint32_t l2c_sram_ctrl;
+ uint32_t armpll_jit_ctrl;
+ uint32_t cci_addrmap; /* 0x650 */
+ uint32_t cci_config;
+ uint32_t cci_periphbase;
+ uint32_t cci_nevntcntovfl;
+ uint32_t cci_clk_ctrl; /* 0x660 */
+ uint32_t cci_acel_s1_ctrl;
+ uint32_t bus_fabric_dcm_ctrl;
+ uint32_t reserved5;
+ uint32_t xgpt_ctl; /* 0x670 */
+ uint32_t xgpt_idx;
+ uint32_t ptpod2_ctl0;
+ uint32_t ptpod2_ctl1;
+ uint32_t mcusys_revid;
+ uint32_t mcusys_rw_rsvd0;
+ uint32_t mcusys_rw_rsvd1;
+};
+
+static struct mt6795_mcucfg_regs *const mt6795_mcucfg = (void *)MCUCFG_BASE;
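+
+/*
+ * The register layout above is offset-sensitive: the reserved padding arrays
+ * are sized so that, for instance, mp0_rst_status lands at offset 0x400 and
+ * mcusys_config_a at 0x600 from MCUCFG_BASE, as noted in the field comments.
+ * As an illustration only, a build-time guard could use TF-A's CASSERT macro
+ * (include/lib/cassert.h):
+ *
+ *	CASSERT(__builtin_offsetof(struct mt6795_mcucfg_regs, mp0_rst_status)
+ *		== 0x400, assert_mcucfg_regs_layout);
+ */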
+
+/* cpu boot mode */
+enum {
+ MP0_CPUCFG_64BIT_SHIFT = 12,
+ MP1_CPUCFG_64BIT_SHIFT = 28,
+ MP0_CPUCFG_64BIT = 0xf << MP0_CPUCFG_64BIT_SHIFT,
+ MP1_CPUCFG_64BIT = 0xf << MP1_CPUCFG_64BIT_SHIFT
+};
+
+/* scu related */
+enum {
+ MP0_ACINACTM_SHIFT = 4,
+ MP1_ACINACTM_SHIFT = 0,
+ MP0_ACINACTM = 1 << MP0_ACINACTM_SHIFT,
+ MP1_ACINACTM = 1 << MP1_ACINACTM_SHIFT
+};
+
+enum {
+ MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT = 0,
+ MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT = 4,
+ MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT = 8,
+ MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT = 12,
+ MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT = 16,
+
+ MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK =
+ 0xf << MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK_SHIFT,
+ MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK =
+ 0xf << MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK_SHIFT,
+ MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK =
+ 0xf << MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK_SHIFT,
+ MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK =
+ 0xf << MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK_SHIFT,
+ MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK =
+ 0xf << MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK_SHIFT
+};
+
+enum {
+ MP1_AINACTS_SHIFT = 4,
+ MP1_AINACTS = 1 << MP1_AINACTS_SHIFT
+};
+
+enum {
+ MP1_SW_CG_GEN_SHIFT = 12,
+ MP1_SW_CG_GEN = 1 << MP1_SW_CG_GEN_SHIFT
+};
+
+enum {
+ MP1_L2RSTDISABLE_SHIFT = 14,
+ MP1_L2RSTDISABLE = 1 << MP1_L2RSTDISABLE_SHIFT
+};
+
+#endif /* __MCUCFG_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cci.h>
+#include <gic_v2.h>
+#include <platform_def.h>
+
+.section .rodata.gic_reg_name, "aS"
+gicc_regs:
+ .asciz "gicc_hppir", "gicc_ahppir", "gicc_ctlr", ""
+gicd_pend_reg:
+ .asciz "gicd_ispendr regs (Offsets 0x200 - 0x278)\n" \
+ " Offset:\t\t\tvalue\n"
+newline:
+ .asciz "\n"
+spacer:
+ .asciz ":\t\t0x"
+
+ /* ---------------------------------------------
+ * The below macro prints out relevant GIC
+ * registers whenever an unhandled exception is
+ * taken in BL3-1.
+ * Clobbers: x0 - x10, x16, x17, sp
+ * ---------------------------------------------
+ */
+ .macro plat_crash_print_regs
+ mov_imm x16, BASE_GICD_BASE
+ mov_imm x17, BASE_GICC_BASE
+ /* Load the gicc reg list to x6 */
+ adr x6, gicc_regs
+ /* Load the gicc regs to gp regs used by str_in_crash_buf_print */
+ ldr w8, [x17, #GICC_HPPIR]
+ ldr w9, [x17, #GICC_AHPPIR]
+ ldr w10, [x17, #GICC_CTLR]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+
+ /* Print the GICD_ISPENDR regs */
+ add x7, x16, #GICD_ISPENDR
+ adr x4, gicd_pend_reg
+ bl asm_print_str
+gicd_ispendr_loop:
+ sub x4, x7, x16
+ cmp x4, #0x280
+ b.eq exit_print_gic_regs
+ bl asm_print_hex
+
+ adr x4, spacer
+ bl asm_print_str
+
+ ldr x4, [x7], #8
+ bl asm_print_hex
+
+ adr x4, newline
+ bl asm_print_str
+ b gicd_ispendr_loop
+exit_print_gic_regs:
+ .endm
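+
+	/*
+	 * Note: GICD_ISPENDR is at offset 0x200 and the loop above advances
+	 * 8 bytes per iteration until the offset reaches 0x280, i.e. 16
+	 * doubleword reads covering the pending registers named in the
+	 * header string (offsets 0x200 - 0x278).
+	 */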
+
+.section .rodata.cci_reg_name, "aS"
+cci_iface_regs:
+	.asciz "cci_snoop_ctrl_cluster0", "cci_snoop_ctrl_cluster1", ""
+
+ /* ------------------------------------------------
+ * The below macro prints out relevant interconnect
+ * registers whenever an unhandled exception is
+ * taken in BL3-1.
+ * Clobbers: x0 - x9, sp
+ * ------------------------------------------------
+ */
+ .macro plat_print_interconnect_regs
+ adr x6, cci_iface_regs
+ /* Store in x7 the base address of the first interface */
+ mov_imm x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX))
+ ldr w8, [x7, #SNOOP_CTRL_REG]
+ /* Store in x7 the base address of the second interface */
+ mov_imm x7, (PLAT_MT_CCI_BASE + SLAVE_IFACE_OFFSET( \
+ PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX))
+ ldr w9, [x7, #SNOOP_CTRL_REG]
+ /* Store to the crash buf and print to console */
+ bl str_in_crash_buf_print
+ .endm
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLAT_PRIVATE_H__
+#define __PLAT_PRIVATE_H__
+#include <stdint.h>
+#include <xlat_tables.h>
+
+void plat_configure_mmu_el3(unsigned long total_base,
+			unsigned long total_size,
+			unsigned long ro_start,
+			unsigned long ro_limit,
+			unsigned long coh_start,
+			unsigned long coh_limit);
+
+void plat_cci_init(void);
+void plat_cci_enable(void);
+void plat_cci_disable(void);
+
+/* Declarations for plat_mt_gic.c */
+void plat_mt_gic_driver_init(void);
+void plat_mt_gic_init(void);
+void plat_mt_gic_cpuif_enable(void);
+void plat_mt_gic_cpuif_disable(void);
+void plat_mt_gic_pcpu_init(void);
+
+/* Declarations for plat_topology.c */
+int mt_setup_topology(void);
+void plat_delay_timer_init(void);
+
+#endif /* __PLAT_PRIVATE_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLAT_SIP_CALLS_H__
+#define __PLAT_SIP_CALLS_H__
+
+/*******************************************************************************
+ * Plat SiP function constants
+ ******************************************************************************/
+#define MTK_PLAT_SIP_NUM_CALLS 0
+
+#endif /* __PLAT_SIP_CALLS_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PLATFORM_DEF_H__
+#define __PLATFORM_DEF_H__
+
+#define PLAT_PRIMARY_CPU 0x0
+
+/* Special value used to verify platform parameters from BL2 to BL3-1 */
+#define MT_BL31_PLAT_PARAM_VAL 0x0f1e2d3c4b5a6978ULL
+
+#define IO_PHYS (0x10000000)
+#define INFRACFG_AO_BASE (IO_PHYS + 0x1000)
+#define MCUCFG_BASE (IO_PHYS + 0x200000)
+#define PERI_BASE (IO_PHYS + 0x1000000)
+
+
+#define GPIO_BASE (IO_PHYS + 0x370000)
+#define SPM_BASE (IO_PHYS + 0x6000)
+#define RGU_BASE (MCUCFG_BASE + 0x11000)
+#define PMIC_WRAP_BASE (IO_PHYS + 0x10000)
+
+#define TRNG_base (MCUCFG_BASE + 0x230000)
+#define MT_GIC_BASE (0x10220000)
+#define MCU_SYS_SIZE (0x700000)
+#define PLAT_MT_CCI_BASE (IO_PHYS + 0x390000)
+
+/* Aggregate of all devices in the first GB */
+#define MTK_DEV_RNG0_BASE IO_PHYS
+#define MTK_DEV_RNG0_SIZE 0x400000
+#define MTK_DEV_RNG1_BASE (PERI_BASE)
+#define MTK_DEV_RNG1_SIZE 0x4000000
+
+/*******************************************************************************
+ * UART related constants
+ ******************************************************************************/
+#define UART0_BASE (PERI_BASE + 0x2000)
+
+#define UART_BAUDRATE (921600)
+#define UART_CLOCK (26000000)
+
+/*******************************************************************************
+ * System counter frequency related constants
+ ******************************************************************************/
+#define SYS_COUNTER_FREQ_IN_TICKS 13000000
+#define SYS_COUNTER_FREQ_IN_MHZ (SYS_COUNTER_FREQ_IN_TICKS/1000000)
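+/* 13,000,000 ticks/s / 1,000,000 = 13, i.e. a 13 MHz system counter */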
+
+/*******************************************************************************
+ * GIC-400 & interrupt handling related constants
+ ******************************************************************************/
+
+/* Base MTK_platform compatible GIC memory map */
+#define BASE_GICD_BASE (MT_GIC_BASE + 0x1000)
+#define BASE_GICC_BASE (MT_GIC_BASE + 0x2000)
+#define BASE_GICR_BASE (MT_GIC_BASE + 0x200000)
+#define BASE_GICH_BASE (MT_GIC_BASE + 0x4000)
+#define BASE_GICV_BASE (MT_GIC_BASE + 0x6000)
+
+#define INT_POL_CTL0 0x10200620
+#define GIC_PRIVATE_SIGNALS (32)
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX 4
+#define PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX 3
+
+/*******************************************************************************
+ * WDT Registers
+ ******************************************************************************/
+#define MTK_WDT_BASE (RGU_BASE)
+#define MTK_WDT_SIZE (0x1000)
+#define MTK_WDT_MODE (MTK_WDT_BASE+0x0000)
+#define MTK_WDT_LENGTH (MTK_WDT_BASE+0x0004)
+#define MTK_WDT_RESTART (MTK_WDT_BASE+0x0008)
+#define MTK_WDT_STATUS (MTK_WDT_BASE+0x000C)
+#define MTK_WDT_INTERVAL (MTK_WDT_BASE+0x0010)
+#define MTK_WDT_SWRST (MTK_WDT_BASE+0x0014)
+#define MTK_WDT_SWSYSRST (MTK_WDT_BASE+0x0018)
+#define MTK_WDT_NONRST_REG (MTK_WDT_BASE+0x0020)
+#define MTK_WDT_NONRST_REG2 (MTK_WDT_BASE+0x0024)
+#define MTK_WDT_REQ_MODE (MTK_WDT_BASE+0x0030)
+#define MTK_WDT_REQ_IRQ_EN (MTK_WDT_BASE+0x0034)
+#define MTK_WDT_DEBUG_CTL (MTK_WDT_BASE+0x0040)
+
+/*WDT_STATUS*/
+#define MTK_WDT_STATUS_HWWDT_RST (0x80000000)
+#define MTK_WDT_STATUS_SWWDT_RST (0x40000000)
+#define MTK_WDT_STATUS_IRQWDT_RST (0x20000000)
+#define MTK_WDT_STATUS_DEBUGWDT_RST (0x00080000)
+#define MTK_WDT_STATUS_SPMWDT_RST (0x0002)
+#define MTK_WDT_STATUS_SPM_THERMAL_RST (0x0001)
+#define MTK_WDT_STATUS_THERMAL_DIRECT_RST (1 << 18)
+#define MTK_WDT_STATUS_SECURITY_RST (1 << 28)
+
+#define MTK_WDT_MODE_DUAL_MODE 0x0040
+#define MTK_WDT_MODE_IRQ 0x0008
+#define MTK_WDT_MODE_KEY 0x22000000
+#define MTK_WDT_MODE_EXTEN 0x0004
+#define MTK_WDT_SWRST_KEY 0x1209
+#define MTK_WDT_RESTART_KEY (0x1971)
+
+/* FIQ platform related define */
+#define MT_IRQ_SEC_SGI_0 8
+#define MT_IRQ_SEC_SGI_1 9
+#define MT_IRQ_SEC_SGI_2 10
+#define MT_IRQ_SEC_SGI_3 11
+#define MT_IRQ_SEC_SGI_4 12
+#define MT_IRQ_SEC_SGI_5 13
+#define MT_IRQ_SEC_SGI_6 14
+#define MT_IRQ_SEC_SGI_7 15
+
+#define FIQ_SMP_CALL_SGI MT_IRQ_SEC_SGI_5
+
+#define PLAT_ARM_G0_IRQS FIQ_SMP_CALL_SGI
+
+#define DEBUG_XLAT_TABLE 0
+
+/*******************************************************************************
+ * Platform binary types for linking
+ ******************************************************************************/
+#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
+#define PLATFORM_LINKER_ARCH aarch64
+
+/*******************************************************************************
+ * Generic platform constants
+ ******************************************************************************/
+
+/* Size of cacheable stacks */
+#if DEBUG_XLAT_TABLE
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL1
+#define PLATFORM_STACK_SIZE 0x440
+#elif IMAGE_BL2
+#define PLATFORM_STACK_SIZE 0x400
+#elif IMAGE_BL31
+#define PLATFORM_STACK_SIZE 0x800
+#elif IMAGE_BL32
+#define PLATFORM_STACK_SIZE 0x440
+#endif
+
+#define FIRMWARE_WELCOME_STR "Booting Trusted Firmware\n"
+#if ENABLE_PLAT_COMPAT
+#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL2
+#else
+#define PLAT_MAX_PWR_LVL 2 /* MPIDR_AFFLVL2 */
+#endif
+
+#define PLATFORM_CACHE_LINE_SIZE 64
+#define PLATFORM_SYSTEM_COUNT 1
+#define PLATFORM_CLUSTER_COUNT 2
+#define PLATFORM_CLUSTER0_CORE_COUNT 4
+#define PLATFORM_CLUSTER1_CORE_COUNT 4
+#define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER1_CORE_COUNT + \
+ PLATFORM_CLUSTER0_CORE_COUNT)
+#define PLATFORM_MAX_CPUS_PER_CLUSTER 4
+#define PLATFORM_NUM_AFFS (PLATFORM_SYSTEM_COUNT + \
+ PLATFORM_CLUSTER_COUNT + \
+ PLATFORM_CORE_COUNT)
+
+/*******************************************************************************
+ * Platform memory map related constants
+ ******************************************************************************/
+/* ATF Argument */
+#define ATF_ARG_SIZE (0x800)
+
+/* TF text, RO, RW in internal SRAM. Size: release: 80KB, debug: 92KB */
+#define TZRAM_BASE (0x110000)
+#if DEBUG
+#define TZRAM_SIZE (0x1C400)
+#else
+#define TZRAM_SIZE (0x1C400)
+#endif
+#define TZRAM2_BASE 0x00100000
+#define TZRAM2_SIZE 0xDC00
+#define TZRAM2_LIMIT (TZRAM2_BASE + TZRAM2_SIZE)
+
+#define RAM_CONSOLE_BASE 0x0012D000
+#define RAM_CONSOLE_SIZE 0x00001000
+/*******************************************************************************
+ * BL31 specific defines.
+ ******************************************************************************/
+/*
+ * Put BL3-1 at the top of the Trusted SRAM (just below the shared memory, if
+ * present). BL31_BASE is calculated using the current BL3-1 debug size plus a
+ * little space for growth.
+ */
+#define BL31_BASE (TZRAM_BASE + 0x1000)
+#define BL31_LIMIT (TZRAM_BASE + TZRAM_SIZE)
+#define BSS1_STACK_LIMIT (TZRAM_BASE + TZRAM_SIZE)
+#define BL31_TZRAM_SIZE (TZRAM_SIZE - ATF_ARG_SIZE)
+
+/*******************************************************************************
+ * Platform specific page table and MMU setup constants
+ ******************************************************************************/
+#define ADDR_SPACE_SIZE (1ull << 32)
+#define MAX_XLAT_TABLES 7
+#define MAX_MMAP_REGIONS 16
+
+
+/*******************************************************************************
+ * CCI-400 related constants
+ ******************************************************************************/
+#define CCI400_BASE 0x10390000
+#define CCI400_SL_IFACE_CLUSTER0 4
+#define CCI400_SL_IFACE_CLUSTER1 3
+#define CCI400_SL_IFACE_INDEX(mpidr) (mpidr & MPIDR_CLUSTER_MASK ? \
+ CCI400_SL_IFACE_CLUSTER1 : \
+ CCI400_SL_IFACE_CLUSTER0)
+#define CCI_SEC_ACCESS_OFFSET (0x8)
+
+
+/*******************************************************************************
+ * Declarations and constants to access the mailboxes safely. Each mailbox is
+ * aligned on the biggest cache line size in the platform. This is known only
+ * to the platform as it might have a combination of integrated and external
+ * caches. Such alignment ensures that two mailboxes do not sit on the same
+ * cache line at any cache level. They could belong to different cpus/clusters
+ * and get written while being protected by different locks, causing corruption
+ * of a valid mailbox address.
+ ******************************************************************************/
+#define CACHE_WRITEBACK_SHIFT 6
+#define CACHE_WRITEBACK_GRANULE (1 << CACHE_WRITEBACK_SHIFT)
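+/* 1 << 6 = 64 bytes, matching PLATFORM_CACHE_LINE_SIZE above */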
+
+#define BL32_BASE (0x0)
+
+/*
+ * Load address of BL3-3 for this platform port
+ */
+#define LK_SIZE_LIMIT (0x100000)
+#define PLAT_MTK_NS_IMAGE_OFFSET (0x41E00000)
+/* 16KB */
+#define ATF_AEE_BUFFER_SIZE (0x4000)
+#define PAGE_SIZE_2MB_SHIFT TWO_MB_SHIFT
+#define PAGE_SIZE_2MB (1 << PAGE_SIZE_2MB_SHIFT)
+#define PAGE_SIZE_2MB_MASK (PAGE_SIZE_2MB - 1)
+#define IS_PAGE_2MB_ALIGNED(addr) (((addr) & PAGE_SIZE_2MB_MASK) == 0)
+
+#endif /* __PLATFORM_DEF_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __POWER_TRACER_H__
+#define __POWER_TRACER_H__
+
+#define CPU_UP 0
+#define CPU_DOWN 1
+#define CPU_SUSPEND 2
+#define CLUSTER_UP 3
+#define CLUSTER_DOWN 4
+#define CLUSTER_SUSPEND 5
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_H__
+#define __SCU_H__
+
+void disable_scu(unsigned long mpidr);
+void enable_scu(unsigned long mpidr);
+
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SPM_H__
+#define __SPM_H__
+
+#define SPM_POWERON_CONFIG_SET (SPM_BASE + 0x000)
+#define SPM_POWER_ON_VAL0 (SPM_BASE + 0x010)
+#define SPM_POWER_ON_VAL1 (SPM_BASE + 0x014)
+#define SPM_CLK_SETTLE (SPM_BASE + 0x100)
+#define SPM_CA7_CPU1_PWR_CON (SPM_BASE + 0x218)
+#define SPM_CA7_CPU2_PWR_CON (SPM_BASE + 0x21c)
+#define SPM_CA7_CPU3_PWR_CON (SPM_BASE + 0x220)
+#define SPM_CA7_CPU1_L1_PDN (SPM_BASE + 0x264)
+#define SPM_CA7_CPU2_L1_PDN (SPM_BASE + 0x26c)
+#define SPM_CA7_CPU3_L1_PDN (SPM_BASE + 0x274)
+#define SPM_MD32_SRAM_CON (SPM_BASE + 0x2c8)
+#define SPM_PCM_CON0 (SPM_BASE + 0x310)
+#define SPM_PCM_CON1 (SPM_BASE + 0x314)
+#define SPM_PCM_IM_PTR (SPM_BASE + 0x318)
+#define SPM_PCM_IM_LEN (SPM_BASE + 0x31c)
+#define SPM_PCM_REG_DATA_INI (SPM_BASE + 0x320)
+#define SPM_PCM_EVENT_VECTOR0 (SPM_BASE + 0x340)
+#define SPM_PCM_EVENT_VECTOR1 (SPM_BASE + 0x344)
+#define SPM_PCM_EVENT_VECTOR2 (SPM_BASE + 0x348)
+#define SPM_PCM_EVENT_VECTOR3 (SPM_BASE + 0x34c)
+#define SPM_PCM_MAS_PAUSE_MASK (SPM_BASE + 0x354)
+#define SPM_PCM_PWR_IO_EN (SPM_BASE + 0x358)
+#define SPM_PCM_TIMER_VAL (SPM_BASE + 0x35c)
+#define SPM_PCM_TIMER_OUT (SPM_BASE + 0x360)
+#define SPM_PCM_REG0_DATA (SPM_BASE + 0x380)
+#define SPM_PCM_REG1_DATA (SPM_BASE + 0x384)
+#define SPM_PCM_REG2_DATA (SPM_BASE + 0x388)
+#define SPM_PCM_REG3_DATA (SPM_BASE + 0x38c)
+#define SPM_PCM_REG4_DATA (SPM_BASE + 0x390)
+#define SPM_PCM_REG5_DATA (SPM_BASE + 0x394)
+#define SPM_PCM_REG6_DATA (SPM_BASE + 0x398)
+#define SPM_PCM_REG7_DATA (SPM_BASE + 0x39c)
+#define SPM_PCM_REG8_DATA (SPM_BASE + 0x3a0)
+#define SPM_PCM_REG9_DATA (SPM_BASE + 0x3a4)
+#define SPM_PCM_REG10_DATA (SPM_BASE + 0x3a8)
+#define SPM_PCM_REG11_DATA (SPM_BASE + 0x3ac)
+#define SPM_PCM_REG12_DATA (SPM_BASE + 0x3b0)
+#define SPM_PCM_REG13_DATA (SPM_BASE + 0x3b4)
+#define SPM_PCM_REG14_DATA (SPM_BASE + 0x3b8)
+#define SPM_PCM_REG15_DATA (SPM_BASE + 0x3bc)
+#define SPM_PCM_EVENT_REG_STA (SPM_BASE + 0x3c0)
+#define SPM_PCM_FSM_STA (SPM_BASE + 0x3c4)
+#define SPM_PCM_IM_HOST_RW_PTR (SPM_BASE + 0x3c8)
+#define SPM_PCM_IM_HOST_RW_DAT (SPM_BASE + 0x3cc)
+#define SPM_PCM_EVENT_VECTOR4 (SPM_BASE + 0x3d0)
+#define SPM_PCM_EVENT_VECTOR5 (SPM_BASE + 0x3d4)
+#define SPM_PCM_EVENT_VECTOR6 (SPM_BASE + 0x3d8)
+#define SPM_PCM_EVENT_VECTOR7 (SPM_BASE + 0x3dc)
+#define SPM_PCM_SW_INT_SET (SPM_BASE + 0x3e0)
+#define SPM_PCM_SW_INT_CLEAR (SPM_BASE + 0x3e4)
+#define SPM_CLK_CON (SPM_BASE + 0x400)
+#define SPM_SLEEP_PTPOD2_CON (SPM_BASE + 0x408)
+#define SPM_APMCU_PWRCTL (SPM_BASE + 0x600)
+#define SPM_AP_DVFS_CON_SET (SPM_BASE + 0x604)
+#define SPM_AP_STANBY_CON (SPM_BASE + 0x608)
+#define SPM_PWR_STATUS (SPM_BASE + 0x60c)
+#define SPM_PWR_STATUS_2ND (SPM_BASE + 0x610)
+#define SPM_AP_BSI_REQ (SPM_BASE + 0x614)
+#define SPM_SLEEP_TIMER_STA (SPM_BASE + 0x720)
+#define SPM_SLEEP_WAKEUP_EVENT_MASK (SPM_BASE + 0x810)
+#define SPM_SLEEP_CPU_WAKEUP_EVENT (SPM_BASE + 0x814)
+#define SPM_SLEEP_MD32_WAKEUP_EVENT_MASK (SPM_BASE + 0x818)
+#define SPM_PCM_WDT_TIMER_VAL (SPM_BASE + 0x824)
+#define SPM_PCM_WDT_TIMER_OUT (SPM_BASE + 0x828)
+#define SPM_PCM_MD32_MAILBOX (SPM_BASE + 0x830)
+#define SPM_PCM_MD32_IRQ (SPM_BASE + 0x834)
+#define SPM_SLEEP_ISR_MASK (SPM_BASE + 0x900)
+#define SPM_SLEEP_ISR_STATUS (SPM_BASE + 0x904)
+#define SPM_SLEEP_ISR_RAW_STA (SPM_BASE + 0x910)
+#define SPM_SLEEP_MD32_ISR_RAW_STA (SPM_BASE + 0x914)
+#define SPM_SLEEP_WAKEUP_MISC (SPM_BASE + 0x918)
+#define SPM_SLEEP_BUS_PROTECT_RDY (SPM_BASE + 0x91c)
+#define SPM_SLEEP_SUBSYS_IDLE_STA (SPM_BASE + 0x920)
+#define SPM_PCM_RESERVE (SPM_BASE + 0xb00)
+#define SPM_PCM_RESERVE2 (SPM_BASE + 0xb04)
+#define SPM_PCM_FLAGS (SPM_BASE + 0xb08)
+#define SPM_PCM_SRC_REQ (SPM_BASE + 0xb0c)
+#define SPM_PCM_DEBUG_CON (SPM_BASE + 0xb20)
+#define SPM_CA7_CPU0_IRQ_MASK (SPM_BASE + 0xb30)
+#define SPM_CA7_CPU1_IRQ_MASK (SPM_BASE + 0xb34)
+#define SPM_CA7_CPU2_IRQ_MASK (SPM_BASE + 0xb38)
+#define SPM_CA7_CPU3_IRQ_MASK (SPM_BASE + 0xb3c)
+#define SPM_CA15_CPU0_IRQ_MASK (SPM_BASE + 0xb40)
+#define SPM_CA15_CPU1_IRQ_MASK (SPM_BASE + 0xb44)
+#define SPM_CA15_CPU2_IRQ_MASK (SPM_BASE + 0xb48)
+#define SPM_CA15_CPU3_IRQ_MASK (SPM_BASE + 0xb4c)
+#define SPM_PCM_PASR_DPD_0 (SPM_BASE + 0xb60)
+#define SPM_PCM_PASR_DPD_1 (SPM_BASE + 0xb64)
+#define SPM_PCM_PASR_DPD_2 (SPM_BASE + 0xb68)
+#define SPM_PCM_PASR_DPD_3 (SPM_BASE + 0xb6c)
+#define SPM_SLEEP_CA7_WFI0_EN (SPM_BASE + 0xf00)
+#define SPM_SLEEP_CA7_WFI1_EN (SPM_BASE + 0xf04)
+#define SPM_SLEEP_CA7_WFI2_EN (SPM_BASE + 0xf08)
+#define SPM_SLEEP_CA7_WFI3_EN (SPM_BASE + 0xf0c)
+#define SPM_SLEEP_CA15_WFI0_EN (SPM_BASE + 0xf10)
+#define SPM_SLEEP_CA15_WFI1_EN (SPM_BASE + 0xf14)
+#define SPM_SLEEP_CA15_WFI2_EN (SPM_BASE + 0xf18)
+#define SPM_SLEEP_CA15_WFI3_EN (SPM_BASE + 0xf1c)
+
+#define SPM_PROJECT_CODE 0xb16
+
+#define SPM_REGWR_EN (1U << 0)
+#define SPM_REGWR_CFG_KEY (SPM_PROJECT_CODE << 16)
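+/*
+ * SPM register writes are typically unlocked by writing SPM_REGWR_EN together
+ * with the project-code key (SPM_REGWR_CFG_KEY) to SPM_POWERON_CONFIG_SET.
+ */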
+
+#define SPM_CPU_PDN_DIS (1U << 0)
+#define SPM_INFRA_PDN_DIS (1U << 1)
+#define SPM_DDRPHY_PDN_DIS (1U << 2)
+#define SPM_DUALVCORE_PDN_DIS (1U << 3)
+#define SPM_PASR_DIS (1U << 4)
+#define SPM_DPD_DIS (1U << 5)
+#define SPM_SODI_DIS (1U << 6)
+#define SPM_MEMPLL_RESET (1U << 7)
+#define SPM_MAINPLL_PDN_DIS (1U << 8)
+#define SPM_CPU_DVS_DIS (1U << 9)
+#define SPM_CPU_DORMANT (1U << 10)
+#define SPM_EXT_VSEL_GPIO103 (1U << 11)
+#define SPM_DDR_HIGH_SPEED (1U << 12)
+#define SPM_OPT (1U << 13)
+
+#define POWER_ON_VAL1_DEF 0x01011820
+#define PCM_FSM_STA_DEF 0x48490
+#define PCM_END_FSM_STA_DEF 0x08490
+#define PCM_END_FSM_STA_MASK 0x3fff0
+#define PCM_HANDSHAKE_SEND1 0xbeefbeef
+
+#define PCM_WDT_TIMEOUT (30 * 32768)
+#define PCM_TIMER_MAX (0xffffffff - PCM_WDT_TIMEOUT)
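+/*
+ * The PCM timers appear to count a 32.768 kHz slow clock, so 30 * 32768
+ * corresponds to roughly a 30 second watchdog timeout; PCM_TIMER_MAX keeps
+ * the PCM timer value from wrapping past the watchdog window.
+ */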
+
+#define CON0_PCM_KICK (1U << 0)
+#define CON0_IM_KICK (1U << 1)
+#define CON0_IM_SLEEP_DVS (1U << 3)
+#define CON0_PCM_SW_RESET (1U << 15)
+#define CON0_CFG_KEY (SPM_PROJECT_CODE << 16)
+
+#define CON1_IM_SLAVE (1U << 0)
+#define CON1_MIF_APBEN (1U << 3)
+#define CON1_PCM_TIMER_EN (1U << 5)
+#define CON1_IM_NONRP_EN (1U << 6)
+#define CON1_PCM_WDT_EN (1U << 8)
+#define CON1_PCM_WDT_WAKE_MODE (1U << 9)
+#define CON1_SPM_SRAM_SLP_B (1U << 10)
+#define CON1_SPM_SRAM_ISO_B (1U << 11)
+#define CON1_EVENT_LOCK_EN (1U << 12)
+#define CON1_CFG_KEY (SPM_PROJECT_CODE << 16)
+
+#define PCM_PWRIO_EN_R0 (1U << 0)
+#define PCM_PWRIO_EN_R7 (1U << 7)
+#define PCM_RF_SYNC_R0 (1U << 16)
+#define PCM_RF_SYNC_R2 (1U << 18)
+#define PCM_RF_SYNC_R6 (1U << 22)
+#define PCM_RF_SYNC_R7 (1U << 23)
+
+#define CC_SYSCLK0_EN_0 (1U << 0)
+#define CC_SYSCLK0_EN_1 (1U << 1)
+#define CC_SYSCLK1_EN_0 (1U << 2)
+#define CC_SYSCLK1_EN_1 (1U << 3)
+#define CC_SYSSETTLE_SEL (1U << 4)
+#define CC_LOCK_INFRA_DCM (1U << 5)
+#define CC_SRCLKENA_MASK_0 (1U << 6)
+#define CC_CXO32K_RM_EN_MD1 (1U << 9)
+#define CC_CXO32K_RM_EN_MD2 (1U << 10)
+#define CC_CLKSQ1_SEL (1U << 12)
+#define CC_DISABLE_DORM_PWR (1U << 14)
+#define CC_MD32_DCM_EN (1U << 18)
+
+#define WFI_OP_AND 1
+#define WFI_OP_OR 0
+
+#define WAKE_MISC_PCM_TIMER (1U << 19)
+#define WAKE_MISC_CPU_WAKE (1U << 20)
+
+/* Wakeup source definitions (WAKE_SRC_xxx) */
+#define WAKE_SRC_SPM_MERGE (1 << 0)
+#define WAKE_SRC_KP (1 << 2)
+#define WAKE_SRC_WDT (1 << 3)
+#define WAKE_SRC_GPT (1 << 4)
+#define WAKE_SRC_EINT (1 << 6)
+#define WAKE_SRC_LOW_BAT (1 << 9)
+#define WAKE_SRC_MD32 (1 << 10)
+#define WAKE_SRC_USB_CD (1 << 14)
+#define WAKE_SRC_USB_PDN (1 << 15)
+#define WAKE_SRC_AFE (1 << 20)
+#define WAKE_SRC_THERM (1 << 21)
+#define WAKE_SRC_SYSPWREQ (1 << 24)
+#define WAKE_SRC_SEJ (1 << 27)
+#define WAKE_SRC_ALL_MD32 (1 << 28)
+#define WAKE_SRC_CPU_IRQ (1 << 29)
+
+#endif /* __SPM_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <delay_timer.h>
+#include <platform_def.h>
+
+static uint32_t plat_get_timer_value(void)
+{
+ /*
+ * Generic delay timer implementation expects the timer to be a down
+ * counter. We apply bitwise NOT operator to the tick values returned
+ * by read_cntpct_el0() to simulate the down counter.
+ */
+ return (uint32_t)(~read_cntpct_el0());
+}
+
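+/*
+ * With clk_mult = 1 and clk_div = SYS_COUNTER_FREQ_IN_MHZ (13), the generic
+ * delay timer converts ticks to microseconds: a 1 ms delay works out to
+ * roughly 13000 ticks of the 13 MHz system counter.
+ */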
+static const timer_ops_t plat_timer_ops = {
+ .get_timer_value = plat_get_timer_value,
+ .clk_mult = 1,
+ .clk_div = SYS_COUNTER_FREQ_IN_MHZ,
+};
+
+void plat_delay_timer_init(void)
+{
+ timer_init(&plat_timer_ops);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <gicv2.h>
+#include <plat_arm.h>
+#include <platform.h>
+#include <platform_def.h>
+
+const unsigned int g0_interrupt_array[] = {
+ PLAT_ARM_G0_IRQS
+};
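+/*
+ * The entries above are configured by the GICv2 driver as Group 0 (secure)
+ * interrupts; here that is just FIQ_SMP_CALL_SGI (secure SGI 13).
+ */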
+
+gicv2_driver_data_t arm_gic_data = {
+ .gicd_base = BASE_GICD_BASE,
+ .gicc_base = BASE_GICC_BASE,
+ .g0_interrupt_num = ARRAY_SIZE(g0_interrupt_array),
+ .g0_interrupt_array = g0_interrupt_array,
+};
+
+void plat_mt_gic_driver_init(void)
+{
+ gicv2_driver_init(&arm_gic_data);
+}
+
+void plat_mt_gic_init(void)
+{
+ gicv2_distif_init();
+ gicv2_pcpu_distif_init();
+ gicv2_cpuif_enable();
+}
+
+void plat_mt_gic_cpuif_enable(void)
+{
+ gicv2_cpuif_enable();
+}
+
+void plat_mt_gic_cpuif_disable(void)
+{
+ gicv2_cpuif_disable();
+}
+
+void plat_mt_gic_pcpu_init(void)
+{
+ gicv2_pcpu_distif_init();
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch_helpers.h>
+#include <arm_gic.h>
+#include <assert.h>
+#include <bakery_lock.h>
+#include <cci.h>
+#include <console.h>
+#include <debug.h>
+#include <errno.h>
+#include <mcucfg.h>
+#include <mmio.h>
+#include <platform_def.h>
+#include <plat_private.h>
+#include <power_tracer.h>
+#include <psci.h>
+#include <scu.h>
+
+struct core_context {
+ unsigned long timer_data[8];
+ unsigned int count;
+ unsigned int rst;
+ unsigned int abt;
+ unsigned int brk;
+};
+
+struct cluster_context {
+ struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
+};
+
+/*
+ * Top level structure to hold the complete context of a multi cluster system
+ */
+struct system_context {
+ struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
+};
+
+/*
+ * Top level structure which encapsulates the context of the entire system
+ */
+static struct system_context dormant_data[1];
+
+static inline struct cluster_context *system_cluster(
+ struct system_context *system,
+ uint32_t clusterid)
+{
+ return &system->cluster[clusterid];
+}
+
+static inline struct core_context *cluster_core(struct cluster_context *cluster,
+ uint32_t cpuid)
+{
+ return &cluster->core[cpuid];
+}
+
+static struct cluster_context *get_cluster_data(unsigned long mpidr)
+{
+ uint32_t clusterid;
+
+ clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;
+
+ return system_cluster(dormant_data, clusterid);
+}
+
+static struct core_context *get_core_data(unsigned long mpidr)
+{
+ struct cluster_context *cluster;
+ uint32_t cpuid;
+
+ cluster = get_cluster_data(mpidr);
+ cpuid = mpidr & MPIDR_CPU_MASK;
+
+ return cluster_core(cluster, cpuid);
+}
+
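+/*
+ * Per-core timer_data[] layout used by the save/restore helpers below
+ * (byte offsets): 0: cntkctl_el1, 8: cntp_cval_el0, 16: cntp_tval_el0,
+ * 24: cntp_ctl_el0, 32: cntv_tval_el0, 40: cntv_ctl_el0.
+ */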
+static void mt_save_generic_timer(unsigned long *container)
+{
+ uint64_t ctl;
+ uint64_t val;
+
+ __asm__ volatile("mrs %x0, cntkctl_el1\n\t"
+ "mrs %x1, cntp_cval_el0\n\t"
+ "stp %x0, %x1, [%2, #0]"
+ : "=&r" (ctl), "=&r" (val)
+ : "r" (container)
+ : "memory");
+
+ __asm__ volatile("mrs %x0, cntp_tval_el0\n\t"
+ "mrs %x1, cntp_ctl_el0\n\t"
+ "stp %x0, %x1, [%2, #16]"
+ : "=&r" (val), "=&r" (ctl)
+ : "r" (container)
+ : "memory");
+
+ __asm__ volatile("mrs %x0, cntv_tval_el0\n\t"
+ "mrs %x1, cntv_ctl_el0\n\t"
+ "stp %x0, %x1, [%2, #32]"
+ : "=&r" (val), "=&r" (ctl)
+ : "r" (container)
+ : "memory");
+}
+
+static void mt_restore_generic_timer(unsigned long *container)
+{
+ uint64_t ctl;
+ uint64_t val;
+
+ __asm__ volatile("ldp %x0, %x1, [%2, #0]\n\t"
+ "msr cntkctl_el1, %x0\n\t"
+ "msr cntp_cval_el0, %x1"
+ : "=&r" (ctl), "=&r" (val)
+ : "r" (container)
+ : "memory");
+
+ __asm__ volatile("ldp %x0, %x1, [%2, #16]\n\t"
+ "msr cntp_tval_el0, %x0\n\t"
+ "msr cntp_ctl_el0, %x1"
+ : "=&r" (val), "=&r" (ctl)
+ : "r" (container)
+ : "memory");
+
+ __asm__ volatile("ldp %x0, %x1, [%2, #32]\n\t"
+ "msr cntv_tval_el0, %x0\n\t"
+ "msr cntv_ctl_el0, %x1"
+ : "=&r" (val), "=&r" (ctl)
+ : "r" (container)
+ : "memory");
+}
+
+static void stop_generic_timer(void)
+{
+ /*
+ * Disable the timer and mask the irq to prevent
+ * spurious interrupts on this cpu interface. It
+ * will bite us when we come back if we don't. It
+ * will be replayed on the inbound cluster.
+ */
+ uint64_t cntpctl = read_cntp_ctl_el0();
+
+ write_cntp_ctl_el0(clr_cntp_ctl_enable(cntpctl));
+}
+
+static void mt_cpu_save(unsigned long mpidr)
+{
+ struct core_context *core;
+
+ core = get_core_data(mpidr);
+ mt_save_generic_timer(core->timer_data);
+
+ /* Disable the timer irq; the upper layer should enable it again. */
+ stop_generic_timer();
+}
+
+static void mt_cpu_restore(unsigned long mpidr)
+{
+ struct core_context *core;
+
+ core = get_core_data(mpidr);
+ mt_restore_generic_timer(core->timer_data);
+}
+
+static void mt_platform_save_context(unsigned long mpidr)
+{
+ /* mcusys_save_context: */
+ mt_cpu_save(mpidr);
+}
+
+static void mt_platform_restore_context(unsigned long mpidr)
+{
+ /* mcusys_restore_context: */
+ mt_cpu_restore(mpidr);
+}
+
+/*******************************************************************************
+* Private function which is used to determine if any platform actions
+* should be performed for the specified affinity instance given its
+* state. Nothing needs to be done if the 'state' is not off or if this is not
+* the highest affinity level which will enter the 'state'.
+*******************************************************************************/
+static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
+{
+ unsigned int max_phys_off_afflvl;
+
+ assert(afflvl <= MPIDR_AFFLVL2);
+
+ if (state != PSCI_STATE_OFF)
+ return -EAGAIN;
+
+ /*
+ * Find the highest affinity level which will be suspended and postpone
+ * all the platform specific actions until that level is hit.
+ */
+ max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
+ assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
+ if (afflvl != max_phys_off_afflvl)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to enter
+ * standby.
+ ******************************************************************************/
+static void plat_affinst_standby(unsigned int power_state)
+{
+ unsigned int target_afflvl;
+
+ /* Sanity check the requested state */
+ target_afflvl = psci_get_pstate_afflvl(power_state);
+
+ /*
+ * It's possible to enter standby only on affinity level 0 i.e. a cpu
+ * on the MTK_platform. Ignore any other affinity level.
+ */
+ if (target_afflvl == MPIDR_AFFLVL0) {
+ /*
+ * Enter standby state. dsb is good practice before using wfi
+ * to enter low power states.
+ */
+ dsb();
+ wfi();
+ }
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * on. The level and mpidr determine the affinity instance.
+ ******************************************************************************/
+static int plat_affinst_on(unsigned long mpidr,
+ unsigned long sec_entrypoint,
+ unsigned int afflvl,
+ unsigned int state)
+{
+ int rc = PSCI_E_SUCCESS;
+ unsigned long cpu_id;
+ unsigned long cluster_id;
+ uintptr_t rv;
+
+ /*
+ * It's possible to turn on only affinity level 0 i.e. a cpu
+ * on the MTK_platform. Ignore any other affinity level.
+ */
+ if (afflvl != MPIDR_AFFLVL0)
+ return rc;
+
+ cpu_id = mpidr & MPIDR_CPU_MASK;
+ cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+ if (cluster_id)
+ rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+ else
+ rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+ mmio_write_32(rv, sec_entrypoint);
+ INFO("mt_on[%ld:%ld], entry %x\n",
+ cluster_id, cpu_id, mmio_read_32(rv));
+
+ return rc;
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be turned
+ * off. The level and mpidr determine the affinity instance. The 'state' arg.
+ * allows the platform to decide whether the cluster is being turned off and
+ * take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would
+ * be wise to flush a write to a global variable to prevent unpredictable
+ * results.
+ ******************************************************************************/
+static void plat_affinst_off(unsigned int afflvl, unsigned int state)
+{
+ unsigned long mpidr = read_mpidr_el1();
+
+ /* Determine if any platform actions need to be executed. */
+ if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+ return;
+
+ /* Prevent interrupts from spuriously waking up this cpu */
+ plat_mt_gic_cpuif_disable();
+
+ trace_power_flow(mpidr, CPU_DOWN);
+
+ if (afflvl != MPIDR_AFFLVL0) {
+ /* Disable coherency if this cluster is to be turned off */
+ plat_cci_disable();
+
+ trace_power_flow(mpidr, CLUSTER_DOWN);
+ }
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance is about to be
+ * suspended. The level and mpidr determine the affinity instance. The 'state'
+ * arg. allows the platform to decide whether the cluster is being turned off
+ * and take apt actions.
+ *
+ * CAUTION: This function is called with coherent stacks so that caches can be
+ * turned off, flushed and coherency disabled. There is no guarantee that caches
+ * will remain turned on across calls to this function as each affinity level is
+ * dealt with. So do not write & read global variables across calls. It would
+ * be wise to flush a write to a global variable to prevent unpredictable
+ * results.
+ ******************************************************************************/
+static void plat_affinst_suspend(unsigned long sec_entrypoint,
+ unsigned int afflvl,
+ unsigned int state)
+{
+ unsigned long mpidr = read_mpidr_el1();
+ unsigned long cluster_id;
+ unsigned long cpu_id;
+ uintptr_t rv;
+
+ /* Determine if any platform actions need to be executed. */
+ if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+ return;
+
+ cpu_id = mpidr & MPIDR_CPU_MASK;
+ cluster_id = mpidr & MPIDR_CLUSTER_MASK;
+
+ if (cluster_id)
+ rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
+ else
+ rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;
+
+ mmio_write_32(rv, sec_entrypoint);
+
+ if (afflvl >= MPIDR_AFFLVL0)
+ mt_platform_save_context(mpidr);
+
+ /* Perform the common cluster specific operations */
+ if (afflvl >= MPIDR_AFFLVL1) {
+ /* Disable coherency if this cluster is to be turned off */
+ plat_cci_disable();
+ disable_scu(mpidr);
+
+ trace_power_flow(mpidr, CLUSTER_SUSPEND);
+ }
+
+ if (afflvl >= MPIDR_AFFLVL2) {
+ /* Prevent interrupts from spuriously waking up this cpu */
+ plat_mt_gic_cpuif_disable();
+ }
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after being turned off earlier. The level and mpidr determine the affinity
+ * instance. The 'state' arg. allows the platform to decide whether the cluster
+ * was turned off prior to wakeup and do what's necessary to setup it up
+ * correctly.
+ ******************************************************************************/
+static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
+{
+ unsigned long mpidr = read_mpidr_el1();
+
+ /* Determine if any platform actions need to be executed. */
+ if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+ return;
+
+ /* Perform the common cluster specific operations */
+ if (afflvl >= MPIDR_AFFLVL1) {
+ enable_scu(mpidr);
+
+ /* Enable coherency if this cluster was off */
+ plat_cci_enable();
+ trace_power_flow(mpidr, CLUSTER_UP);
+ }
+
+ /* Enable the gic cpu interface */
+ plat_mt_gic_cpuif_enable();
+ plat_mt_gic_pcpu_init();
+ trace_power_flow(mpidr, CPU_UP);
+}
+
+/*******************************************************************************
+ * MTK_platform handler called when an affinity instance has just been powered
+ * on after having been suspended earlier. The level and mpidr determine the
+ * affinity instance.
+ ******************************************************************************/
+static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
+{
+ unsigned long mpidr = read_mpidr_el1();
+
+ /* Determine if any platform actions need to be executed. */
+ if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
+ return;
+
+ if (afflvl >= MPIDR_AFFLVL2) {
+ /* Enable the gic cpu interface */
+ plat_mt_gic_init();
+ plat_mt_gic_cpuif_enable();
+ }
+
+ /* Perform the common cluster specific operations */
+ if (afflvl >= MPIDR_AFFLVL1) {
+ enable_scu(mpidr);
+
+ /* Enable coherency if this cluster was off */
+ plat_cci_enable();
+ trace_power_flow(mpidr, CLUSTER_UP);
+ }
+
+ if (afflvl >= MPIDR_AFFLVL0)
+ mt_platform_restore_context(mpidr);
+
+ plat_mt_gic_pcpu_init();
+}
+
+static unsigned int plat_get_sys_suspend_power_state(void)
+{
+ /* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
+ return psci_make_powerstate(0, 1, 2);
+}
+
+/*******************************************************************************
+ * MTK handlers to shutdown/reboot the system
+ ******************************************************************************/
+static void __dead2 plat_system_off(void)
+{
+ INFO("MTK System Off\n");
+ wfi();
+ ERROR("MTK System Off: operation not handled.\n");
+ panic();
+}
+
+static void __dead2 plat_system_reset(void)
+{
+ /* Write the System Configuration Control Register */
+ INFO("MTK System Reset\n");
+
+ mmio_clrbits_32(MTK_WDT_BASE,
+ (MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
+ mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
+ mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);
+
+ wfi();
+ ERROR("MTK System Reset: operation not handled.\n");
+ panic();
+}
+
+/*******************************************************************************
+ * Export the platform handlers to enable psci to invoke them
+ ******************************************************************************/
+static const plat_pm_ops_t plat_plat_pm_ops = {
+ .affinst_standby = plat_affinst_standby,
+ .affinst_on = plat_affinst_on,
+ .affinst_off = plat_affinst_off,
+ .affinst_suspend = plat_affinst_suspend,
+ .affinst_on_finish = plat_affinst_on_finish,
+ .affinst_suspend_finish = plat_affinst_suspend_finish,
+ .system_off = plat_system_off,
+ .system_reset = plat_system_reset,
+ .get_sys_suspend_power_state = plat_get_sys_suspend_power_state,
+};
+
+/*******************************************************************************
+ * Export the platform specific power ops & initialize the mtk_platform power
+ * controller
+ ******************************************************************************/
+int platform_setup_pm(const plat_pm_ops_t **plat_ops)
+{
+ *plat_ops = &plat_plat_pm_ops;
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <arch.h>
+#include <arch_helpers.h>
+#include <assert.h>
+#include <console.h>
+#include <debug.h>
+#include <mmio.h>
+#include <mt_cpuxgpt.h>
+#include <mtk_sip_svc.h>
+#include <plat_private.h>
+#include <platform.h>
+#include <platform_def.h>
+#include <mtk_plat_common.h>
+#include <runtime_svc.h>
+#include <xlat_tables.h>
+
+/*******************************************************************************
+ * SIP top level handler for servicing SMCs.
+ ******************************************************************************/
+uint64_t mediatek_plat_sip_handler(uint32_t smc_fid,
+ uint64_t x1,
+ uint64_t x2,
+ uint64_t x3,
+ uint64_t x4,
+ void *cookie,
+ void *handle,
+ uint64_t flags)
+{
+ uint64_t rc = 0;
+ uint32_t ns;
+
+ /* Determine which security state this SMC originated from */
+ ns = is_caller_non_secure(flags);
+
+ VERBOSE("sip_smc_handler\n");
+ VERBOSE("id=0x%x\n", smc_fid);
+ VERBOSE("x1=0x%lx, x2=0x%lx, x3=0x%lx, x4=0x%lx\n", x1, x2, x3, x4);
+
+ if (!ns) {
+ /* SiP SMC service secure world's call */
+ switch (smc_fid) {
+ default:
+ rc = SMC_UNK;
+ }
+ } else {
+ /* SiP SMC service normal world's call */
+ switch (smc_fid) {
+ default:
+ rc = SMC_UNK;
+ }
+ }
+
+ if (rc == SMC_UNK) {
+ console_init(gteearg.atf_log_port,
+ UART_CLOCK, UART_BAUDRATE);
+ ERROR("%s: unhandled NS(%d) SMC (0x%x)\n",
+ __func__, ns, smc_fid);
+ console_uninit();
+ }
+ SMC_RET1(handle, rc);
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <platform_def.h>
+#include <psci.h>
+
+unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
+{
+ /* Report a single instance at levels higher than the cluster level */
+ if (aff_lvl > MPIDR_AFFLVL1)
+ return PLATFORM_SYSTEM_COUNT;
+
+ if (aff_lvl == MPIDR_AFFLVL1)
+ return PLATFORM_CLUSTER_COUNT;
+
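+ /* Bit 8 of the MPIDR (Aff1[0]) selects cluster 1 over cluster 0 */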
+ return mpidr & 0x100 ? PLATFORM_CLUSTER1_CORE_COUNT :
+ PLATFORM_CLUSTER0_CORE_COUNT;
+}
+
+unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
+{
+ return aff_lvl <= MPIDR_AFFLVL2 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
+}
+
+int mt_setup_topology(void)
+{
+ /* [TODO] Make topology configurable via SCC */
+ return 0;
+}
--- /dev/null
+#
+# Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# Neither the name of ARM nor the names of its contributors may be used
+# to endorse or promote products derived from this software without specific
+# prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+MTK_PLAT := plat/mediatek
+MTK_PLAT_SOC := ${MTK_PLAT}/${PLAT}
+
+# Add OEM customized code
+OEMS := true
+MTK_SIP_KERNEL_BOOT_ENABLE := 1
+
+
+ifneq (${OEMS},none)
+ OEMS_INCLUDES := -I${MTK_PLAT}/common/custom/
+ OEMS_SOURCES := ${MTK_PLAT}/common/custom/oem_svc.c
+endif
+
+PLAT_INCLUDES := -I${MTK_PLAT}/common/ \
+ -I${MTK_PLAT}/common/drivers/uart \
+ -I${MTK_PLAT_SOC}/ \
+ -I${MTK_PLAT_SOC}/drivers/timer/ \
+ -I${MTK_PLAT_SOC}/include/ \
+ -Iinclude/plat/arm/common/ \
+ -Iinclude/common/tbbr/ \
+ ${OEMS_INCLUDES}
+
+PLAT_BL_COMMON_SOURCES := lib/aarch64/xlat_tables.c \
+ plat/common/aarch64/plat_common.c \
+ plat/common/plat_gic.c
+
+BL31_SOURCES += drivers/arm/cci/cci.c \
+ drivers/delay_timer/generic_delay_timer.c \
+ drivers/arm/gic/common/gic_common.c \
+ drivers/arm/gic/v2/gicv2_main.c \
+ drivers/arm/gic/v2/gicv2_helpers.c \
+ plat/common/plat_gicv2.c \
+ drivers/console/console.S \
+ drivers/delay_timer/delay_timer.c \
+ lib/cpus/aarch64/cortex_a53.S \
+ plat/common/aarch64/platform_mp_stack.S \
+ ${MTK_PLAT_SOC}/bl31_plat_setup.c \
+ ${MTK_PLAT_SOC}/plat_mt_gic.c \
+ ${MTK_PLAT}/common/mtk_sip_svc.c \
+ ${MTK_PLAT}/common/mtk_plat_common.c \
+ ${MTK_PLAT}/common/drivers/uart/8250_console.S \
+ ${MTK_PLAT_SOC}/aarch64/plat_helpers.S \
+ ${MTK_PLAT_SOC}/drivers/timer/mt_cpuxgpt.c \
+ ${MTK_PLAT_SOC}/plat_sip_svc.c \
+ ${MTK_PLAT_SOC}/plat_delay_timer.c \
+ ${MTK_PLAT_SOC}/plat_pm.c \
+ ${MTK_PLAT_SOC}/plat_topology.c \
+ ${MTK_PLAT_SOC}/power_tracer.c \
+ ${MTK_PLAT_SOC}/scu.c \
+ ${OEMS_SOURCES}
+
+# Flag used by the MTK_platform port to determine the version of ARM GIC
+# architecture to use for interrupt management in EL3.
+ARM_GIC_ARCH := 2
+$(eval $(call add_define,ARM_GIC_ARCH))
+
+# Enable workarounds for selected Cortex-A53 errata.
+ERRATA_A53_826319 := 1
+ERRATA_A53_836870 := 1
+
+# Indicate that the reset vector address can be programmed
+PROGRAMMABLE_RESET_ADDRESS := 1
+
+$(eval $(call add_define,MTK_SIP_KERNEL_BOOT_ENABLE))
+
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <debug.h>
+#include <power_tracer.h>
+
+#define trace_log(...) INFO("psci: " __VA_ARGS__)
+
+void trace_power_flow(unsigned long mpidr, unsigned char mode)
+{
+ switch (mode) {
+ case CPU_UP:
+ trace_log("core %ld:%ld ON\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+ (mpidr & MPIDR_CPU_MASK));
+ break;
+ case CPU_DOWN:
+ trace_log("core %ld:%ld OFF\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+ (mpidr & MPIDR_CPU_MASK));
+ break;
+ case CPU_SUSPEND:
+ trace_log("core %ld:%ld SUSPEND\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS,
+ (mpidr & MPIDR_CPU_MASK));
+ break;
+ case CLUSTER_UP:
+ trace_log("cluster %ld ON\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+ break;
+ case CLUSTER_DOWN:
+ trace_log("cluster %ld OFF\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+ break;
+ case CLUSTER_SUSPEND:
+ trace_log("cluster %ld SUSPEND\n",
+ (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS);
+ break;
+ default:
+ trace_log("unknown power mode\n");
+ break;
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <arch.h>
+#include <mcucfg.h>
+#include <mmio.h>
+
+void disable_scu(unsigned long mpidr)
+{
+ if (mpidr & MPIDR_CLUSTER_MASK)
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg,
+ MP1_ACINACTM);
+ else
+ mmio_setbits_32((uintptr_t)&mt6795_mcucfg->mp0_axi_config,
+ MP0_ACINACTM);
+}
+
+void enable_scu(unsigned long mpidr)
+{
+ if (mpidr & MPIDR_CLUSTER_MASK)
+ mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp1_miscdbg,
+ MP1_ACINACTM);
+ else
+ mmio_clrbits_32((uintptr_t)&mt6795_mcucfg->mp0_axi_config,
+ MP0_ACINACTM);
+}
+++ /dev/null
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include <asm_macros.S>
-#include <uart8250.h>
-
- .globl console_core_init
- .globl console_core_putc
- .globl console_core_getc
-
- /* -----------------------------------------------
- * int console_core_init(unsigned long base_addr,
- * unsigned int uart_clk, unsigned int baud_rate)
- * Function to initialize the console without a
- * C Runtime to print debug information. This
- * function will be accessed by console_init and
- * crash reporting.
- * In: x0 - console base address
- * w1 - Uart clock in Hz
- * w2 - Baud rate
- * Out: return 1 on success else 0 on error
- * Clobber list : x1, x2, x3
- * -----------------------------------------------
- */
-func console_core_init
- /* Check the input base address */
- cbz x0, core_init_fail
- /* Check baud rate and uart clock for sanity */
- cbz w1, core_init_fail
- cbz w2, core_init_fail
-
- /* Disable interrupt */
- str wzr, [x0, #UART_IER]
-
- /* Force DTR and RTS to high */
- mov w3, #(UART_MCR_DTR | UART_MCR_RTS)
- str w3, [x0, #UART_MCR]
-
- /* Check high speed */
- movz w3, #:abs_g1:115200
- movk w3, #:abs_g0_nc:115200
- cmp w2, w3
- b.hi 1f
-
- /* Non high speed */
- lsl w2, w2, #4
- mov w3, wzr
- b 2f
-
- /* High speed */
-1: lsl w2, w2, #2
- mov w3, #2
-
- /* Set high speed UART register */
-2: str w3, [x0, #UART_HIGHSPEED]
-
- /* Calculate divisor */
- udiv w3, w1, w2 /* divisor = uartclk / (quot * baudrate) */
- msub w1, w3, w2, w1 /* remainder = uartclk % (quot * baudrate) */
- lsr w2, w2, #1
- cmp w1, w2
- cinc w3, w3, hs
-
- /* Set line configuration, access divisor latches */
- mov w1, #(UART_LCR_DLAB | UART_LCR_WLS_8)
- str w1, [x0, #UART_LCR]
-
- /* Set the divisor */
- and w1, w3, #0xff
- str w1, [x0, #UART_DLL]
- lsr w1, w3, #8
- and w1, w1, #0xff
- str w1, [x0, #UART_DLH]
-
- /* Hide the divisor latches */
- mov w1, #UART_LCR_WLS_8
- str w1, [x0, #UART_LCR]
-
- /* Enable FIFOs, and clear receive and transmit */
- mov w1, #(UART_FCR_FIFO_EN | UART_FCR_CLEAR_RCVR | \
- UART_FCR_CLEAR_XMIT)
- str w1, [x0, #UART_FCR]
-
- mov w0, #1
- ret
-core_init_fail:
- mov w0, wzr
- ret
-endfunc console_core_init
-
- /* --------------------------------------------------------
- * int console_core_putc(int c, unsigned long base_addr)
- * Function to output a character over the console. It
- * returns the character printed on success or -1 on error.
- * In : w0 - character to be printed
- * x1 - console base address
- * Out : return -1 on error else return character.
- * Clobber list : x2
- * --------------------------------------------------------
- */
-func console_core_putc
- /* Check the input parameter */
- cbz x1, putc_error
- /* Prepend '\r' to '\n' */
- cmp w0, #0xA
- b.ne 2f
-
- /* Check if the transmit FIFO is full */
-1: ldr w2, [x1, #UART_LSR]
- and w2, w2, #UART_LSR_THRE
- cbz w2, 1b
- mov w2, #0xD
- str w2, [x1, #UART_THR]
-
- /* Check if the transmit FIFO is full */
-2: ldr w2, [x1, #UART_LSR]
- and w2, w2, #UART_LSR_THRE
- cbz w2, 2b
- str w0, [x1, #UART_THR]
- ret
-putc_error:
- mov w0, #-1
- ret
-endfunc console_core_putc
-
- /* ---------------------------------------------
- * int console_core_getc(unsigned long base_addr)
- * Function to get a character from the console.
- * It returns the character grabbed on success
- * or -1 on error.
- * In : x0 - console base address
- * Clobber list : x0, x1
- * ---------------------------------------------
- */
-func console_core_getc
- cbz x0, getc_error
-
- /* Check if the receive FIFO is empty */
-1: ldr w1, [x0, #UART_LSR]
- tbz w1, #UART_LSR_DR, 1b
- ldr w0, [x0, #UART_RBR]
- ret
-getc_error:
- mov w0, #-1
- ret
-endfunc console_core_getc
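
For reference, the baud-rate setup in console_core_init above uses a sample divider of 16 for standard rates and 4 for rates above 115200 (signalled by writing 2 to UART_HIGHSPEED), then computes the divisor as uartclk / (quot * baudrate) rounded to the nearest integer. The sketch below renders the same arithmetic in C; it is illustrative only and the identifier names are invented here:

#include <stdint.h>

/*
 * Sketch of the divisor maths performed by the udiv/msub/cinc sequence in
 * console_core_init; *highspeed receives the value stored to UART_HIGHSPEED.
 */
static uint32_t uart8250_divisor_sketch(uint32_t uart_clk, uint32_t baud_rate,
					uint32_t *highspeed)
{
	uint32_t quot_x_baud, divisor, remainder;

	if (baud_rate > 115200U) {
		*highspeed = 2U;		/* high-speed: sample quot = 4 */
		quot_x_baud = baud_rate << 2;
	} else {
		*highspeed = 0U;		/* standard: sample quot = 16 */
		quot_x_baud = baud_rate << 4;
	}

	divisor = uart_clk / quot_x_baud;
	remainder = uart_clk % quot_x_baud;

	/* Round to nearest, matching the cmp/cinc pair in the assembly. */
	if (remainder >= quot_x_baud / 2U)
		divisor++;

	return divisor;			/* low byte -> DLL, high byte -> DLH */
}
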
+++ /dev/null
-/*
- * Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of ARM nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific
- * prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __UART8250_H__
-#define __UART8250_H__
-
-/* UART register */
-#define UART_RBR 0x00 /* Receive buffer register */
-#define UART_DLL 0x00 /* Divisor latch lsb */
-#define UART_THR 0x00 /* Transmit holding register */
-#define UART_DLH 0x04 /* Divisor latch msb */
-#define UART_IER 0x04 /* Interrupt enable register */
-#define UART_FCR 0x08 /* FIFO control register */
-#define UART_LCR 0x0c /* Line control register */
-#define UART_MCR 0x10 /* Modem control register */
-#define UART_LSR 0x14 /* Line status register */
-#define UART_HIGHSPEED 0x24 /* High speed UART */
-
-/* FCR */
-#define UART_FCR_FIFO_EN 0x01 /* enable FIFO */
-#define UART_FCR_CLEAR_RCVR 0x02 /* clear the RCVR FIFO */
-#define UART_FCR_CLEAR_XMIT 0x04 /* clear the XMIT FIFO */
-
-/* LCR */
-#define UART_LCR_WLS_8 0x03 /* 8 bit character length */
-#define UART_LCR_DLAB 0x80 /* divisor latch access bit */
-
-/* MCR */
-#define UART_MCR_DTR 0x01
-#define UART_MCR_RTS 0x02
-
-/* LSR */
-#define UART_LSR_DR 0x01 /* Data ready */
-#define UART_LSR_THRE 0x20 /* Xmit holding register empty */
-
-#endif /* __UART8250_H__ */
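
The register map removed above is the usual 16550 layout on a 4-byte register stride plus MediaTek's UART_HIGHSPEED extension: RBR, THR and DLL share offset 0x00, IER shares 0x04 with DLH, and the DLAB bit in LCR selects which pair is visible. A minimal polled-output sketch against that layout, mirroring what console_core_putc does in assembly (illustrative only, all _SKETCH names are local to this example):

#include <stdint.h>

#define UART_THR_SKETCH		0x00	/* Transmit holding register */
#define UART_LSR_SKETCH		0x14	/* Line status register */
#define UART_LSR_THRE_SKETCH	0x20	/* THR empty */

/* Busy-wait until the THR is empty, then write one character. */
static void uart8250_putc_sketch(uintptr_t base, char c)
{
	volatile uint32_t *lsr = (volatile uint32_t *)(base + UART_LSR_SKETCH);
	volatile uint32_t *thr = (volatile uint32_t *)(base + UART_THR_SKETCH);

	while ((*lsr & UART_LSR_THRE_SKETCH) == 0U)
		;
	*thr = (uint32_t)(unsigned char)c;
}
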
MTK_PLAT_SOC := ${MTK_PLAT}/${PLAT}
PLAT_INCLUDES := -I${MTK_PLAT}/common/ \
+ -I${MTK_PLAT}/common/drivers/uart/ \
-I${MTK_PLAT_SOC}/drivers/crypt/ \
-I${MTK_PLAT_SOC}/drivers/mtcmos/ \
-I${MTK_PLAT_SOC}/drivers/pmic/ \
-I${MTK_PLAT_SOC}/drivers/rtc/ \
-I${MTK_PLAT_SOC}/drivers/spm/ \
-I${MTK_PLAT_SOC}/drivers/timer/ \
- -I${MTK_PLAT_SOC}/drivers/uart/ \
-I${MTK_PLAT_SOC}/include/
PLAT_BL_COMMON_SOURCES := lib/xlat_tables/xlat_tables_common.c \
lib/cpus/aarch64/cortex_a57.S \
lib/cpus/aarch64/cortex_a72.S \
plat/common/aarch64/platform_mp_stack.S \
+ ${MTK_PLAT}/common/drivers/uart/8250_console.S \
${MTK_PLAT}/common/mtk_plat_common.c \
${MTK_PLAT}/common/mtk_sip_svc.c \
${MTK_PLAT_SOC}/aarch64/plat_helpers.S \
${MTK_PLAT_SOC}/drivers/spm/spm_mcdi.c \
${MTK_PLAT_SOC}/drivers/spm/spm_suspend.c \
${MTK_PLAT_SOC}/drivers/timer/mt_cpuxgpt.c \
- ${MTK_PLAT_SOC}/drivers/uart/8250_console.S \
${MTK_PLAT_SOC}/plat_mt_gic.c \
${MTK_PLAT_SOC}/plat_pm.c \
${MTK_PLAT_SOC}/plat_sip_calls.c \