#
-# Copyright (C) 2009 OpenWrt.org
+# Copyright (C) 2009-2010 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
FEATURES:=squashfs broken
MAINTAINER:=Imre Kaloz <kaloz@openwrt.org>
-LINUX_VERSION:=2.6.25.20
+LINUX_VERSION:=2.6.31.14
include $(INCLUDE_DIR)/target.mk
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_START=0xff800000
+# CONFIG_MTD_PHYSMAP_COMPAT=y
+# CONFIG_MTD_PHYSMAP_LEN=0x400000
+# CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+CONFIG_DMADEVICES=y
+CONFIG_CFDMA=y
+CONFIG_FEC_548x_SHARED_PHY=y
+CONFIG_FPU=y
+# CONFIG_M5474LITE is not set
+CONFIG_M5484LITE=y
+# CONFIG_M5441X is not set
+# CONFIG_VDSO is not set
+CONFIG_BROADCOM5222_PHY=y
# CONFIG_8139TOO is not set
CONFIG_ADVANCED=y
# CONFIG_AMIGA is not set
# CONFIG_M547X is not set
# CONFIG_M5485AFE is not set
# CONFIG_M5485BFE is not set
-CONFIG_M5485CFE=y
+# CONFIG_M5485CFE is not set
# CONFIG_M5485DFE is not set
# CONFIG_M5485EFE is not set
# CONFIG_M5485FFE is not set
# CONFIG_SERIAL_COLDFIRE_IRDA is not set
CONFIG_SERIAL_COLDFIRE=y
# CONFIG_SERIAL_CONSOLE is not set
-# CONFIG_SERIAL_MCF is not set
+CONFIG_SERIAL_MCF=y
+# CONFIG_SERIAL_COLDFIRE_EDMA is not set
+CONFIG_SERIAL_MCF_BAUDRATE=115200
+CONFIG_SERIAL_MCF_CONSOLE=y
CONFIG_SINGLE_MEMORY_CHUNK=y
CONFIG_SLABINFO=y
# CONFIG_SUN3 is not set
--- /dev/null
+#
+# arch/m68k/boot/Makefile
+#
+# Based on arch/sh/boot/Makefile by Stuart Menefy
+#
+# Copyright (c) 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+# by Kurt Mahan <kmahan@freescale.com>
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+#
+# Assign safe dummy values if these variables are not defined,
+# in order to suppress error messages.
+#
+CONFIG_SDRAM_BASE ?= 0x40000000
+CONFIG_IMG_START ?= 0x00020000
+
+export CONFIG_SDRAM_BASE CONFIG_IMG_START
+
+targets := zImage zImage.srec vmlinux.srec uImage uImage.srec
+
+$(obj)/zImage: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+ @echo ' Image $@ is ready'
+
+OBJCOPYFLAGS_zImage.srec := -I binary -O srec
+$(obj)/zImage.srec: $(obj)/zImage
+ $(call if_changed,objcopy)
+
+KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
+ $$[$(CONFIG_SDRAM_BASE) + \
+ $(CONFIG_IMG_START)]')
+
+KERNEL_ENTRY := $(shell /bin/bash -c 'printf "0x%08x" \
+ $$[$(CONFIG_SDRAM_BASE) + \
+ $(CONFIG_IMG_START)]')
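+
+# Worked example (assuming the dummy defaults above): both addresses
+# evaluate to 0x40000000 + 0x00020000 = 0x40020000; a configured kernel
+# build overrides CONFIG_SDRAM_BASE and CONFIG_IMG_START.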
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A m68k -O linux -T kernel \
+ -C gzip -a $(KERNEL_LOAD) -e $(KERNEL_ENTRY) \
+ -n 'Linux-$(KERNELRELEASE)' -d $< $@
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+ @echo ' Image $@ is ready'
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
+$(obj)/vmlinux.srec: $(obj)/vmlinux.bin
+ $(call if_changed,objcopy)
+
+OBJCOPYFLAGS_uImage.srec := -I binary -O srec
+$(obj)/uImage.srec: $(obj)/uImage
+ $(call if_changed,objcopy)
+
+clean-files += uImage uImage.srec \
+ zImage zImage.srec \
+ vmlinux.srec vmlinux.bin vmlinux.bin.gz
--- /dev/null
+#
+# Makefile for Linux arch/m68k/coldfire source directory
+#
+
+obj-y += common/
+obj-$(CONFIG_VDSO) += vdso/
+
+obj-$(CONFIG_M5445X) += m5445x/
+obj-$(CONFIG_M547X_8X) += m547x/
--- /dev/null
+#
+# Makefile for Linux arch/m68k/coldfire/common source directory
+#
+
+obj-y:= entry.o cache.o signal.o muldi3.o traps.o ints.o time.o
+extra-y:= head.o
+
--- /dev/null
+/*
+ * linux/arch/m68k/coldfire/cache.c
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Kurt Mahan kmahan@freescale.com
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <asm/cfcache.h>
+#include <asm/coldfire.h>
+#include <asm/system.h>
+
+/* Cache Control Reg shadow reg */
+unsigned long shadow_cacr;
+
+/**
+ * cacr_set - Set the Cache Control Register
+ * @x: Value to set
+ */
+void cacr_set(unsigned long x)
+{
+ shadow_cacr = x;
+
+ __asm__ __volatile__ ("movec %0, %%cacr"
+ : /* no outputs */
+ : "r" (shadow_cacr));
+}
+
+/**
+ * cacr_get - Get the current value of the Cache Control Register
+ *
+ * @return CACR value
+ */
+unsigned long cacr_get(void)
+{
+ return shadow_cacr;
+}
--- /dev/null
+/*
+ * arch/m68k/coldfire/entry.S
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Kurt Mahan kmahan@freescale.com
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * Based on:
+ *
+ * arch/m68knommu/platform/5307/entry.S &
+ * arch/m68k/kernel/entry.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file README.legal in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/cf_entry.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/unistd.h>
+
+/*
+ * TASK_INFO:
+ *
+ * - TINFO_PREEMPT (struct thread_info / preempt_count)
+ * Used to keep track of preemptability
+ * - TINFO_FLAGS (struct thread_info / flags - include/asm-m68k/thread_info.h)
+ * Various bit flags that are checked for scheduling/tracing
+ * Bits 0-7 are checked every exception exit
+ * 8-15 are checked every syscall exit
+ *
+ * TIF_SIGPENDING 6
+ * TIF_NEED_RESCHED 7
+ * TIF_DELAYED_TRACE 14
+ * TIF_SYSCALL_TRACE 15
+ * TIF_MEMDIE 16 (never checked here)
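+ *
+ * Note: m68k is big-endian, so byte TINFO_FLAGS+2 of the 32-bit flags
+ * word holds bits 15:8 and byte TINFO_FLAGS+3 holds bits 7:0; that is
+ * why the syscall-exit path below tests TINFO_FLAGS+2 and the
+ * exception-exit path tests TINFO_FLAGS+3.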
+ */
+
+.bss
+
+sw_ksp:
+.long 0
+
+sw_usp:
+.long 0
+
+.text
+
+.globl system_call
+.globl buserr
+.globl trap
+.globl resume
+.globl ret_from_exception
+.globl ret_from_signal
+.globl sys_call_table
+.globl ret_from_interrupt
+.globl inthandler
+
+ENTRY(buserr)
+#ifdef CONFIG_COLDFIRE_FOO
+ movew #0x2700,%sr /* lock interrupts */
+#endif
+ SAVE_ALL_INT
+#ifdef CONFIG_COLDFIRE_FOO
+ movew PT_SR(%sp),%d3 /* get original %sr */
+ oril #0x2000,%d3 /* set supervisor mode in it */
+ movew %d3,%sr /* recover irq state */
+#endif
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- /* stack frame pointer argument */
+ jsr buserr_c
+ addql #4,%sp
+ jra .Lret_from_exception
+
+ENTRY(trap)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- /* stack frame pointer argument */
+ jsr trap_c
+ addql #4,%sp
+ jra .Lret_from_exception
+
+ /* After a fork we jump here directly from resume;
+ %d1 contains the previous task and is passed to schedule_tail */
+ENTRY(ret_from_fork)
+ movel %d1,%sp@-
+ jsr schedule_tail
+ addql #4,%sp
+ jra .Lret_from_exception
+
+do_trace_entry:
+ movel #-ENOSYS,%d1 /* needed for strace */
+ movel %d1,%sp@(PT_D0)
+ subql #4,%sp
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ movel %sp@(PT_ORIG_D0),%d0
+ cmpl #NR_syscalls,%d0
+ jcs syscall
+badsys:
+ movel #-ENOSYS,%d1
+ movel %d1,%sp@(PT_D0)
+ jra ret_from_exception
+
+do_trace_exit:
+ subql #4,%sp
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jra .Lret_from_exception
+
+ENTRY(ret_from_signal)
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jra .Lret_from_exception
+
+ENTRY(system_call)
+ SAVE_ALL_SYS
+
+ GET_CURRENT(%d1)
+ /* save top of frame */
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+
+ /* syscall trace */
+ tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
+ jmi do_trace_entry /* SYSCALL_TRACE is set */
+ cmpl #NR_syscalls,%d0
+ jcc badsys
+syscall:
+ movel #sys_call_table,%a0
+ asll #2,%d0
+ addl %d0,%a0
+ movel %a0@,%a0
+ jsr %a0@
+ movel %d0,%sp@(PT_D0) /* save the return value */
+ret_from_syscall:
+ movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
+ jne syscall_exit_work /* flags set so process */
+1: RESTORE_ALL
+
+syscall_exit_work:
+ btst #5,%sp@(PT_SR) /* check if returning to kernel */
+ bnes 1b /* if so, skip resched, signals */
+
+ btstl #15,%d0 /* check if SYSCALL_TRACE */
+ jne do_trace_exit
+ btstl #14,%d0 /* check if DELAYED_TRACE */
+ jne do_delayed_trace
+ btstl #6,%d0 /* check if SIGPENDING */
+ jne do_signal_return
+ pea resume_userspace
+ jra schedule
+
+ENTRY(ret_from_exception)
+.Lret_from_exception:
+ btst #5,%sp@(PT_SR) /* check if returning to kernel */
+ bnes 1f /* if so, skip resched, signals */
+ movel %d0,%sp@- /* Only allow interrupts when we are */
+ move %sr,%d0 /* last one on the kernel stack, */
+ andl #ALLOWINT,%d0 /* otherwise stack overflow can occur */
+ move %d0,%sr /* during heavy interrupt load. */
+ movel %sp@+,%d0
+
+resume_userspace:
+ moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
+ jne exit_work /* SIGPENDING and/or NEED_RESCHED set */
+1: RESTORE_ALL
+
+exit_work:
+ /* save top of frame */
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+ btstl #6,%d0 /* check for SIGPENDING in flags */
+ jne do_signal_return
+ pea resume_userspace
+ jra schedule
+
+do_signal_return:
+ subql #4,%sp /* dummy return address */
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ clrl %sp@-
+ bsrl do_signal
+ addql #8,%sp
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jbra resume_userspace
+
+do_delayed_trace:
+ bclr #7,%sp@(PT_SR) /* clear trace bit in SR */
+ pea 1 /* send SIGTRAP */
+ movel %curptr,%sp@-
+ pea LSIGTRAP
+ jbsr send_sig
+ addql #8,%sp
+ addql #4,%sp
+ jbra resume_userspace
+
+/*
+ * This is the interrupt handler (for all hardware interrupt
+ * sources). It figures out the vector number and calls the appropriate
+ * interrupt service routine directly.
+ */
+ENTRY(inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ addil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ /* put exception # in d0 */
+ movel %sp@(PT_VECTOR),%d0
+ swap %d0 /* extract bits 25:18 */
+ lsrl #2,%d0
+ andl #0x0ff,%d0
+
+ movel %sp,%sp@-
+ movel %d0,%sp@- /* put vector # on stack */
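+ /*
+ * auto_irqhandler_fixup points at the operand of the following jbsr
+ * (. + 2 skips the 2-byte opcode word), so the process_int dispatch
+ * target can be patched at run time.
+ */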
+auto_irqhandler_fixup = . + 2
+ jbsr process_int /* process the IRQ */
+ addql #8,%sp /* pop parameters off stack */
+
+ret_from_interrupt:
+
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ subil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ jeq ret_from_last_interrupt
+2: RESTORE_ALL
+
+ ALIGN
+ret_from_last_interrupt:
+ moveb %sp@(PT_SR),%d0
+ andl #(~ALLOWINT>>8)&0xff,%d0
+ jne 2b
+
+ /* check if we need to do software interrupts */
+ tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
+ jeq .Lret_from_exception
+ pea ret_from_exception
+ jra do_softirq
+
+ENTRY(user_inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ addil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ /* put exception # in d0 */
+ movel %sp@(PT_VECTOR),%d0
+user_irqvec_fixup = . + 2
+ swap %d0 /* extract bits 25:18 */
+ lsrl #2,%d0
+ andl #0x0ff,%d0
+
+ movel %sp,%sp@-
+ movel %d0,%sp@- /* put vector # on stack */
+user_irqhandler_fixup = . + 2
+ jbsr process_int /* process the IRQ */
+ addql #8,%sp /* pop parameters off stack */
+
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ subil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ jeq ret_from_last_interrupt
+ RESTORE_ALL
+
+/* Handler for uninitialized and spurious interrupts */
+
+ENTRY(bad_inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ addil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+
+ movel %sp,%sp@-
+ jsr handle_badint
+ addql #4,%sp
+
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ subil #0x10000,%d0
+ movel %d0,%curptr@(TASK_INFO+TINFO_PREEMPT)
+ jeq ret_from_last_interrupt
+ RESTORE_ALL
+
+ENTRY(sys_fork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_fork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_clone
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_vfork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_vfork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_sigsuspend)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr do_sigsuspend
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_rt_sigsuspend)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr do_rt_sigsuspend
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_rt_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_rt_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+resume:
+ /*
+ * Beware - when entering resume, prev (the current task) is
+ * in a0, next (the new task) is in a1, so don't change these
+ * registers until their contents are no longer needed.
+ */
+
+ /* save sr */
+ movew %sr,%d0
+ movew %d0,%a0@(TASK_THREAD+THREAD_SR)
+
+ /* save usp */
+ /* Save USP via %a1 (which is saved/restored from %d0) */
+ movel %a1,%d0
+ movel %usp,%a1
+ movel %a1,%a0@(TASK_THREAD+THREAD_USP)
+ movel %d0,%a1
+
+ /* save non-scratch registers on stack */
+ SAVE_SWITCH_STACK
+
+ /* save current kernel stack pointer */
+ movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
+#ifdef CONFIG_FPU
+ /* save floating point context */
+ fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
+
+1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
+2:
+ fmovemd %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
+3:
+#endif
+ /* Return previous task in %d1 */
+ movel %curptr,%d1
+
+ /* switch to new task (a1 contains new task) */
+ movel %a1,%curptr
+#ifdef CONFIG_FPU
+ /* restore floating point context */
+1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
+2:
+ fmovemd %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
+/* frestore %a1@(TASK_THREAD+THREAD_FPCNTL)*/
+3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
+#endif
+ /* restore the kernel stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_KSP),%sp
+
+ /* restore non-scratch registers */
+ RESTORE_SWITCH_STACK
+
+ /* restore user stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_USP),%a0
+ movel %a0,%usp
+
+ /* restore status register */
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
+ movew %d0,%sr
+
+ rts
+
+.data
+ALIGN
+sys_call_table:
+ .long sys_ni_syscall /* 0 - old "setup()" system call*/
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_chown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long old_select
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long sys_old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm for i386 */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall
+ .long sys_ni_syscall /* 110 */ /* iopl for i386 */
+ .long sys_vhangup
+ .long sys_ni_syscall /* obsolete idle() syscall */
+ .long sys_ni_syscall /* vm86old for i386 */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_cacheflush /* modify_ldt for i386 */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_getpagesize
+ .long sys_ni_syscall /* old sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_lchown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_chown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_lchown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+ .long sys_getdents64 /* 220 */
+ .long sys_gettid
+ .long sys_tkill
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr /* 225 */
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr /* 230 */
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_futex /* 235 */
+ .long sys_sendfile64
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_fcntl64
+ .long sys_readahead /* 240 */
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel /* 245 */
+ .long sys_fadvise64
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 250 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 255 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 260 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 265 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy /* 270 */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify /* 275 */
+ .long sys_mq_getsetattr
+ .long sys_waitid
+ .long sys_ni_syscall /* for sys_vserver */
+ .long sys_add_key
+ .long sys_request_key /* 280 */
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch /* 285 */
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat
+ .long sys_mkdirat
+ .long sys_mknodat /* 290 */
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64
+ .long sys_unlinkat
+ .long sys_renameat /* 295 */
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat /* 300 */
+ .long sys_ni_syscall /* Reserved for pselect6 */
+ .long sys_ni_syscall /* Reserved for ppoll */
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 305 */
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee
+ .long sys_vmsplice
+ .long sys_move_pages /* 310 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_kexec_load
+ .long sys_getcpu
+ .long sys_epoll_pwait /* 315 */
+ .long sys_utimensat
+ .long sys_signalfd
+ .long sys_timerfd_create
+ .long sys_eventfd
+ .long sys_fallocate /* 320 */
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
+ .long sys_signalfd4
+ .long sys_eventfd2
+ .long sys_epoll_create1 /* 325 */
+ .long sys_dup3
+ .long sys_pipe2
+ .long sys_inotify_init1
+ .long sys_ni_syscall /* Reserved */
+ .long sys_ni_syscall /* 330 Reserved */
+ .long sys_ni_syscall /* Reserved */
+ .long sys_ni_syscall /* Reserved */
+ .long sys_read_tp
+ .long sys_write_tp
+ .long sys_atomic_cmpxchg_32 /* 335 */
+ .long sys_atomic_barrier
--- /dev/null
+/*
+ * head.S is the MMU-enabled, ColdFire-specific initial boot code
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Kurt Mahan kmahan@freescale.com
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Parts of this code came from arch/m68k/kernel/head.S
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/coldfire.h>
+#include <asm/cfcache.h>
+
+#define DEBUG
+
+.globl kernel_pg_dir
+.globl availmem
+.globl set_context
+.globl set_fpga
+
+#ifdef DEBUG
+/* When debugging use readable names for labels */
+#ifdef __STDC__
+#define L(name) .head.S.##name
+#else
+#define L(name) .head.S./**/name
+#endif
+#else
+#ifdef __STDC__
+#define L(name) .L##name
+#else
+#define L(name) .L/**/name
+#endif
+#endif
+
+/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
+#ifndef __INITDATA
+#define __INITDATA .data
+#define __FINIT .previous
+#endif
+
+#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+/*
+ * Kernel mapped to virtual ram address.
+ *
+ * M5445x:
+ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
+ * Data[1]: 0xA0000000 -> 0xAFFFFFFF PCI
+ * Code[0]: Not Mapped
+ * Code[1]: Not Mapped
+ *
+ * M547x/M548x
+ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
+ * Data[1]: Not Mapped
+ * Code[0]: Not Mapped
+ * Code[1]: Not Mapped
+ */
+#if defined(CONFIG_M5445X)
+#define ACR0_DEFAULT #0xF00FA048 /* System regs */
+#define ACR1_DEFAULT #0xA00FA048 /* PCI */
+#define ACR2_DEFAULT #0x00000000 /* Not Mapped */
+#define ACR3_DEFAULT #0x00000000 /* Not Mapped */
+#elif defined(CONFIG_M547X_8X)
+#define ACR0_DEFAULT #0xF00FA048 /* System Regs */
+#define ACR1_DEFAULT #0x00000000 /* Not Mapped */
+#define ACR2_DEFAULT #0x00000000 /* Not Mapped */
+#define ACR3_DEFAULT #0x00000000 /* Not Mapped */
+#endif
+
+#else /* CONFIG_SDRAM_BASE = PAGE_OFFSET */
+/*
+ * Kernel mapped to physical ram address.
+ *
+ * M5445x:
+ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
+ * Data[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - uncached
+ * Code[0]: Not Mapped
+ * Code[1]: 0x40000000 -> 0x4FFFFFFF SDRAM - cached
+ *
+ * M547x/M548x
+ * Data[0]: 0xF0000000 -> 0xFFFFFFFF System regs
+ * Data[1]: 0x00000000 -> 0x0FFFFFFF SDRAM - uncached
+ * Code[0]: Not Mapped
+ * Code[1]: 0x00000000 -> 0x0FFFFFFF SDRAM - cached
+ */
+#if defined(CONFIG_M5445X)
+#define ACR0_DEFAULT #0xF00FA048 /* System Regs uncached/precise */
+#define ACR1_DEFAULT #0x400FA028 /* SDRAM cached/copyback */
+#define ACR2_DEFAULT #0x00000000 /* Not mapped */
+#define ACR3_DEFAULT #0x400FA028 /* SDRAM cached/copyback */
+#elif defined(CONFIG_M547X_8X)
+#define ACR0_DEFAULT #0xF00FA048 /* System Regs */
+#define ACR1_DEFAULT #0x000FA028 /* SDRAM cached/copy-back */
+#define ACR2_DEFAULT #0x00000000 /* Not mapped */
+#define ACR3_DEFAULT #0x000FA028 /* Instruction cached/copy-back */
+#endif
+#endif
+
+/* ACR mapping for FPGA (maps 0) */
+#define ACR0_FPGA #0x000FA048 /* ACR0 enable FPGA */
+
+/* Several macros to make the writing of subroutines easier:
+ * - func_start marks the beginning of the routine, sets up the frame
+ * register and saves the registers; it also defines another macro
+ * to automatically restore the registers again.
+ * - func_return marks the end of the routine and simply calls the prepared
+ * macro to restore registers and jump back to the caller.
+ * - func_define generates another macro to automatically put arguments
+ * onto the stack, call the subroutine and clean up the stack again.
+ */
+
+.macro load_symbol_address symbol,register
+ movel #\symbol,\register
+.endm
+
+.macro func_start name,saveregs,savesize,stack=0
+L(\name):
+ linkw %a6,#-\stack
+ subal #(\savesize),%sp
+ moveml \saveregs,%sp@
+.set stackstart,-\stack
+
+.macro func_return_\name
+ moveml %sp@,\saveregs
+ addal #(\savesize),%sp
+ unlk %a6
+ rts
+.endm
+.endm
+
+.macro func_return name
+ func_return_\name
+.endm
+
+.macro func_call name
+ jbsr L(\name)
+.endm
+
+.macro move_stack nr,arg1,arg2,arg3,arg4
+.if \nr
+ move_stack "(\nr-1)",\arg2,\arg3,\arg4
+ movel \arg1,%sp@-
+.endif
+.endm
+
+.macro func_define name,nr=0
+.macro \name arg1,arg2,arg3,arg4
+ move_stack \nr,\arg1,\arg2,\arg3,\arg4
+ func_call \name
+.if \nr
+ lea %sp@(\nr*4),%sp
+.endif
+.endm
+.endm
+
+func_define serial_putc,1
+
+.macro putc ch
+ pea \ch
+ func_call serial_putc
+ addql #4,%sp
+.endm
+
+.macro dputc ch
+#ifdef DEBUG
+ putc \ch
+#endif
+.endm
+
+func_define putn,1
+
+.macro dputn nr
+#ifdef DEBUG
+ putn \nr
+#endif
+.endm
+
+/*
+ mmu_map - creates a new TLB entry
+
+ virt_addr Must be on proper boundary
+ phys_addr Must be on proper boundary
+ itlb MMUOR_ITLB if instruction TLB or 0
+ asid address space ID
+ shared_global MMUTR_SG if shared between different ASIDs or 0
+ size_code MMUDR_SZ1M 1 MB
+ MMUDR_SZ4K 4 KB
+ MMUDR_SZ8K 8 KB
+ MMUDR_SZ16M 16 MB
+ cache_mode MMUDR_INC instruction non-cacheable
+ MMUDR_IC instruction cacheable
+ MMUDR_DWT data writethrough
+ MMUDR_DCB data copyback
+ MMUDR_DNCP data non-cacheable, precise
+ MMUDR_DNCIP data non-cacheable, imprecise
+ super_prot MMUDR_SP if user mode generates exception or 0
+ readable MMUDR_R if permits read access (data TLB) or 0
+ writable MMUDR_W if permits write access (data TLB) or 0
+ executable MMUDR_X if permits execute access (instruction TLB) or 0
+ locked MMUDR_LK prevents TLB entry from being replaced or 0
+ temp_data_reg a data register to use for temporary values
+*/
+.macro mmu_map virt_addr,phys_addr,itlb,asid,shared_global,size_code,cache_mode,super_prot,readable,writable,executable,locked,temp_data_reg
+ /* Set up search of TLB. */
+ movel #(\virt_addr+1), \temp_data_reg
+ movel \temp_data_reg, MMUAR
+ /* Search. */
+ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
+ movew \temp_data_reg, (MMUOR)
+ /* Set up tag value. */
+ movel #(\virt_addr + \asid + \shared_global + MMUTR_V), \temp_data_reg
+ movel \temp_data_reg, MMUTR
+ /* Set up data value. */
+ movel #(\phys_addr + \size_code + \cache_mode + \super_prot + \readable + \writable + \executable + \locked), \temp_data_reg
+ movel \temp_data_reg, MMUDR
+ /* Save it. */
+ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
+ movew \temp_data_reg, (MMUOR)
+.endm /* mmu_map */
+
+.macro mmu_unmap virt_addr,itlb,temp_data_reg
+ /* Set up search of TLB. */
+ movel #(\virt_addr+1), \temp_data_reg
+ movel \temp_data_reg, MMUAR
+ /* Search. */
+ movel #(MMUOR_STLB + MMUOR_ADR +\itlb), \temp_data_reg
+ movew \temp_data_reg, (MMUOR)
+ /* Test for hit. */
+ movel MMUSR,\temp_data_reg
+ btst #MMUSR_HITN,\temp_data_reg
+ beq 1f
+ /* Read the TLB. */
+ movel #(MMUOR_RW + MMUOR_ACC +\itlb), \temp_data_reg
+ movew \temp_data_reg, (MMUOR)
+ movel MMUSR,\temp_data_reg
+ /* Set up tag value. */
+ movel #0, \temp_data_reg
+ movel \temp_data_reg, MMUTR
+ /* Set up data value. */
+ movel #0, \temp_data_reg
+ movel \temp_data_reg, MMUDR
+ /* Save it. */
+ movel #(MMUOR_ACC + MMUOR_UAA + \itlb), \temp_data_reg
+ movew \temp_data_reg, (MMUOR)
+1:
+.endm /* mmu_unmap */
+
+/* .text */
+.section ".text.head","ax"
+ENTRY(_stext)
+/* Version numbers of the bootinfo interface -- if we later pass info
+ * from boot ROM we might want to put something real here.
+ *
+ * The area from _stext to _start will later be used as kernel pointer table
+ */
+ bras 1f /* Jump over bootinfo version numbers */
+
+ .long BOOTINFOV_MAGIC
+ .long 0
+1: jmp __start
+
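+/*
+ * kernel_pg_dir overlays the area at _stext; advancing the location
+ * counter by 0x1000 reserves one 4 KB page for it before _start.
+ */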
+.equ kernel_pg_dir,_stext
+.equ .,_stext+0x1000
+
+ENTRY(_start)
+ jra __start
+__INIT
+ENTRY(__start)
+/* Save the location of u-boot info - cmd line, bd_info, etc. */
+ movel %a7,%a4 /* Don't use %a4 before cf_early_init */
+ addl #0x00000004,%a4 /* offset past top */
+ addl #(PAGE_OFFSET-CONFIG_SDRAM_BASE),%a4 /* high mem offset */
+
+/* Setup initial stack pointer */
+ movel #CONFIG_SDRAM_BASE+0x1000,%sp
+
+/* Setup usp */
+ subl %a0,%a0
+ movel %a0,%usp
+
+#if defined(CONFIG_M5445X)
+#if defined(CONFIG_SRAM)
+ movel #(CONFIG_SRAM_BASE+0x221), %d0
+#else
+ movel #0x80000000, %d0
+#endif
+ movec %d0, %rambar1
+#elif defined(CONFIG_M547X_8X)
+ movel #MCF_MBAR, %d0
+ movec %d0, %mbar
+ move.l #(MCF_RAMBAR0 + 0x21), %d0
+ movec %d0, %rambar0
+ move.l #(MCF_RAMBAR1 + 0x21), %d0
+ movec %d0, %rambar1
+#endif
+
+ movew #0x2700,%sr
+
+/* reset cache */
+ movel #(CF_CACR_ICINVA + CF_CACR_DCINVA),%d0
+ movecl %d0,%cacr
+
+ movel #(MMU_BASE+1),%d0
+ movecl %d0,%mmubar
+ movel #MMUOR_CA,%a0 /* Clear tlb entries */
+ movew %a0,(MMUOR)
+ movel #(MMUOR_CA + MMUOR_ITLB),%a0 /* Use ITLB for searches */
+ movew %a0,(MMUOR)
+ movel #0,%a0 /* Clear Addr Space User ID */
+ movecl %a0,%asid
+
+/* setup ACRs */
+ movel ACR0_DEFAULT, %d0 /* ACR0 (DATA) setup */
+ movec %d0, %acr0
+ nop
+ movel ACR1_DEFAULT, %d0 /* ACR1 (DATA) setup */
+ movec %d0, %acr1
+ nop
+ movel ACR2_DEFAULT, %d0 /* ACR2 (CODE) setup */
+ movec %d0, %acr2
+ nop
+ movel ACR3_DEFAULT, %d0 /* ACR3 (CODE) setup */
+ movec %d0, %acr3
+ nop
+
+ /* If you change the memory size to another value, make a matching
+ change to zones_size[] in paging_init() (cf-mmu.c). */
+
+#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+#if defined(CONFIG_M5445X)
+ /* Map 256MB as code */
+ mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_IC, MMUDR_SP, \
+ 0, 0, MMUDR_X, MMUDR_LK, %d0
+
+ /* Map 256MB as data also */
+ mmu_map (PAGE_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+1*0x1000000), (PHYS_OFFSET+1*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+2*0x1000000), (PHYS_OFFSET+2*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+3*0x1000000), (PHYS_OFFSET+3*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+4*0x1000000), (PHYS_OFFSET+4*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+5*0x1000000), (PHYS_OFFSET+5*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+6*0x1000000), (PHYS_OFFSET+6*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+7*0x1000000), (PHYS_OFFSET+7*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+8*0x1000000), (PHYS_OFFSET+8*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+9*0x1000000), (PHYS_OFFSET+9*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+10*0x1000000), (PHYS_OFFSET+10*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+11*0x1000000), (PHYS_OFFSET+11*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+12*0x1000000), (PHYS_OFFSET+12*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+13*0x1000000), (PHYS_OFFSET+13*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+14*0x1000000), (PHYS_OFFSET+14*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+15*0x1000000), (PHYS_OFFSET+15*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+
+ /* Map ATA registers -- sacrifice a data TLB due to the hw design */
+ mmu_map (0x90000000), (0x90000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+
+#elif defined(CONFIG_M547X_8X)
+
+ /* Map first 8 MB as code */
+ mmu_map (PAGE_OFFSET+0*1024*1024), (0*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+1*1024*1024), (1*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+2*1024*1024), (2*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+3*1024*1024), (3*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+4*1024*1024), (4*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+5*1024*1024), (5*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+6*1024*1024), (6*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+7*1024*1024), (7*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, MMUDR_X, \
+ MMUDR_LK, %d0
+
+ /* Map first 8 MB as data */
+ mmu_map (PAGE_OFFSET+0*1024*1024), (0*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+1*1024*1024), (1*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+2*1024*1024), (2*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+3*1024*1024), (3*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+4*1024*1024), (4*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+5*1024*1024), (5*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+6*1024*1024), (6*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (PAGE_OFFSET+7*1024*1024), (7*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+#endif
+ /*
+ * Do unity mapping to enable the MMU. Map first chunk of memory
+ * in place as code/data. The TLBs will be deleted after the MMU is
+ * enabled and we are executing in high memory.
+ */
+
+#if defined(CONFIG_M5445X)
+ /* Map first 16 MB as code */
+ mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), \
+ MMUOR_ITLB, 0, MMUTR_SG, MMUDR_SZ16M, MMUDR_INC, MMUDR_SP, 0, \
+ 0, MMUDR_X, 0, %d0
+ /* Map first 16 MB as data too */
+ mmu_map (PHYS_OFFSET+0*0x1000000), (PHYS_OFFSET+0*0x1000000), 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, 0, %d0
+#elif defined(CONFIG_M547X_8X)
+ /* Map first 4 MB as code */
+ mmu_map (0*1024*1024), (0*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, \
+ MMUDR_X, 0, %d0
+ mmu_map (1*1024*1024), (1*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, \
+ MMUDR_X, 0, %d0
+ mmu_map (2*1024*1024), (2*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, \
+ MMUDR_X, 0, %d0
+ mmu_map (3*1024*1024), (3*1024*1024), MMUOR_ITLB, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_IC, MMUDR_SP, 0, 0, \
+ MMUDR_X, 0, %d0
+
+ /* Map first 4 MB as data too */
+ mmu_map (0*1024*1024), (0*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DCB, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, 0, %d0
+ mmu_map (1*1024*1024), (1*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DCB, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, 0, %d0
+ mmu_map (2*1024*1024), (2*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DCB, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, 0, %d0
+ mmu_map (3*1024*1024), (3*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DCB, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, 0, %d0
+#endif
+#endif /* CONFIG_SDRAM_BASE != PAGE_OFFSET */
+
+ /* Turn on MMU */
+ movel #(MMUCR_EN),%a0
+ movel %a0,MMUCR
+ nop /* This synchs the pipeline after a write to MMUCR */
+
+ movel #__running_high,%a0 /* Get around PC-relative addressing. */
+ jmp %a0@
+
+ENTRY(__running_high)
+ load_symbol_address _stext,%sp
+ movel L(memory_start),%a0
+ movel %a0,availmem
+ load_symbol_address L(phys_kernel_start),%a0
+ load_symbol_address _stext,%a1
+ subl #_stext,%a1
+ addl #PAGE_OFFSET,%a1
+ movel %a1,%a0@
+
+/* zero bss */
+ lea _sbss,%a0
+ lea _ebss,%a1
+ clrl %d0
+_loop_bss:
+ movel %d0,(%a0)+
+ cmpl %a0,%a1
+ bne _loop_bss
+
+ /* Unmap unity mappings */
+#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+#if defined(CONFIG_M5445X)
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
+#elif defined(CONFIG_M547X_8X)
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), MMUOR_ITLB, %d0
+ mmu_unmap (PHYS_OFFSET+1*0x1000000), MMUOR_ITLB, %d0
+ mmu_unmap (PHYS_OFFSET+2*0x1000000), MMUOR_ITLB, %d0
+ mmu_unmap (PHYS_OFFSET+3*0x1000000), MMUOR_ITLB, %d0
+ mmu_unmap (PHYS_OFFSET+0*0x1000000), 0, %d0
+ mmu_unmap (PHYS_OFFSET+1*0x1000000), 0, %d0
+ mmu_unmap (PHYS_OFFSET+2*0x1000000), 0, %d0
+ mmu_unmap (PHYS_OFFSET+3*0x1000000), 0, %d0
+#endif
+#endif /* CONFIG_SDRAM_BASE != PAGE_OFFSET */
+
+/* create dma memory mirror TLB mapping */
+#if defined(CONFIG_M5445X)
+ mmu_map CONFIG_DMA_BASE, \
+ CONFIG_SDRAM_BASE, 0, 0, \
+ MMUTR_SG, MMUDR_SZ16M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, MMUDR_W, \
+ 0, MMUDR_LK, %d0
+#elif defined(CONFIG_M547X_8X)
+ mmu_map (CONFIG_DMA_BASE + 0*1024*1024), \
+ (CONFIG_SDRAM_BASE + 0*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 1*1024*1024), \
+ (CONFIG_SDRAM_BASE + 1*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 2*1024*1024), \
+ (CONFIG_SDRAM_BASE + 2*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 3*1024*1024), \
+ (CONFIG_SDRAM_BASE + 3*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 4*1024*1024), \
+ (CONFIG_SDRAM_BASE + 4*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 5*1024*1024), \
+ (CONFIG_SDRAM_BASE + 5*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 6*1024*1024), \
+ (CONFIG_SDRAM_BASE + 6*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+ mmu_map (CONFIG_DMA_BASE + 7*1024*1024), \
+ (CONFIG_SDRAM_BASE + 7*1024*1024), 0, 0, \
+ MMUTR_SG, MMUDR_SZ1M, MMUDR_DNCP, MMUDR_SP, MMUDR_R, \
+ MMUDR_W, 0, MMUDR_LK, %d0
+#endif
+
+/* Setup initial stack pointer */
+ lea init_task,%a2
+ lea init_thread_union+THREAD_SIZE,%sp
+ subl %a6,%a6 /* clear a6 for gdb */
+
+#ifdef CONFIG_MCF_USER_HALT
+/* Setup debug control reg to allow halts from user space */
+ lea wdbg_uhe,%a0
+ wdebug (%a0)
+#endif
+
+ movel %a4,uboot_info_stk /* save uboot info to variable */
+ jsr cf_early_init
+ jmp start_kernel
+
+.section ".text.head","ax"
+set_context:
+func_start set_context,%d0,(1*4)
+ movel 12(%sp),%d0
+ movec %d0,%asid
+func_return set_context
+
+#ifdef CONFIG_M5445X
+/*
+ * set_fpga(addr,val) on the M5445X
+ *
+ * Map in 0x00000000 -> 0x0fffffff and then do the write.
+ */
+set_fpga:
+ movew %sr,%d1
+ movew #0x2700,%sr
+ movel ACR0_FPGA, %d0
+ movec %d0, %acr0
+ nop
+ moveal 4(%sp),%a0
+ movel 8(%sp),%a0@
+ movel ACR0_DEFAULT, %d0
+ movec %d0, %acr0
+ nop
+ movew %d1,%sr
+ rts
+#endif
+
+ .data
+ .align 4
+
+availmem:
+ .long 0
+L(phys_kernel_start):
+ .long PAGE_OFFSET
+L(kernel_end):
+ .long 0
+L(memory_start):
+ .long PAGE_OFFSET_RAW
+
+#ifdef CONFIG_MCF_USER_HALT
+/*
+ * Enable User Halt Enable in the debug control register.
+ */
+wdbg_uhe:
+ .word 0x2c80 /* DR0 */
+ .word 0x00b0 /* 31:16 */
+ .word 0x0400 /* 15:0 -- enable UHE */
+ .word 0x0000 /* unused */
+#endif
+
+
--- /dev/null
+/*
+ * linux/arch/m68k/coldfire/ints.c -- General interrupt handling code
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Kurt Mahan kmahan@freescale.com
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * Based on:
+ * linux/arch/m68k/kernel/ints.c &
+ * linux/arch/m68knommu/5307/ints.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/machdep.h>
+#include <asm/irq_regs.h>
+
+#include <asm/mcfsim.h>
+
+/*
+ * IRQ Handler lists.
+ */
+static struct irq_node *irq_list[SYS_IRQS];
+static struct irq_controller *irq_controller[SYS_IRQS];
+static int irq_depth[SYS_IRQS];
+
+/*
+ * IRQ Controller
+ */
+#if defined(CONFIG_M5445X)
+void m5445x_irq_enable(unsigned int irq);
+void m5445x_irq_disable(unsigned int irq);
+static struct irq_controller m5445x_irq_controller = {
+ .name = "M5445X",
+ .lock = SPIN_LOCK_UNLOCKED,
+ .enable = m5445x_irq_enable,
+ .disable = m5445x_irq_disable,
+};
+#elif defined(CONFIG_M547X_8X)
+void m547x_8x_irq_enable(unsigned int irq);
+void m547x_8x_irq_disable(unsigned int irq);
+static struct irq_controller m547x_8x_irq_controller = {
+ .name = "M547X_8X",
+ .lock = SPIN_LOCK_UNLOCKED,
+ .enable = m547x_8x_irq_enable,
+ .disable = m547x_8x_irq_disable,
+};
+#else
+# error No IRQ controller defined
+#endif
+
+#define POOL_SIZE SYS_IRQS
+static struct irq_node pool[POOL_SIZE];
+static struct irq_node *get_irq_node(void);
+
+/* The number of spurious interrupts */
+unsigned int num_spurious;
+asmlinkage void handle_badint(struct pt_regs *regs);
+
+/*
+ * void init_IRQ(void)
+ *
+ * This function should be called during kernel startup to initialize
+ * the IRQ handling routines.
+ */
+void __init init_IRQ(void)
+{
+ int i;
+
+#if defined(CONFIG_M5445X)
+ for (i = 0; i < SYS_IRQS; i++)
+ irq_controller[i] = &m5445x_irq_controller;
+#elif defined(CONFIG_M547X_8X)
+ for (i = 0; i < SYS_IRQS; i++)
+ irq_controller[i] = &m547x_8x_irq_controller;
+#endif
+}
+
+/*
+ * process_int(unsigned long vec, struct pt_regs *fp)
+ *
+ * Process an interrupt. Called from entry.S.
+ */
+asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
+{
+ struct pt_regs *old_regs;
+ struct irq_node *node;
+ old_regs = set_irq_regs(fp);
+ kstat_cpu(0).irqs[vec]++;
+
+ node = irq_list[vec];
+ if (!node)
+ handle_badint(fp);
+ else {
+ do {
+ node->handler(vec, node->dev_id);
+ node = node->next;
+ } while (node);
+ }
+
+ set_irq_regs(old_regs);
+}
+
+/*
+ * show_interrupts( struct seq_file *p, void *v)
+ *
+ * Called to show all the current interrupt information.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+ struct irq_controller *contr;
+ struct irq_node *node;
+ int i = *(loff_t *) v;
+
+ if ((i < NR_IRQS) && (irq_list[i])) {
+ contr = irq_controller[i];
+ node = irq_list[i];
+ seq_printf(p, "%-8s %3u: %10u %s", contr->name, i,
+ kstat_cpu(0).irqs[i], node->devname);
+ while ((node = node->next))
+ seq_printf(p, ", %s", node->devname);
+
+ seq_printf(p, "\n");
+ }
+
+ return 0;
+}
+
+/*
+ * get_irq_node(void)
+ *
+ * Get an irq node from the pool.
+ */
+struct irq_node *get_irq_node(void)
+{
+ struct irq_node *p = pool;
+ int i;
+
+ for (i = 0; i < POOL_SIZE; i++, p++) {
+ if (!p->handler) {
+ memset(p, 0, sizeof(struct irq_node));
+ return p;
+ }
+ }
+ printk(KERN_INFO "%s(%s:%d): No more irq nodes, I suggest you \
+ increase POOL_SIZE", __FUNCTION__, __FILE__, __LINE__);
+ return NULL;
+}
+
+void init_irq_proc(void)
+{
+ /* Insert /proc/irq driver here */
+}
+
+int setup_irq(unsigned int irq, struct irq_node *node)
+{
+ struct irq_controller *contr;
+ struct irq_node **prev;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
+ printk("%s: Incorrect IRQ %d from %s\n",
+ __FUNCTION__, irq, node->devname);
+ return -ENXIO;
+ }
+
+ contr = irq_controller[irq];
+ spin_lock_irqsave(&contr->lock, flags);
+
+ prev = irq_list + irq;
+ if (*prev) {
+ /* Can't share interrupts unless both agree to */
+ if (!((*prev)->flags & node->flags & IRQF_SHARED)) {
+ spin_unlock_irqrestore(&contr->lock, flags);
+ printk(KERN_INFO "%s: -BUSY-Incorrect IRQ %d \n",
+ __FUNCTION__, irq);
+ return -EBUSY;
+ }
+ while (*prev)
+ prev = &(*prev)->next;
+ }
+
+ if (!irq_list[irq]) {
+ if (contr->startup)
+ contr->startup(irq);
+ else
+ contr->enable(irq);
+ }
+ node->next = NULL;
+ *prev = node;
+
+ spin_unlock_irqrestore(&contr->lock, flags);
+
+ return 0;
+}
+
+int request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags, const char *devname, void *dev_id)
+{
+ struct irq_node *node = get_irq_node();
+ int res;
+
+ if (!node) {
+ printk(KERN_INFO "%s:get_irq_node error %x\n",
+ __FUNCTION__,(unsigned int) node);
+ return -ENOMEM;
+ }
+ node->handler = handler;
+ node->flags = flags;
+ node->dev_id = dev_id;
+ node->devname = devname;
+
+ res = setup_irq(irq, node);
+ if (res)
+ node->handler = NULL;
+
+ return res;
+}
+EXPORT_SYMBOL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irq_controller *contr;
+ struct irq_node **p, *node;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+ return;
+ }
+
+ contr = irq_controller[irq];
+ spin_lock_irqsave(&contr->lock, flags);
+
+ p = irq_list + irq;
+ while ((node = *p)) {
+ if (node->dev_id == dev_id)
+ break;
+ p = &node->next;
+ }
+
+ if (node) {
+ *p = node->next;
+ node->handler = NULL;
+ } else
+ printk(KERN_DEBUG "%s: Removing probably wrong IRQ %d\n",
+ __FUNCTION__, irq);
+
+ if (!irq_list[irq]) {
+ if (contr->shutdown)
+ contr->shutdown(irq);
+ else
+ contr->disable(irq);
+ }
+
+ spin_unlock_irqrestore(&contr->lock, flags);
+}
+EXPORT_SYMBOL(free_irq);
+
+void enable_irq(unsigned int irq)
+{
+ struct irq_controller *contr;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+ return;
+ }
+
+ contr = irq_controller[irq];
+ spin_lock_irqsave(&contr->lock, flags);
+ if (irq_depth[irq]) {
+ if (!--irq_depth[irq]) {
+ if (contr->enable)
+ contr->enable(irq);
+ }
+ } else
+ WARN_ON(1);
+ spin_unlock_irqrestore(&contr->lock, flags);
+}
+EXPORT_SYMBOL(enable_irq);
+
+void disable_irq(unsigned int irq)
+{
+ struct irq_controller *contr;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !irq_controller[irq]) {
+ printk(KERN_DEBUG "%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+ return;
+ }
+
+ contr = irq_controller[irq];
+ spin_lock_irqsave(&contr->lock, flags);
+ if (!irq_depth[irq]++) {
+ if (contr->disable)
+ contr->disable(irq);
+ }
+ spin_unlock_irqrestore(&contr->lock, flags);
+}
+EXPORT_SYMBOL(disable_irq);
+
+void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
+EXPORT_SYMBOL(disable_irq_nosync);
+
+
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off(unsigned long irqs)
+{
+ return 0;
+}
+EXPORT_SYMBOL(probe_irq_off);
+
+asmlinkage void handle_badint(struct pt_regs *regs)
+{
+ kstat_cpu(0).irqs[0]++;
+ num_spurious++;
+ printk(KERN_DEBUG "unexpected interrupt from %u\n", regs->vector);
+}
+EXPORT_SYMBOL(handle_badint);
+
+unsigned int irq_canonicalize(unsigned int irq)
+{
+#ifdef CONFIG_Q40
+ if (MACH_IS_Q40 && irq == 11)
+ irq = 10;
+#endif
+ return irq;
+}
+
+EXPORT_SYMBOL(irq_canonicalize);
+
+#ifdef CONFIG_M5445X
+/*
+ * M5445X Implementation
+ */
+void m5445x_irq_enable(unsigned int irq)
+{
+ /* enable the interrupt hardware */
+ if (irq < 64)
+ return;
+
+ /* adjust past non-hardware ints */
+ irq -= 64;
+
+ /* check for eport */
+ if ((irq > 0) && (irq < 8)) {
+ /* enable eport */
+ MCF_EPORT_EPPAR &= ~(3 << (irq*2)); /* level */
+ MCF_EPORT_EPDDR &= ~(1 << irq); /* input */
+ MCF_EPORT_EPIER |= 1 << irq; /* irq enabled */
+ }
+
+ if (irq < 64) {
+ /* controller 0 */
+ MCF_INTC0_ICR(irq) = 0x02;
+ MCF_INTC0_CIMR = irq;
+ } else {
+ /* controller 1 */
+ irq -= 64;
+ MCF_INTC1_ICR(irq) = 0x02;
+ MCF_INTC1_CIMR = irq;
+ }
+}
+
+void m5445x_irq_disable(unsigned int irq)
+{
+ /* disable the interrupt hardware */
+ if (irq < 64)
+ return;
+
+ /* adjust past non-hardware ints */
+ irq -= 64;
+
+ /* check for eport */
+ if ((irq > 0) && (irq < 8)) {
+ /* disable eport */
+ MCF_EPORT_EPIER &= ~(1 << irq);
+ }
+
+ if (irq < 64) {
+ /* controller 0 */
+ MCF_INTC0_ICR(irq) = 0x00;
+ MCF_INTC0_SIMR = irq;
+ } else {
+ /* controller 1 */
+ irq -= 64;
+ MCF_INTC1_ICR(irq) = 0x00;
+ MCF_INTC1_SIMR = irq;
+ }
+}
+#elif defined(CONFIG_M547X_8X)
+/*
+ * M547X_8X Implementation
+ */
+void m547x_8x_irq_enable(unsigned int irq)
+{
+ /* enable the interrupt hardware */
+ if (irq < 64)
+ return;
+
+ /* adjust past non-hardware ints */
+ irq -= 64;
+
+ /* check for eport */
+ if ((irq > 0) && (irq < 8)) {
+ /* enable eport */
+ MCF_EPPAR &= ~(3 << (irq*2));
+ /* level */
+ MCF_EPDDR &= ~(1 << irq);
+ /* input */
+ MCF_EPIER |= 1 << irq;
+ /* irq enabled */
+ }
+
+ if (irq < 32) {
+ /* *grumble* don't set low bit of IMRL */
+ MCF_IMRL &= (~(1 << irq) & 0xfffffffe);
+ } else {
+ MCF_IMRH &= ~(1 << (irq - 32));
+ }
+}
+
+void m547x_8x_irq_disable(unsigned int irq)
+{
+ /* disable the interrupt hardware */
+ if (irq < 64)
+ return;
+
+ /* adjust past non-hardware ints */
+ irq -= 64;
+
+ /* check for eport */
+ if ((irq > 0) && (irq < 8)) {
+ /* disable eport */
+ MCF_EPIER &= ~(1 << irq);
+ }
+
+ if (irq < 32)
+ MCF_IMRL |= (1 << irq);
+ else
+ MCF_IMRH |= (1 << (irq - 32));
+}
+#endif
--- /dev/null
+/*
+ * ColdFire muldi3 assembly version
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ */
+
+#include <linux/linkage.h>
+.globl __muldi3
+
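+/*
+ * 64x64->64 bit multiply: mulsl only yields the low 32 bits of a product,
+ * so the low words are split into 16-bit halves and combined from partial
+ * products, and the two 32-bit cross terms are then added into the high
+ * word of the result.
+ */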
+ENTRY(__muldi3)
+ linkw %fp,#0
+ lea %sp@(-32),%sp
+ moveml %d2-%d7/%a2-%a3,%sp@
+ moveal %fp@(8), %a2
+ moveal %fp@(12), %a3
+ moveal %fp@(16), %a0
+ moveal %fp@(20),%a1
+ movel %a3,%d2
+ andil #65535,%d2
+ movel %a3,%d3
+ clrw %d3
+ swap %d3
+ movel %a1,%d0
+ andil #65535,%d0
+ movel %a1,%d1
+ clrw %d1
+ swap %d1
+ movel %d2,%d7
+ mulsl %d0,%d7
+ movel %d2,%d4
+ mulsl %d1,%d4
+ movel %d3,%d2
+ mulsl %d0,%d2
+ mulsl %d1,%d3
+ movel %d7,%d0
+ clrw %d0
+ swap %d0
+ addl %d0,%d4
+ addl %d2,%d4
+ cmpl %d4,%d2
+ blss 1f
+ addil #65536,%d3
+1:
+ movel %d4,%d0
+ clrw %d0
+ swap %d0
+ movel %d3,%d5
+ addl %d0,%d5
+ movew %d4,%d6
+ swap %d6
+ movew %d7,%d6
+ movel %d5,%d0
+ movel %d6,%d1
+ movel %a3,%d2
+ movel %a0,%d3
+ mulsl %d3,%d2
+ movel %a2,%d3
+ movel %a1,%d4
+ mulsl %d4,%d3
+ addl %d3,%d2
+ movel %d2,%d0
+ addl %d5,%d0
+ moveml %sp@, %d2-%d7/%a2-%a3
+ lea %sp@(32),%sp
+ unlk %fp
+ rts
--- /dev/null
+/*
+ * linux/arch/m68k/kernel/signal.c
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Derived from m68k/kernel/signal.c and the original authors are credited
+ * there.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/cf_uaccess.h>
+#include <asm/cf_pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
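+/*
+ * Extra bytes each exception stack-frame format carries beyond the basic
+ * frame.  Formats marked -1 are not expected here; a signal return with
+ * such a format is rejected as a bad frame.
+ */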
+const int frame_extra_sizes[16] = {
+ [1] = -1,
+ [2] = sizeof(((struct frame *)0)->un.fmt2),
+ [3] = sizeof(((struct frame *)0)->un.fmt3),
+ [4] = 0,
+ [5] = -1,
+ [6] = -1,
+ [7] = sizeof(((struct frame *)0)->un.fmt7),
+ [8] = -1,
+ [9] = sizeof(((struct frame *)0)->un.fmt9),
+ [10] = sizeof(((struct frame *)0)->un.fmta),
+ [11] = sizeof(((struct frame *)0)->un.fmtb),
+ [12] = -1,
+ [13] = -1,
+ [14] = -1,
+ [15] = -1,
+};
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+ old_sigset_t mask = regs->d3;
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->d0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+ sigset_t __user *unewset = (sigset_t __user *)regs->d1;
+ size_t sigsetsize = (size_t)regs->d2;
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->d0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+{
+ return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+ char __user *pretcode;
+ int sig;
+ int code;
+ struct sigcontext __user *psc;
+ char retcode[8];
+ unsigned long extramask[_NSIG_WORDS-1];
+ struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+ char __user *pretcode;
+ int sig;
+ struct siginfo __user *pinfo;
+ void __user *puc;
+ char retcode[8];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+#define FPCONTEXT_SIZE 216
+#define uc_fpstate uc_filler[0]
+#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
+
+#ifdef CONFIG_FPU
+static unsigned char fpu_version; /* version num of fpu, set by setup_frame */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+ int err = 1;
+
+ if (FPU_IS_EMU) {
+ /* restore registers */
+ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+ memcpy(current->thread.fp, sc->sc_fpregs, 24);
+ return 0;
+ }
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ /* Verify the frame format. */
+ if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+ goto out;
+ if (CPU_IS_020_OR_030) {
+ if (m68k_fputype & FPU_68881 &&
+ !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+ goto out;
+ if (m68k_fputype & FPU_68882 &&
+ !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+ goto out;
+ } else if (CPU_IS_040) {
+ if (!(sc->sc_fpstate[1] == 0x00 ||
+ sc->sc_fpstate[1] == 0x28 ||
+ sc->sc_fpstate[1] == 0x60))
+ goto out;
+ } else if (CPU_IS_060) {
+ if (!(sc->sc_fpstate[3] == 0x00 ||
+ sc->sc_fpstate[3] == 0x60 ||
+ sc->sc_fpstate[3] == 0xe0))
+ goto out;
+ } else if (CPU_IS_CFV4E) {
+ pr_debug("restore v4e fpu state at %s\n", __func__);
+ } else
+ goto out;
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fmovem %0,%/fp0-%/fp1\n\t"
+ QCHIP_RESTORE_DIRECTIVE
+ : /* no outputs */
+ : "m" (sc->sc_fpregs[0][0])
+ : "memory");
+ __asm__ volatile ("fmovel %0,%/fpcr"
+ : : "m" (sc->sc_fpcntl[0])
+ : "memory" );
+ __asm__ volatile ("fmovel %0,%/fpsr"
+ : : "m" (sc->sc_fpcntl[1])
+ : "memory" );
+ __asm__ volatile ("fmovel %0,%/fpiar"
+ : : "m" (sc->sc_fpcntl[2])
+ : "memory" );
+
+#endif
+ }
+
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("frestore %0\n\t"
+ QCHIP_RESTORE_DIRECTIVE : : "m" (*sc->sc_fpstate));
+#endif
+ err = 0;
+
+out:
+ return err;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+ unsigned char fpstate[FPCONTEXT_SIZE];
+ int context_size = CPU_IS_060 ? 8 : 0;
+ fpregset_t fpregs;
+ int err = 1;
+
+ if (FPU_IS_EMU) {
+ /* restore fpu control register */
+ if (__copy_from_user(current->thread.fpcntl,
+ uc->uc_mcontext.fpregs.f_fpcntl, 12))
+ goto out;
+ /* restore all other fpu register */
+ if (__copy_from_user(current->thread.fp,
+ uc->uc_mcontext.fpregs.f_fpregs, 96))
+ goto out;
+ return 0;
+ }
+
+ if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
+ goto out;
+ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+ if (!CPU_IS_060)
+ context_size = fpstate[1];
+ /* Verify the frame format. */
+ if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+ goto out;
+ if (CPU_IS_020_OR_030) {
+ if (m68k_fputype & FPU_68881 &&
+ !(context_size == 0x18 || context_size == 0xb4))
+ goto out;
+ if (m68k_fputype & FPU_68882 &&
+ !(context_size == 0x38 || context_size == 0xd4))
+ goto out;
+ } else if (CPU_IS_040) {
+ if (!(context_size == 0x00 ||
+ context_size == 0x28 ||
+ context_size == 0x60))
+ goto out;
+ } else if (CPU_IS_060) {
+ if (!(fpstate[3] == 0x00 ||
+ fpstate[3] == 0x60 ||
+ fpstate[3] == 0xe0))
+ goto out;
+ } else if (CPU_IS_CFV4E) {
+ pr_debug("restore coldfire rt v4e fpu"
+ " state at %s\n", __func__);
+ } else
+ goto out;
+ if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+ sizeof(fpregs)))
+ goto out;
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fmovem %0,%/fp0-%/fp7\n\t"
+ QCHIP_RESTORE_DIRECTIVE
+ : /* no outputs */
+ : "m" (fpregs.f_fpregs[0][0])
+ : "memory");
+ __asm__ volatile ("fmovel %0,%/fpcr"
+ : : "m" (fpregs.f_fpcntl[0])
+ : "memory" );
+ __asm__ volatile ("fmovel %0,%/fpsr"
+ : : "m" (fpregs.f_fpcntl[1])
+ : "memory" );
+ __asm__ volatile ("fmovel %0,%/fpiar"
+ : : "m" (fpregs.f_fpcntl[2])
+ : "memory" );
+#endif
+ }
+ if (context_size &&
+ __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
+ context_size))
+ goto out;
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("frestore %0\n\t"
+ QCHIP_RESTORE_DIRECTIVE : : "m" (*fpstate));
+#endif
+ err = 0;
+
+out:
+ return err;
+}
+#endif
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc,
+ void __user *fp, int *pd0)
+{
+ int fsize, formatvec;
+ struct sigcontext context;
+ int err = 0;
+
+ /* get previous context */
+ if (copy_from_user(&context, usc, sizeof(context)))
+ goto badframe;
+
+ /* restore passed registers */
+ regs->d1 = context.sc_d1;
+ regs->a0 = context.sc_a0;
+ regs->a1 = context.sc_a1;
+ regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+ regs->pc = context.sc_pc;
+ regs->orig_d0 = -1; /* disable syscall checks */
+ wrusp(context.sc_usp);
+ formatvec = context.sc_formatvec;
+ regs->format = formatvec >> 12;
+ regs->vector = formatvec & 0xfff;
+
+#ifdef CONFIG_FPU
+ err = restore_fpu_state(&context);
+#endif
+
+ fsize = frame_extra_sizes[regs->format];
+ if (fsize < 0) {
+ /*
+ * user process trying to return with weird frame format
+ */
+#ifdef DEBUG
+ printk(KERN_DEBUG "user process returning with weird \
+ frame format\n");
+#endif
+ goto badframe;
+ }
+
+ /* OK. Make room on the supervisor stack for the extra junk,
+ * if necessary.
+ */
+
+ {
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ regs->d0 = context.sc_d0;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+ __asm__ __volatile__
+ (" movel %0,%/sp\n\t"
+ " bra ret_from_signal\n"
+ "4:\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,4b\n"
+ ".previous"
+ : /* no outputs, it doesn't ever return */
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+ "n" (frame_offset), "a" (fp)
+ : "a0");
+#undef frame_offset
+ /*
+ * If we ever get here an exception occurred while
+ * building the above stack-frame.
+ */
+ goto badframe;
+ }
+
+ *pd0 = context.sc_d0;
+ return err;
+
+badframe:
+ return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+ struct ucontext __user *uc, int *pd0)
+{
+ int fsize, temp;
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
+ unsigned long usp;
+ int err;
+
+ err = __get_user(temp, &uc->uc_mcontext.version);
+ if (temp != MCONTEXT_VERSION)
+ goto badframe;
+ /* restore passed registers */
+ err |= __get_user(regs->d0, &gregs[0]);
+ err |= __get_user(regs->d1, &gregs[1]);
+ err |= __get_user(regs->d2, &gregs[2]);
+ err |= __get_user(regs->d3, &gregs[3]);
+ err |= __get_user(regs->d4, &gregs[4]);
+ err |= __get_user(regs->d5, &gregs[5]);
+ err |= __get_user(sw->d6, &gregs[6]);
+ err |= __get_user(sw->d7, &gregs[7]);
+ err |= __get_user(regs->a0, &gregs[8]);
+ err |= __get_user(regs->a1, &gregs[9]);
+ err |= __get_user(regs->a2, &gregs[10]);
+ err |= __get_user(sw->a3, &gregs[11]);
+ err |= __get_user(sw->a4, &gregs[12]);
+ err |= __get_user(sw->a5, &gregs[13]);
+ err |= __get_user(sw->a6, &gregs[14]);
+ err |= __get_user(usp, &gregs[15]);
+ wrusp(usp);
+ err |= __get_user(regs->pc, &gregs[16]);
+ err |= __get_user(temp, &gregs[17]);
+ regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+ regs->orig_d0 = -1; /* disable syscall checks */
+ err |= __get_user(temp, &uc->uc_formatvec);
+ regs->format = temp >> 12;
+ regs->vector = temp & 0xfff;
+
+#ifdef CONFIG_FPU
+ err |= rt_restore_fpu_state(uc);
+#endif
+
+ if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+ goto badframe;
+
+ fsize = frame_extra_sizes[regs->format];
+ if (fsize < 0) {
+ /*
+ * user process trying to return with weird frame format
+ */
+#ifdef DEBUG
+ printk(KERN_DEBUG "user process returning with weird \
+ frame format\n");
+#endif
+ goto badframe;
+ }
+
+ /* OK. Make room on the supervisor stack for the extra junk,
+ * if necessary.
+ */
+
+ {
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+ __asm__ __volatile__
+ (" movel %0,%/sp\n\t"
+ " bra ret_from_signal\n"
+ "4:\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,4b\n"
+ ".previous"
+ : /* no outputs, it doesn't ever return */
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+ "n" (frame_offset), "a" (&uc->uc_extra)
+ : "a0");
+#undef frame_offset
+ /*
+ * If we ever get here an exception occurred while
+ * building the above stack-frame.
+ */
+ goto badframe;
+ }
+
+ *pd0 = regs->d0;
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+ unsigned long usp = rdusp();
+ struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
+ sigset_t set;
+ int d0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+ (_NSIG_WORDS > 1 &&
+ __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
+ goto badframe;
+ return d0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+ unsigned long usp = rdusp();
+ struct rt_sigframe __user *frame =
+ (struct rt_sigframe __user *)(usp - 4);
+ sigset_t set;
+ int d0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
+ goto badframe;
+ return d0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+#ifdef CONFIG_FPU
+/*
+ * Set up a signal frame.
+ */
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+ if (FPU_IS_EMU) {
+ /* save registers */
+ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+ memcpy(sc->sc_fpregs, current->thread.fp, 24);
+ return;
+ }
+
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fsave %0\n\t"
+ QCHIP_RESTORE_DIRECTIVE
+ : : "m" (*sc->sc_fpstate) : "memory");
+#endif
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ fpu_version = sc->sc_fpstate[0];
+ if (CPU_IS_020_OR_030 &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+ if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
+ sc->sc_fpstate[0x38] |= 1 << 3;
+ }
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fmovemd %/fp0-%/fp1,%0"
+ : : "m" (sc->sc_fpregs[0][0])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpcr,%0"
+ : : "m" (sc->sc_fpcntl[0])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpsr,%0"
+ : : "m" (sc->sc_fpcntl[1])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpiar,%0"
+ : : "m" (sc->sc_fpcntl[2])
+ : "memory");
+
+#endif
+ }
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc,
+ struct pt_regs *regs)
+{
+ unsigned char fpstate[FPCONTEXT_SIZE];
+ int context_size = CPU_IS_060 ? 8 : 0;
+ int err = 0;
+
+ if (FPU_IS_EMU) {
+ /* save fpu control register */
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+ current->thread.fpcntl, 12);
+ /* save all other fpu register */
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+ current->thread.fp, 96);
+ return err;
+ }
+
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fsave %0\n\t"
+ QCHIP_RESTORE_DIRECTIVE
+ : : "m" (*fpstate) : "memory");
+#endif
+ err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+ fpregset_t fpregs;
+ if (!CPU_IS_060)
+ context_size = fpstate[1];
+ fpu_version = fpstate[0];
+#ifdef CONFIG_CFV4E
+ __asm__ volatile ("fmovemd %/fp0-%/fp7,%0"
+ : : "m" (fpregs.f_fpregs[0][0])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpcr,%0"
+ : : "m" (fpregs.f_fpcntl[0])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpsr,%0"
+ : : "m" (fpregs.f_fpcntl[1])
+ : "memory");
+ __asm__ volatile ("fmovel %/fpiar,%0"
+ : : "m" (fpregs.f_fpcntl[2])
+ : "memory");
+#endif
+ err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
+ sizeof(fpregs));
+ }
+ if (context_size)
+ err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+ context_size);
+ return err;
+}
+#endif
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ sc->sc_mask = mask;
+ sc->sc_usp = rdusp();
+ sc->sc_d0 = regs->d0;
+ sc->sc_d1 = regs->d1;
+ sc->sc_a0 = regs->a0;
+ sc->sc_a1 = regs->a1;
+ sc->sc_sr = regs->sr;
+ sc->sc_pc = regs->pc;
+ sc->sc_formatvec = regs->format << 12 | regs->vector;
+#ifdef CONFIG_FPU
+ save_fpu_state(sc, regs);
+#endif
+}
+
+static inline int rt_setup_ucontext(struct ucontext __user *uc,
+ struct pt_regs *regs)
+{
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ greg_t __user *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+ err |= __put_user(regs->d0, &gregs[0]);
+ err |= __put_user(regs->d1, &gregs[1]);
+ err |= __put_user(regs->d2, &gregs[2]);
+ err |= __put_user(regs->d3, &gregs[3]);
+ err |= __put_user(regs->d4, &gregs[4]);
+ err |= __put_user(regs->d5, &gregs[5]);
+ err |= __put_user(sw->d6, &gregs[6]);
+ err |= __put_user(sw->d7, &gregs[7]);
+ err |= __put_user(regs->a0, &gregs[8]);
+ err |= __put_user(regs->a1, &gregs[9]);
+ err |= __put_user(regs->a2, &gregs[10]);
+ err |= __put_user(sw->a3, &gregs[11]);
+ err |= __put_user(sw->a4, &gregs[12]);
+ err |= __put_user(sw->a5, &gregs[13]);
+ err |= __put_user(sw->a6, &gregs[14]);
+ err |= __put_user(rdusp(), &gregs[15]);
+ err |= __put_user(regs->pc, &gregs[16]);
+ err |= __put_user(regs->sr, &gregs[17]);
+ err |= __put_user((regs->format << 12) | regs->vector,
+ &uc->uc_formatvec);
+#ifdef CONFIG_FPU
+ err |= rt_save_fpu_state(uc, regs);
+#endif
+ return err;
+}
+
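+/*
+ * Make the signal trampoline just written to the user stack visible to
+ * instruction fetch: on m5445x translate the user virtual address to a
+ * physical address first, then flush/invalidate the caches for those
+ * 8 bytes.
+ */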
+static inline void push_cache(unsigned long vaddr)
+{
+#ifdef CONFIG_M5445X
+ pgd_t *pdir;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long paddr;
+
+ pdir = pgd_offset(current->mm, vaddr);
+ pmdp = pmd_offset(pdir, vaddr);
+ ptep = pte_offset_map(pmdp, vaddr);
+ paddr = ((pte_val(*ptep) & PAGE_MASK) | (vaddr & ~PAGE_MASK));
+ cf_icache_flush_range(paddr, paddr + 8);
+#elif defined(CONFIG_M547X_8X)
+ flush_icache_range(vaddr, vaddr + 8);
+#endif
+}
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = rdusp();
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (!sas_ss_flags(usp))
+ usp = current->sas_ss_sp + current->sas_ss_size;
+ }
+ return (void __user *)((usp - frame_size) & -8UL);
+}
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame;
+ int fsize = frame_extra_sizes[regs->format];
+ struct sigcontext context;
+ int err = 0;
+
+ if (fsize < 0) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
+ regs->format);
+#endif
+ goto give_sigsegv;
+ }
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+
+ err |= __put_user(regs->vector, &frame->code);
+ err |= __put_user(&frame->sc, &frame->psc);
+
+ if (_NSIG_WORDS > 1)
+ err |= copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+
+ setup_sigcontext(&context, regs, set->sig[0]);
+ err |= copy_to_user(&frame->sc, &context, sizeof(context));
+
+ /* Set up to return from userspace. */
+ err |= __put_user(frame->retcode, &frame->pretcode);
+ /* moveq #,d0; trap #0 */
+ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+ (long __user *)(frame->retcode));
+
+ if (err)
+ goto give_sigsegv;
+
+ push_cache((unsigned long) &frame->retcode);
+
+ /* Set up registers for signal handler */
+ wrusp((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+ /* Prepare to skip over the extra stuff in the exception frame. */
+ if (regs->stkadj) {
+ struct pt_regs *tregs =
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+ regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int fsize = frame_extra_sizes[regs->format];
+ int err = 0;
+
+ if (fsize < 0) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "setup_frame: Unknown frame format %#x\n",
+ regs->format);
+#endif
+ goto give_sigsegv;
+ }
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (fsize) {
+ err |= copy_to_user(&frame->uc.uc_extra, regs + 1, fsize);
+ regs->stkadj = fsize;
+ }
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __put_user((void __user *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(rdusp()),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= rt_setup_ucontext(&frame->uc, regs);
+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+ err |= __put_user(frame->retcode, &frame->pretcode);
+
+ /* movel #__NR_rt_sigreturn(0xAD),d0; trap #0 */
+ err |= __put_user(0x203c0000, (long *)(frame->retcode + 0));
+ err |= __put_user(0x00ad4e40, (long *)(frame->retcode + 4));
+
+ if (err)
+ goto give_sigsegv;
+
+ push_cache((unsigned long) &frame->retcode);
+
+ /* Set up registers for signal handler */
+ wrusp((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+ /* Prepare to skip over the extra stuff in the exception frame. */
+ if (regs->stkadj) {
+ struct pt_regs *tregs =
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+ printk(KERN_DEBUG "Performing stackadjust=%04x\n",
+ regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->d0) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->d0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->d0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+do_restart:
+ regs->d0 = regs->orig_d0;
+ regs->pc -= 2;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ /* are we from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* If so, check system call restarting.. */
+ handle_restart(regs, ka, 1);
+
+ /* set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct k_sigaction ka;
+ int signr;
+
+ current->thread.esp0 = (unsigned long) regs;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* Restart the system call - no handlers present */
+ handle_restart(regs, NULL, 0);
+
+ return 0;
+}
--- /dev/null
+/*
+ * linux/arch/m68k/coldfire/time.c
+ *
+ * This file contains the coldfire specific time handling pieces.
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * based on linux/arch/m68k/kernel/time.c
+ */
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <linux/io.h>
+#include <asm/irq_regs.h>
+
+#include <linux/profile.h>
+#include <asm/mcfsim.h>
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+/*extern unsigned long long sys_dtim0_read(void);
+extern void sys_dtim_init(void);*/
+extern unsigned long long sys_dtim2_read(void);
+extern void sys_dtim2_init(void);
+static int cfv4_set_next_event(unsigned long evt,
+ struct clock_event_device *dev);
+static void cfv4_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev);
+#if defined(CONFIG_M5445X)
+#define FREQ (MCF_BUSCLK / 16)
+#else
+#define FREQ (MCF_BUSCLK)
+#endif
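+/*
+ * FREQ is the rate the DTIM2 timer counts at.  The m5445x timer setup is
+ * assumed to use a /16 prescaler, hence bus clock divided by 16, while
+ * the m547x/8x timer is assumed to run at the full bus clock.
+ */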
+/*
+ * realtime clock dummy code
+ */
+
+static unsigned long null_rtc_get_time(void)
+{
+ return mktime(2008, 1, 1, 0, 0, 0);
+}
+
+static int null_rtc_set_time(unsigned long sec)
+{
+ return 0;
+}
+
+static unsigned long (*cf_rtc_get_time)(void) = null_rtc_get_time;
+static int (*cf_rtc_set_time)(unsigned long) = null_rtc_set_time;
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+
+/*
+ * old pre-GENERIC clock code
+ */
+
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static irqreturn_t timer_interrupt(int irq, void *dummy)
+{
+#ifdef CONFIG_COLDFIRE
+ /* kick hardware timer if necessary */
+ if (mach_tick)
+ mach_tick();
+#endif
+ do_timer(1);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+#endif
+ profile_tick(CPU_PROFILING);
+
+#ifdef CONFIG_HEARTBEAT
+ /* use power LED as a heartbeat instead -- much more useful
+ for debugging -- based on the version for PReP by Cort */
+ /* acts like an actual heart beat -- ie thump-thump-pause... */
+ if (mach_heartbeat) {
+ static unsigned cnt, period, dist;
+
+ if (cnt == 0 || cnt == dist)
+ mach_heartbeat(1);
+ else if (cnt == 7 || cnt == dist+7)
+ mach_heartbeat(0);
+
+ if (++cnt > period) {
+ cnt = 0;
+ /* The hyperbolic function below modifies
+ * the heartbeat period length in dependency
+ * of the current (5min) load. It goes through
+ * the points f(0)=126, f(1)=86, f(5)=51,
+ * f(inf)->30. */
+ period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT)))
+ + 30;
+ dist = period / 4;
+ }
+ }
+#endif /* CONFIG_HEARTBEAT */
+ return IRQ_HANDLED;
+}
+
+void __init time_init(void)
+{
+ struct rtc_time time;
+
+ if (mach_hwclk) {
+ mach_hwclk(0, &time);
+ time.tm_year += 1900;
+ if (time.tm_year < 1970)
+ time.tm_year += 100;
+ xtime.tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
+ time.tm_hour, time.tm_min, time.tm_sec);
+ xtime.tv_nsec = 0;
+ }
+ wall_to_monotonic.tv_sec = -xtime.tv_sec;
+
+ mach_sched_init(timer_interrupt);
+}
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+#ifndef CONFIG_GENERIC_TIME
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ unsigned long seq;
+ unsigned long usec, sec;
+ unsigned long max_ntp_tick = tick_usec - tickadj;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+ usec = mach_gettimeoffset();
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0))
+ usec = min(usec, max_ntp_tick);
+
+ sec = xtime.tv_sec;
+ usec += xtime.tv_nsec/1000;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+EXPORT_SYMBOL(do_gettimeofday);
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /* This is revolting. We need to set the xtime.tv_nsec
+ * correctly. However, the value in this location is
+ * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ nsec -= 1000 * mach_gettimeoffset();
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ ntp_clear();
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+EXPORT_SYMBOL(do_settimeofday);
+#endif /* !CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+/*
+ * Clock event setup
+ */
+static struct clock_event_device clockevent_cfv4 = {
+ .name = "CFV4 timer2even",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .shift = 20,
+ .set_mode = cfv4_set_mode,
+ .set_next_event = cfv4_set_next_event,
+};
+
+static int cfv4_set_next_event(unsigned long evt,
+ struct clock_event_device *dev)
+{
+ return 0;
+}
+
+static void cfv4_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ if (mode != CLOCK_EVT_MODE_ONESHOT)
+ cfv4_set_next_event((FREQ / HZ), dev);
+}
+
+static int __init cfv4_clockevent_init(void)
+{
+ clockevent_cfv4.mult =
+ div_sc(FREQ, NSEC_PER_SEC,
+ clockevent_cfv4.shift);
+ clockevent_cfv4.max_delta_ns =
+ clockevent_delta2ns((FREQ / HZ),
+ &clockevent_cfv4);
+ clockevent_cfv4.min_delta_ns =
+ clockevent_delta2ns(1, &clockevent_cfv4);
+
+ clockevent_cfv4.cpumask = &cpumask_of_cpu(0);
+
+ printk(KERN_INFO "timer: register clockevent\n");
+ clockevents_register_device(&clockevent_cfv4);
+
+ return 0;
+}
+
+/*
+ * clocksource setup
+ */
+
+struct clocksource clocksource_cfv4 = {
+ .name = "ColdfireV4",
+ .rating = 250,
+ .mask = CLOCKSOURCE_MASK(32),
+ .read = sys_dtim2_read,
+ .shift = 20,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+/*
+ * Initialize time subsystem. Called from linux/init/main.c
+ */
+void __init time_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Initializing time\n");
+#if 0
+ /* initialize system clocks */
+ clk_init();
+#endif
+ cfv4_clockevent_init();
+ /* initialize the system timer */
+ /*sys_dtim_init();*/
+ sys_dtim2_init();
+ /* setup initial system time */
+ xtime.tv_sec = cf_rtc_get_time();
+ xtime.tv_nsec = 0;
+ set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec,
+ -xtime.tv_nsec);
+
+ /* JKM */
+ clocksource_cfv4.mult = clocksource_hz2mult(FREQ,
+ clocksource_cfv4.shift);
+
+ /* register our clocksource */
+ ret = clocksource_register(&clocksource_cfv4);
+ if (ret)
+ printk(KERN_ERR "timer: unable to "
+ "register clocksource - %d\n", ret);
+}
+
+/*
+ * sysfs pieces
+ */
+
+static struct sysdev_class timer_class = {
+ .name = "timer",
+};
+
+static struct sys_device timer_device = {
+ .id = 0,
+ .cls = &timer_class,
+};
+
+static int __init timer_init_sysfs(void)
+{
+ int err = sysdev_class_register(&timer_class);
+ if (!err)
+ err = sysdev_register(&timer_device);
+ return err;
+}
+device_initcall(timer_init_sysfs);
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
--- /dev/null
+/*
+ * linux/arch/m68knommu/kernel/traps.c
+ *
+ * Copyright Freescale Semiconductor, Inc. 2008-2009
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/a.out.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgtable.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+static char const * const vec_names[] = {
+ "RESET SP", "RESET PC", "BUS ERROR", "ADDRESS ERROR",
+ "ILLEGAL INSTRUCTION", "ZERO DIVIDE", "CHK", "TRAPcc",
+ "PRIVILEGE VIOLATION", "TRACE", "LINE 1010", "LINE 1111",
+ "UNASSIGNED RESERVED 12", "COPROCESSOR PROTOCOL VIOLATION",
+ "FORMAT ERROR", "UNINITIALIZED INTERRUPT",
+ "UNASSIGNED RESERVED 16", "UNASSIGNED RESERVED 17",
+ "UNASSIGNED RESERVED 18", "UNASSIGNED RESERVED 19",
+ "UNASSIGNED RESERVED 20", "UNASSIGNED RESERVED 21",
+ "UNASSIGNED RESERVED 22", "UNASSIGNED RESERVED 23",
+ "SPURIOUS INTERRUPT", "LEVEL 1 INT", "LEVEL 2 INT", "LEVEL 3 INT",
+ "LEVEL 4 INT", "LEVEL 5 INT", "LEVEL 6 INT", "LEVEL 7 INT",
+ "SYSCALL", "TRAP #1", "TRAP #2", "TRAP #3",
+ "TRAP #4", "TRAP #5", "TRAP #6", "TRAP #7",
+ "TRAP #8", "TRAP #9", "TRAP #10", "TRAP #11",
+ "TRAP #12", "TRAP #13", "TRAP #14", "TRAP #15",
+ "FPCP BSUN", "FPCP INEXACT", "FPCP DIV BY 0", "FPCP UNDERFLOW",
+ "FPCP OPERAND ERROR", "FPCP OVERFLOW", "FPCP SNAN",
+ "FPCP UNSUPPORTED OPERATION",
+ "MMU CONFIGURATION ERROR"
+};
+
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+asmlinkage void trap_c(struct frame *fp);
+extern void __init coldfire_trap_init(void);
+
+void __init trap_init(void)
+{
+ coldfire_trap_init();
+}
+
+/* The following table converts the FS encoding of a ColdFire
+ exception stack frame into the error_code value needed by
+ do_fault. */
+
+static const unsigned char fs_err_code[] = {
+ 0, /* 0000 */
+ 0, /* 0001 */
+ 0, /* 0010 */
+ 0, /* 0011 */
+ 1, /* 0100 */
+ 0, /* 0101 */
+ 0, /* 0110 */
+ 0, /* 0111 */
+ 2, /* 1000 */
+ 3, /* 1001 */
+ 2, /* 1010 */
+ 0, /* 1011 */
+ 1, /* 1100 */
+ 1, /* 1101 */
+ 0, /* 1110 */
+ 0 /* 1111 */
+};
+
+#ifdef DEBUG
+static const char *fs_err_msg[16] = {
+ "Normal",
+ "Reserved",
+ "Interrupt during debug service routine",
+ "Reserved",
+ "X Protection",
+ "TLB X miss (opword)",
+ "TLB X miss (ext. word)",
+ "IFP in emulator mode",
+ "W Protection",
+ "Write error",
+ "TLB W miss",
+ "Reserved",
+ "R Protection",
+ "R/RMW Protection",
+ "TLB R miss",
+ "OEP in emulator mode",
+};
+#endif
+
+static inline void access_errorCF(struct frame *fp)
+{
+ unsigned long int mmusr, complainingAddress;
+ unsigned int err_code, fs;
+ int need_page_fault;
+
+ mmusr = fp->ptregs.mmusr;
+ complainingAddress = fp->ptregs.mmuar;
+#ifdef DEBUG
+ printk(KERN_DEBUG "pc %#lx, mmusr %#lx, complainingAddress %#lx\n", \
+ fp->ptregs.pc, mmusr, complainingAddress);
+#endif
+
+ /*
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ */
+
+ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
+ switch (fs) {
+ case 5: /* 0101 TLB opword X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+ complainingAddress = fp->ptregs.pc;
+ break;
+ case 6: /* 0110 TLB extension word X miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+ complainingAddress = fp->ptregs.pc + sizeof(long);
+ break;
+ case 10: /* 1010 TLB W miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+ break;
+ case 14: /* 1110 TLB R miss */
+ need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+ break;
+ default:
+ /* 0000 Normal */
+ /* 0001 Reserved */
+ /* 0010 Interrupt during debug service routine */
+ /* 0011 Reserved */
+ /* 0100 X Protection */
+ /* 0111 IFP in emulator mode */
+ /* 1000 W Protection*/
+ /* 1001 Write error*/
+ /* 1011 Reserved*/
+ /* 1100 R Protection*/
+ /* 1101 R Protection*/
+ /* 1111 OEP in emulator mode*/
+ need_page_fault = 1;
+ break;
+ }
+
+ if (need_page_fault) {
+ err_code = fs_err_code[fs];
+ if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+ err_code |= 2; /* bit1 - write, bit0 - protection */
+ do_page_fault(&fp->ptregs, complainingAddress, err_code);
+ }
+}
+
+void die_if_kernel(char *str, struct pt_regs *fp, int nr)
+{
+ if (!(fp->sr & PS_S))
+ return;
+
+ console_verbose();
+ printk(KERN_EMERG "%s: %08x\n", str, nr);
+ printk(KERN_EMERG "PC: [<%08lx>]", fp->pc);
+ print_symbol(" %s", fp->pc);
+ printk(KERN_EMERG "\nSR: %04x SP: %p a2: %08lx\n",
+ fp->sr, fp, fp->a2);
+ printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
+ fp->d0, fp->d1, fp->d2, fp->d3);
+ printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
+ fp->d4, fp->d5, fp->a0, fp->a1);
+
+ printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+ show_stack(NULL, (unsigned long *)fp);
+ do_exit(SIGSEGV);
+}
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+ unsigned int fs;
+
+ /* Only set esp0 if coming from user mode */
+ if (user_mode(&fp->ptregs))
+ current->thread.esp0 = (unsigned long) fp;
+
+ fs = (fp->ptregs.fs2 << 2) | fp->ptregs.fs1;
+#if defined(DEBUG)
+ printk(KERN_DEBUG "*** Bus Error *** (%x)%s\n", fs,
+ fs_err_msg[fs & 0xf]);
+#endif
+ switch (fs) {
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xa:
+ case 0xd:
+ case 0xe:
+ case 0xf:
+ access_errorCF(fp);
+ break;
+ default:
+ die_if_kernel("bad frame format", &fp->ptregs, 0);
+#if defined(DEBUG)
+ printk(KERN_DEBUG "Unknown SIGSEGV - 4\n");
+#endif
+ force_sig(SIGSEGV, current);
+ }
+}
+
+void show_trace(unsigned long *stack)
+{
+ unsigned long *endstack;
+ unsigned long addr;
+ int i;
+
+ printk("Call Trace:");
+ addr = (unsigned long)stack + THREAD_SIZE - 1;
+ endstack = (unsigned long *)(addr & -THREAD_SIZE);
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+ if (i % 5 == 0)
+ printk("\n ");
+#endif
+ printk(" [<%08lx>] %pS\n", addr, (void *)addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+int kstack_depth_to_print = 48;
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *p;
+ unsigned long *endstack;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.esp0;
+ else
+ stack = (unsigned long *)&stack;
+ }
+ endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+ printk("Stack from %08lx:", (unsigned long)stack);
+ p = stack;
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (p + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ printk("\n ");
+ printk(" %08lx", *p++);
+ }
+ printk("\n");
+ show_trace(stack);
+}
+
+void bad_super_trap(struct frame *fp)
+{
+ console_verbose();
+ if (fp->ptregs.vector < sizeof(vec_names)/sizeof(vec_names[0]))
+ printk(KERN_WARNING "*** %s *** FORMAT=%X\n",
+ vec_names[fp->ptregs.vector],
+ fp->ptregs.format);
+ else
+ printk(KERN_WARNING "*** Exception %d *** FORMAT=%X\n",
+ fp->ptregs.vector,
+ fp->ptregs.format);
+ printk(KERN_WARNING "Current process id is %d\n", current->pid);
+ die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+ int sig;
+ siginfo_t info;
+
+ if (fp->ptregs.sr & PS_S) {
+ if (fp->ptregs.vector == VEC_TRACE) {
+ /* traced a trapping instruction */
+ current->ptrace |= PT_DTRACE;
+ } else
+ bad_super_trap(fp);
+ return;
+ }
+
+ /* send the appropriate signal to the user program */
+ switch (fp->ptregs.vector) {
+ case VEC_ADDRERR:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ break;
+ case VEC_ILLEGAL:
+ case VEC_LINE10:
+ case VEC_LINE11:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ case VEC_PRIV:
+ info.si_code = ILL_PRVOPC;
+ sig = SIGILL;
+ break;
+ case VEC_COPROC:
+ info.si_code = ILL_COPROC;
+ sig = SIGILL;
+ break;
+ case VEC_TRAP1: /* gdbserver breakpoint */
+ fp->ptregs.pc -= 2;
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP2:
+ case VEC_TRAP3:
+ case VEC_TRAP4:
+ case VEC_TRAP5:
+ case VEC_TRAP6:
+ case VEC_TRAP7:
+ case VEC_TRAP8:
+ case VEC_TRAP9:
+ case VEC_TRAP10:
+ case VEC_TRAP11:
+ case VEC_TRAP12:
+ case VEC_TRAP13:
+ case VEC_TRAP14:
+ info.si_code = ILL_ILLTRP;
+ sig = SIGILL;
+ break;
+ case VEC_FPBRUC:
+ case VEC_FPOE:
+ case VEC_FPNAN:
+ info.si_code = FPE_FLTINV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPIR:
+ info.si_code = FPE_FLTRES;
+ sig = SIGFPE;
+ break;
+ case VEC_FPDIVZ:
+ info.si_code = FPE_FLTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPUNDER:
+ info.si_code = FPE_FLTUND;
+ sig = SIGFPE;
+ break;
+ case VEC_FPOVER:
+ info.si_code = FPE_FLTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_ZERODIV:
+ info.si_code = FPE_INTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_CHK:
+ case VEC_TRAP:
+ info.si_code = FPE_INTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_TRACE: /* ptrace single step */
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP15: /* breakpoint */
+ info.si_code = TRAP_BRKPT;
+ sig = SIGTRAP;
+ break;
+ default:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ }
+ info.si_signo = sig;
+ info.si_errno = 0;
+ switch (fp->ptregs.format) {
+ default:
+ info.si_addr = (void *) fp->ptregs.pc;
+ break;
+ case 2:
+ info.si_addr = (void *) fp->un.fmt2.iaddr;
+ break;
+ case 7:
+ info.si_addr = (void *) fp->un.fmt7.effaddr;
+ break;
+ case 9:
+ info.si_addr = (void *) fp->un.fmt9.iaddr;
+ break;
+ case 10:
+ info.si_addr = (void *) fp->un.fmta.daddr;
+ break;
+ case 11:
+ info.si_addr = (void *) fp->un.fmtb.daddr;
+ break;
+ }
+ force_sig_info(sig, &info, current);
+}
+
+asmlinkage void set_esp0(unsigned long ssp)
+{
+ current->thread.esp0 = ssp;
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_stack(current, &stack);
+}
+EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+ siginfo_t info;
+
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ force_sig_info(signal, &info, current);
+}
+#endif
--- /dev/null
+/*
+ * drivers/dma/MCD_dma.h
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef _MCD_API_H
+#define _MCD_API_H
+
+/*
+ * Turn Execution Unit tasks ON (#define) or OFF (#undef)
+ */
+#undef MCD_INCLUDE_EU
+
+/*
+ * Number of DMA channels
+ */
+#define NCHANNELS 16
+
+/*
+ * Total number of variants
+ */
+#ifdef MCD_INCLUDE_EU
+#define NUMOFVARIANTS 6
+#else
+#define NUMOFVARIANTS 4
+#endif
+
+/*
+ * Define sizes of the various tables
+ */
+#define TASK_TABLE_SIZE (NCHANNELS*32)
+#define VAR_TAB_SIZE (128)
+#define CONTEXT_SAVE_SIZE (128)
+#define FUNCDESC_TAB_SIZE (256)
+
+#ifdef MCD_INCLUDE_EU
+#define FUNCDESC_TAB_NUM 16
+#else
+#define FUNCDESC_TAB_NUM 1
+#endif
+
+
+#ifndef DEFINESONLY
+
+/*
+ * Portability typedefs
+ */
+typedef int s32;
+typedef unsigned int u32;
+typedef short s16;
+typedef unsigned short u16;
+typedef char s8;
+typedef unsigned char u8;
+
+/*
+ * These structures represent the internal registers of the
+ * multi-channel DMA
+ */
+struct dmaRegs_s {
+ u32 taskbar; /* task table base address register */
+ u32 currPtr;
+ u32 endPtr;
+ u32 varTablePtr;
+ u16 dma_rsvd0;
+ u16 ptdControl; /* ptd control */
+ u32 intPending; /* interrupt pending register */
+ u32 intMask; /* interrupt mask register */
+ u16 taskControl[16]; /* task control registers */
+ u8 priority[32]; /* priority registers */
+ u32 initiatorMux; /* initiator mux control */
+ u32 taskSize0; /* task size control register 0. */
+ u32 taskSize1; /* task size control register 1. */
+ u32 dma_rsvd1; /* reserved */
+ u32 dma_rsvd2; /* reserved */
+ u32 debugComp1; /* debug comparator 1 */
+ u32 debugComp2; /* debug comparator 2 */
+ u32 debugControl; /* debug control */
+ u32 debugStatus; /* debug status */
+ u32 ptdDebug; /* priority task decode debug */
+ u32 dma_rsvd3[31]; /* reserved */
+};
+typedef volatile struct dmaRegs_s dmaRegs;
+
+#endif
+
+/*
+ * PTD control reg bits
+ */
+#define PTD_CTL_TSK_PRI 0x8000
+#define PTD_CTL_COMM_PREFETCH 0x0001
+
+/*
+ * Task Control reg bits and field masks
+ */
+#define TASK_CTL_EN 0x8000
+#define TASK_CTL_VALID 0x4000
+#define TASK_CTL_ALWAYS 0x2000
+#define TASK_CTL_INIT_MASK 0x1f00
+#define TASK_CTL_ASTRT 0x0080
+#define TASK_CTL_HIPRITSKEN 0x0040
+#define TASK_CTL_HLDINITNUM 0x0020
+#define TASK_CTL_ASTSKNUM_MASK 0x000f
+
+/*
+ * Priority reg bits and field masks
+ */
+#define PRIORITY_HLD 0x80
+#define PRIORITY_PRI_MASK 0x07
+
+/*
+ * Debug Control reg bits and field masks
+ */
+#define DBG_CTL_BLOCK_TASKS_MASK 0xffff0000
+#define DBG_CTL_AUTO_ARM 0x00008000
+#define DBG_CTL_BREAK 0x00004000
+#define DBG_CTL_COMP1_TYP_MASK 0x00003800
+#define DBG_CTL_COMP2_TYP_MASK 0x00000070
+#define DBG_CTL_EXT_BREAK 0x00000004
+#define DBG_CTL_INT_BREAK 0x00000002
+
+/*
+ * PTD Debug reg selector addresses
+ * This reg must be written with a value to show the contents of
+ * one of the desired internal registers.
+ */
+#define PTD_DBG_REQ 0x00
+/* shows the state of 31 initiators */
+#define PTD_DBG_TSK_VLD_INIT 0x01
+/* shows which 16 tasks are valid and
+ * have initiators asserted */
+
+
+/*
+ * General return values
+ */
+#define MCD_OK 0
+#define MCD_ERROR -1
+#define MCD_TABLE_UNALIGNED -2
+#define MCD_CHANNEL_INVALID -3
+
+/*
+ * MCD_initDma input flags
+ */
+#define MCD_RELOC_TASKS 0x00000001
+#define MCD_NO_RELOC_TASKS 0x00000000
+#define MCD_COMM_PREFETCH_EN 0x00000002
+/* Commbus Prefetching - MCF547x/548x ONLY */
+
+/*
+ * MCD_dmaStatus Status Values for each channel
+ */
+#define MCD_NO_DMA 1
+/* No DMA has been requested since reset */
+#define MCD_IDLE 2
+/* DMA active, but the initiator is currently inactive */
+#define MCD_RUNNING 3
+/* DMA active, and the initiator is currently active */
+#define MCD_PAUSED 4
+/* DMA active but it is currently paused */
+#define MCD_HALTED 5
+/* the most recent DMA has been killed with MCD_killTask() */
+#define MCD_DONE 6
+/* the most recent DMA has completed. */
+
+
+/*
+ * MCD_startDma parameter defines
+ */
+
+/*
+ * Constants for the funcDesc parameter
+ */
+/* Byte swapping: */
+#define MCD_NO_BYTE_SWAP 0x00045670
+/* to disable byte swapping. */
+#define MCD_BYTE_REVERSE 0x00076540
+/* to reverse the bytes of each u32 of the DMAed data. */
+#define MCD_U16_REVERSE 0x00067450
+/* to reverse the 16-bit halves of
+ * each 32-bit data value being DMAed.*/
+#define MCD_U16_BYTE_REVERSE 0x00054760
+/* to reverse the byte halves of each
+ * 16-bit half of each 32-bit data value DMAed */
+#define MCD_NO_BIT_REV 0x00000000
+/* do not reverse the bits of each byte DMAed. */
+#define MCD_BIT_REV 0x00088880
+/* reverse the bits of each byte DMAed */
+/* CRCing: */
+#define MCD_CRC16 0xc0100000
+/* to perform CRC-16 on DMAed data. */
+#define MCD_CRCCCITT 0xc0200000
+/* to perform CRC-CCITT on DMAed data. */
+#define MCD_CRC32 0xc0300000
+/* to perform CRC-32 on DMAed data. */
+#define MCD_CSUMINET 0xc0400000
+/* to perform internet checksums on DMAed data.*/
+#define MCD_NO_CSUM 0xa0000000
+/* to perform no checksumming. */
+
+#define MCD_FUNC_NOEU1 (MCD_NO_BYTE_SWAP | MCD_NO_BIT_REV | MCD_NO_CSUM)
+#define MCD_FUNC_NOEU2 (MCD_NO_BYTE_SWAP | MCD_NO_CSUM)
+
+/*
+ * Constants for the flags parameter
+ */
+#define MCD_TT_FLAGS_RL 0x00000001 /* Read line */
+#define MCD_TT_FLAGS_CW 0x00000002 /* Combine Writes */
+#define MCD_TT_FLAGS_SP 0x00000004
+/* Speculative prefetch(XLB) MCF547x/548x ONLY */
+#define MCD_TT_FLAGS_PI 0x00000040 /* Precise Increment */
+#define MCD_TT_FLAGS_MASK 0x000000ff
+#define MCD_TT_FLAGS_DEF (MCD_TT_FLAGS_RL | MCD_TT_FLAGS_CW)
+
+#define MCD_SINGLE_DMA 0x00000100 /* Unchained DMA */
+#define MCD_CHAIN_DMA /* TBD */
+#define MCD_EU_DMA /* TBD */
+#define MCD_FECTX_DMA 0x00001000 /* FEC TX ring DMA */
+#define MCD_FECRX_DMA 0x00002000 /* FEC RX ring DMA */
+
+
+/* these flags are valid for MCD_startDma
+ * and the chained buffer descriptors */
+#define MCD_BUF_READY 0x80000000
+/* indicates that this buffer is now
+ * under the DMA's control */
+#define MCD_WRAP 0x20000000
+/* to tell the FEC Dmas to wrap to the first BD */
+#define MCD_INTERRUPT 0x10000000
+/* to generate an interrupt after completion of the DMA. */
+#define MCD_END_FRAME 0x08000000
+/* tell the DMA to end the frame when transferring
+ * last byte of data in buffer */
+#define MCD_CRC_RESTART 0x40000000
+/* to empty out the accumulated checksum
+ prior to performing the DMA. */
+
+/* Defines for the FEC buffer descriptor control/status word*/
+#define MCD_FEC_BUF_READY 0x8000
+#define MCD_FEC_WRAP 0x2000
+#define MCD_FEC_INTERRUPT 0x1000
+#define MCD_FEC_END_FRAME 0x0800
+
+
+/*
+ * Defines for general intuitiveness
+ */
+
+#define MCD_TRUE 1
+#define MCD_FALSE 0
+
+/*
+ * Three different cases for destination and source.
+ */
+#define MINUS1 -1
+#define ZERO 0
+#define PLUS1 1
+
+#ifndef DEFINESONLY
+
+/* Task Table Entry struct*/
+typedef struct {
+ u32 TDTstart; /* task descriptor table start */
+ u32 TDTend; /* task descriptor table end */
+ u32 varTab; /* variable table start */
+ u32 FDTandFlags; /* function descriptor table start and flags */
+ volatile u32 descAddrAndStatus;
+ volatile u32 modifiedVarTab;
+ u32 contextSaveSpace; /* context save space start */
+ u32 literalBases;
+} TaskTableEntry;
+
+
+/* Chained buffer descriptor */
+typedef volatile struct MCD_bufDesc_struct MCD_bufDesc;
+struct MCD_bufDesc_struct {
+ u32 flags;
+/* flags describing the DMA */
+ u32 csumResult;
+/* checksum from checksumming performed since last checksum reset */
+ s8 *srcAddr;
+/* the address to move data from */
+ s8 *destAddr;
+/* the address to move data to */
+ s8 *lastDestAddr;
+/* the last address written to */
+ u32 dmaSize;
+/* the number of bytes to transfer independent of the transfer size */
+ MCD_bufDesc *next;
+/* next buffer descriptor in chain */
+ u32 info;
+/* private information about this descriptor; DMA does not affect it */
+};
+
+/* Progress Query struct */
+typedef volatile struct MCD_XferProg_struct {
+ s8 *lastSrcAddr;
+/* the most-recent or last, post-increment source address */
+ s8 *lastDestAddr;
+/* the most-recent or last, post-increment destination address */
+ u32 dmaSize;
+/* the amount of data transferred for the current buffer */
+ MCD_bufDesc *currBufDesc;
+/* pointer to the current buffer descriptor being DMAed */
+} MCD_XferProg;
+
+
+/* FEC buffer descriptor */
+typedef volatile struct MCD_bufDescFec_struct {
+ u16 statCtrl;
+ u16 length;
+ u32 dataPointer;
+} MCD_bufDescFec;
+
+
+/*************************************************************************/
+/*
+ * API function Prototypes - see MCD_dmaApi.c for further notes
+ */
+
+/*
+ * MCD_startDma starts a particular kind of DMA .
+ */
+int MCD_startDma(
+ int channel,
+/* the channel on which to run the DMA */
+ s8 *srcAddr,
+/* the address to move data from, or buffer-descriptor address */
+ s16 srcIncr,
+/* the amount to increment the source address per transfer */
+ s8 *destAddr,
+/* the address to move data to */
+ s16 destIncr,
+/* the amount to increment the destination address per transfer */
+ u32 dmaSize,
+/* the number of bytes to transfer independent of the transfer size */
+ u32 xferSize,
+/* the number bytes in of each data movement (1, 2, or 4) */
+ u32 initiator,
+/* what device initiates the DMA */
+ int priority,
+/* priority of the DMA */
+ u32 flags,
+/* flags describing the DMA */
+ u32 funcDesc
+/* a description of byte swapping, bit swapping, and CRC actions */
+);
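+
+/*
+ * Minimal usage sketch (not part of the API): an unchained, byte-wise,
+ * software-initiated copy of "len" bytes from "src" to "dst" on channel 0.
+ * The initiator and priority values are placeholders and depend on the
+ * SoC's initiator mux setup.
+ *
+ *	MCD_startDma(0, src, 1, dst, 1, len, 1, initiator, 0,
+ *		     MCD_SINGLE_DMA | MCD_TT_FLAGS_DEF, MCD_FUNC_NOEU1);
+ */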
+
+/*
+ * MCD_initDma() initializes the DMA API by setting up a pointer to the DMA
+ * registers, relocating and creating the appropriate task structures, and
+ * setting up some global settings
+ */
+int MCD_initDma(dmaRegs *sDmaBarAddr, void *taskTableDest, u32 flags);
+
+/*
+ * MCD_dmaStatus() returns the status of the DMA on the requested channel.
+ */
+int MCD_dmaStatus(int channel);
+
+/*
+ * MCD_XferProgrQuery() returns progress of DMA on requested channel
+ */
+int MCD_XferProgrQuery(int channel, MCD_XferProg *progRep);
+
+/*
+ * MCD_killDma() halts the DMA on the requested channel, without any
+ * intention of resuming the DMA.
+ */
+int MCD_killDma(int channel);
+
+/*
+ * MCD_continDma() continues a DMA which has stopped due to encountering an
+ * unready buffer descriptor.
+ */
+int MCD_continDma(int channel);
+
+/*
+ * MCD_pauseDma() pauses the DMA on the given channel ( if any DMA is
+ * running on that channel).
+ */
+int MCD_pauseDma(int channel);
+
+/*
+ * MCD_resumeDma() resumes the DMA on a given channel (if any DMA is
+ * running on that channel).
+ */
+int MCD_resumeDma(int channel);
+
+/*
+ * MCD_csumQuery provides the checksum/CRC after performing a non-chained DMA
+ */
+int MCD_csumQuery(int channel, u32 *csum);
+
+/*
+ * MCD_getCodeSize provides the packed size required by the microcoded task
+ * and structures.
+ */
+int MCD_getCodeSize(void);
+
+/*
+ * MCD_getVersion provides a pointer to a version string and returns a
+ * version number.
+ */
+int MCD_getVersion(char **longVersion);
+
+/* macro for setting a location in the variable table */
+#define MCD_SET_VAR(taskTab, idx, value) \
+ ((u32 *)(taskTab)->varTab)[idx] = value
+ /* Note that MCD_SET_VAR() is invoked many times in firing up a DMA function,
+ so I'm avoiding surrounding it with "do {} while(0)" */
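+ /*
+  * Illustrative use (not part of the API), assuming "tt" is a pointer to
+  * the channel's TaskTableEntry and "buf" is a buffer address supplied by
+  * the caller:
+  *
+  *	MCD_SET_VAR(tt, 0, (u32)buf);
+  */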
+
+#endif /* DEFINESONLY */
+
+#endif /* _MCD_API_H */
--- /dev/null
+/*
+ * drivers/dma/MCD_dmaApi.c
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "MCD_dma.h"
+#include "MCD_tasksInit.h"
+#include "MCD_progCheck.h"
+
+/********************************************************************/
+/*
+ * This is an API-internal pointer to the DMA's registers
+ */
+dmaRegs *MCD_dmaBar;
+
+/*
+ * These are the real and model task tables as generated by the
+ * build process
+ */
+extern TaskTableEntry MCD_realTaskTableSrc[NCHANNELS];
+extern TaskTableEntry MCD_modelTaskTableSrc[NUMOFVARIANTS];
+
+/*
+ * However, these (usually) get relocated to on-chip SRAM, at which
+ * point we access them through these table pointers
+ */
+volatile TaskTableEntry *MCD_taskTable;
+TaskTableEntry *MCD_modelTaskTable;
+
+
+/*
+ * MCD_chStatus[] is an array of status indicators for remembering
+ * whether a DMA has ever been attempted on each channel, pausing
+ * status, etc.
+ */
+static int MCD_chStatus[NCHANNELS] =
+{
+ MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA,
+ MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA,
+ MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA,
+ MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA, MCD_NO_DMA
+};
+
+/*
+ * Prototypes for local functions
+ */
+static void MCD_memcpy(int *dest, int *src, u32 size);
+static void MCD_resmActions(int channel);
+
+/*
+ * Buffer descriptors used to store progress information for single DMAs.
+ * Also used as storage for the DMA to write CRC results for single DMAs.
+ * Otherwise, the DMA does not parse these buffer descriptors.
+ */
+#ifdef MCD_INCLUDE_EU
+extern MCD_bufDesc MCD_singleBufDescs[NCHANNELS];
+#else
+MCD_bufDesc MCD_singleBufDescs[NCHANNELS];
+#endif
+MCD_bufDesc *MCD_relocBuffDesc;
+
+
+/*
+ * Defines for the debug control register's functions
+ */
+#define DBG_CTL_COMP1_TASK (0x00002000)
+/* have comparator 1 look for a task # */
+#define DBG_CTL_ENABLE (DBG_CTL_AUTO_ARM | \
+ DBG_CTL_BREAK | \
+ DBG_CTL_INT_BREAK | \
+ DBG_CTL_COMP1_TASK)
+#define DBG_CTL_DISABLE (DBG_CTL_AUTO_ARM | \
+ DBG_CTL_INT_BREAK | \
+ DBG_CTL_COMP1_TASK)
+#define DBG_KILL_ALL_STAT (0xFFFFFFFF)
+
+/*
+ * Offset to context save area where progress info is stored
+ */
+#define CSAVE_OFFSET 10
+
+/*
+ * Defines for Byte Swapping
+ */
+#define MCD_BYTE_SWAP_KILLER 0xFFF8888F
+#define MCD_NO_BYTE_SWAP_ATALL 0x00040000
+
+/*
+ * Execution Unit Identifiers
+ */
+#define MAC 0 /* legacy - not used */
+#define LUAC 1 /* legacy - not used */
+#define CRC 2 /* legacy - not used */
+#define LURC 3 /* Logic Unit with CRC */
+
+/*
+ * Task Identifiers
+ */
+#define TASK_CHAINNOEU 0
+#define TASK_SINGLENOEU 1
+#ifdef MCD_INCLUDE_EU
+#define TASK_CHAINEU 2
+#define TASK_SINGLEEU 3
+#define TASK_FECRX 4
+#define TASK_FECTX 5
+#else
+#define TASK_CHAINEU 0
+#define TASK_SINGLEEU 1
+#define TASK_FECRX 2
+#define TASK_FECTX 3
+#endif
+
+/*
+ * Structure to remember which variant is on which channel
+ */
+typedef struct MCD_remVariants_struct MCD_remVariant;
+struct MCD_remVariants_struct {
+ int remDestRsdIncr[NCHANNELS]; /* -1,0,1 */
+ int remSrcRsdIncr[NCHANNELS]; /* -1,0,1 */
+ s16 remDestIncr[NCHANNELS]; /* DestIncr */
+ s16 remSrcIncr[NCHANNELS]; /* srcIncr */
+ u32 remXferSize[NCHANNELS]; /* xferSize */
+};
+
+/*
+ * Structure to remember the startDma parameters for each channel
+ */
+MCD_remVariant MCD_remVariants;
+
+/********************************************************************/
+/*
+ * Function: MCD_initDma
+ * Purpose: Initializes the DMA API by setting up a pointer to the DMA
+ * registers, relocating and creating the appropriate task
+ * structures, and setting up some global settings
+ * Arguments:
+ * dmaBarAddr - pointer to the multichannel DMA registers
+ * taskTableDest - location to move DMA task code and structs to
+ * flags - operational parameters
+ * Return Value:
+ * MCD_TABLE_UNALIGNED if taskTableDest is not 512-byte aligned
+ * MCD_OK otherwise
+ */
+extern u32 MCD_funcDescTab0[];
+
+int MCD_initDma(dmaRegs *dmaBarAddr, void *taskTableDest, u32 flags)
+{
+ int i;
+ TaskTableEntry *entryPtr;
+
+ /* Setup the local pointer to register set */
+ MCD_dmaBar = dmaBarAddr;
+
+ /* Do we need to move/create a task table */
+ if ((flags & MCD_RELOC_TASKS) != 0) {
+ int fixedSize;
+ u32 *fixedPtr;
+ int varTabsOffset, funcDescTabsOffset;
+ int contextSavesOffset;
+ int taskDescTabsOffset;
+ int taskTableSize, varTabsSize;
+ int funcDescTabsSize, contextSavesSize;
+ int taskDescTabSize;
+ int i;
+
+ /* Check if physical address is
+ * aligned on 512 byte boundary */
+ if (((u32)taskTableDest & 0x000001ff) != 0)
+ return MCD_TABLE_UNALIGNED;
+
+ MCD_taskTable = taskTableDest;
+ /* set up local pointer to task Table */
+
+ /*
+ * Create a task table:
+ * compute aligned base offsets for variable tables and
+ * function descriptor tables, then
+ * loop through the task table and setup the pointers
+ * copy over the model task table with the actual
+ * task descriptor tables
+ */
+ taskTableSize = NCHANNELS * sizeof(TaskTableEntry);
+ /* Align variable tables to size */
+ varTabsOffset = taskTableSize + (u32)taskTableDest;
+ if ((varTabsOffset & (VAR_TAB_SIZE - 1)) != 0)
+ varTabsOffset = (varTabsOffset + VAR_TAB_SIZE)
+ & (~VAR_TAB_SIZE);
+ /* Align function descriptor tables */
+ varTabsSize = NCHANNELS * VAR_TAB_SIZE;
+ funcDescTabsOffset = varTabsOffset + varTabsSize;
+
+ if ((funcDescTabsOffset & (FUNCDESC_TAB_SIZE - 1)) != 0)
+ funcDescTabsOffset = (funcDescTabsOffset
+ + FUNCDESC_TAB_SIZE) &
+ (~FUNCDESC_TAB_SIZE);
+
+ funcDescTabsSize = FUNCDESC_TAB_NUM * FUNCDESC_TAB_SIZE;
+ contextSavesOffset = funcDescTabsOffset
+ + funcDescTabsSize;
+ contextSavesSize = (NCHANNELS * CONTEXT_SAVE_SIZE);
+ fixedSize = taskTableSize + varTabsSize +
+ funcDescTabsSize + contextSavesSize;
+
+ /* Zero the thing out */
+ fixedPtr = (u32 *)taskTableDest;
+ for (i = 0; i < (fixedSize/4); i++)
+ fixedPtr[i] = 0;
+
+ entryPtr = (TaskTableEntry *)MCD_taskTable;
+ /* Set up fixed pointers */
+ for (i = 0; i < NCHANNELS; i++) {
+ entryPtr[i].varTab = (u32)varTabsOffset;
+ /* update ptr to local value */
+ entryPtr[i].FDTandFlags =
+ (u32)funcDescTabsOffset | MCD_TT_FLAGS_DEF;
+ entryPtr[i].contextSaveSpace =
+ (u32)contextSavesOffset;
+ varTabsOffset += VAR_TAB_SIZE;
+#ifdef MCD_INCLUDE_EU
+ /* if MCD_INCLUDE_EU is not defined there is only one
+ * function descriptor table, so all channels point to the same one */
+ funcDescTabsOffset += FUNCDESC_TAB_SIZE;
+#endif
+ contextSavesOffset += CONTEXT_SAVE_SIZE;
+ }
+ /* Copy over the function descriptor table */
+ for (i = 0; i < FUNCDESC_TAB_NUM; i++) {
+ MCD_memcpy((void *)(entryPtr[i].FDTandFlags
+ & ~MCD_TT_FLAGS_MASK),
+ (void *)MCD_funcDescTab0,
+ FUNCDESC_TAB_SIZE);
+ }
+
+ /* Copy model task table to where the
+ * context save stuff leaves off */
+ MCD_modelTaskTable =
+ (TaskTableEntry *)contextSavesOffset;
+
+ MCD_memcpy((void *)MCD_modelTaskTable,
+ (void *)MCD_modelTaskTableSrc,
+ NUMOFVARIANTS * sizeof(TaskTableEntry));
+
+ /* Point to local version of model task table */
+ entryPtr = MCD_modelTaskTable;
+ taskDescTabsOffset = (u32)MCD_modelTaskTable +
+ (NUMOFVARIANTS * sizeof(TaskTableEntry));
+
+ /* Copy actual task code and update TDT ptrs
+ * in local model task table */
+ for (i = 0; i < NUMOFVARIANTS; i++) {
+ taskDescTabSize = entryPtr[i].TDTend
+ - entryPtr[i].TDTstart + 4;
+ MCD_memcpy((void *)taskDescTabsOffset,
+ (void *)entryPtr[i].TDTstart,
+ taskDescTabSize);
+ entryPtr[i].TDTstart =
+ (u32)taskDescTabsOffset;
+ taskDescTabsOffset += taskDescTabSize;
+ entryPtr[i].TDTend =
+ (u32)taskDescTabsOffset - 4;
+ }
+#ifdef MCD_INCLUDE_EU
+ /*
+ * Tack the single-DMA buffer descriptors onto the end of
+ * the task code so the API controls where they are,
+ * since the DMA might write to them
+ */
+ MCD_relocBuffDesc = (MCD_bufDesc *)
+ (entryPtr[NUMOFVARIANTS - 1].TDTend + 4);
+#else
+ /*
+ * DMA does not touch them so they
+ * can be wherever and we don't need to
+ * waste SRAM on them
+ */
+ MCD_relocBuffDesc = MCD_singleBufDescs;
+#endif
+ } else {
+ /*
+ * Point the would-be relocated task tables and
+ * the buffer descriptors
+ * to the ones the linker generated
+ */
+ if (((u32)MCD_realTaskTableSrc & 0x000001ff) != 0)
+ return MCD_TABLE_UNALIGNED;
+
+ entryPtr = MCD_realTaskTableSrc;
+ for (i = 0; i < NCHANNELS; i++) {
+ if (((entryPtr[i].varTab
+ & (VAR_TAB_SIZE - 1)) != 0) ||
+ ((entryPtr[i].FDTandFlags &
+ (FUNCDESC_TAB_SIZE - 1)) != 0))
+ return MCD_TABLE_UNALIGNED;
+ }
+
+ MCD_taskTable = MCD_realTaskTableSrc;
+ MCD_modelTaskTable = MCD_modelTaskTableSrc;
+ MCD_relocBuffDesc = MCD_singleBufDescs;
+ }
+
+ /* Make all channels inactive,
+ * and remember them as such: */
+ MCD_dmaBar->taskbar = (u32) MCD_taskTable;
+ for (i = 0; i < NCHANNELS; i++) {
+ MCD_dmaBar->taskControl[i] = 0x0;
+ MCD_chStatus[i] = MCD_NO_DMA;
+ }
+
+ /* Set up pausing mechanism to inactive state: */
+ MCD_dmaBar->debugComp1 = 0;
+ MCD_dmaBar->debugComp2 = 0;
+ MCD_dmaBar->debugControl = DBG_CTL_DISABLE;
+ MCD_dmaBar->debugStatus = DBG_KILL_ALL_STAT;
+
+ /* Enable or disable commbus prefetch */
+ if ((flags & MCD_COMM_PREFETCH_EN) != 0)
+ MCD_dmaBar->ptdControl &= ~PTD_CTL_COMM_PREFETCH;
+ else
+ MCD_dmaBar->ptdControl |= PTD_CTL_COMM_PREFETCH;
+
+ return MCD_OK;
+}
+/*********************** End of MCD_initDma() ***********************/
+
+/********************************************************************/
+/* Function: MCD_dmaStatus
+ * Purpose: Returns the status of the DMA on the requested channel
+ * Arguments: channel - channel number
+ * Returns: Predefined status indicators
+ */
+int MCD_dmaStatus(int channel)
+{
+ u16 tcrValue;
+
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ tcrValue = MCD_dmaBar->taskControl[channel];
+ if ((tcrValue & TASK_CTL_EN) == 0) {
+ /* Task is no longer enabled: if it was last reported
+ * as running or idle, the DMA has now completed */
+ if (MCD_chStatus[channel] == MCD_RUNNING
+ || MCD_chStatus[channel] == MCD_IDLE)
+ MCD_chStatus[channel] = MCD_DONE;
+ } else /* something is running */{
+ /* There are three possibilities:
+ * paused, running or idle. */
+ if (MCD_chStatus[channel] == MCD_RUNNING
+ || MCD_chStatus[channel] == MCD_IDLE) {
+ MCD_dmaBar->ptdDebug = PTD_DBG_TSK_VLD_INIT;
+ /* Determine which initiator
+ * is asserted. */
+ if ((MCD_dmaBar->ptdDebug >> channel) & 0x1)
+ MCD_chStatus[channel] = MCD_RUNNING;
+ else
+ MCD_chStatus[channel] = MCD_IDLE;
+ /* Do not change the status if it is already paused */
+ }
+ }
+ return MCD_chStatus[channel];
+}
+/******************** End of MCD_dmaStatus() ************************/
+
+/********************************************************************/
+/* Function: MCD_startDma
+ * Purpose: Starts a particular kind of DMA
+ * Arguments: see below
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ */
+
+int MCD_startDma(
+ int channel,
+/* the channel on which to run the DMA */
+ s8 *srcAddr,
+/* the address to move data from,
+ * or physical buffer-descriptor address */
+ s16 srcIncr,
+/* the amount to increment the source
+ * address per transfer */
+ s8 *destAddr,
+/* the address to move data to */
+ s16 destIncr,
+/* the amount to increment the
+ * destination address per transfer */
+ u32 dmaSize,
+/* the number of bytes to transfer
+ * independent of the transfer size */
+ u32 xferSize,
+/* the number of bytes in each data
+ * movement (1, 2, or 4) */
+ u32 initiator,
+/* what device initiates the DMA */
+ int priority,
+/* priority of the DMA */
+ u32 flags,
+/* flags describing the DMA */
+ u32 funcDesc
+/* a description of byte swapping,
+ * bit swapping, and CRC actions */
+#ifdef MCD_NEED_ADDR_TRANS
+ s8 *srcAddrVirt
+/* virtual buffer descriptor address TBD*/
+#endif
+)
+{
+ int srcRsdIncr, destRsdIncr;
+ int *cSave;
+ short xferSizeIncr;
+ int tcrCount = 0;
+#ifdef MCD_INCLUDE_EU
+ u32 *realFuncArray;
+#endif
+
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+#ifndef MCD_INCLUDE_EU
+ funcDesc = MCD_FUNC_NOEU1;
+#endif
+
+#ifdef MCD_DEBUG
+ printf("startDma:Setting up params\n");
+#endif
+
+ /* Enable task-wise priority */
+ MCD_dmaBar->ptdControl |= (u16) 0x8000;
+
+ /* Calculate additional parameters
+ * to the regular DMA calls. */
+ srcRsdIncr = srcIncr < 0 ? -1 : (srcIncr > 0 ? 1 : 0);
+ destRsdIncr = destIncr < 0 ? -1 : (destIncr > 0 ? 1 : 0);
+ xferSizeIncr = (xferSize & 0xffff) | 0x20000000;
+
+ /* Remember which variant is running for each channel */
+ MCD_remVariants.remSrcRsdIncr[channel] = srcRsdIncr;
+ MCD_remVariants.remDestRsdIncr[channel] = destRsdIncr;
+ MCD_remVariants.remDestIncr[channel] = destIncr;
+ MCD_remVariants.remSrcIncr[channel] = srcIncr;
+ MCD_remVariants.remXferSize[channel] = xferSize;
+
+ cSave = (int *)(MCD_taskTable[channel].contextSaveSpace)
+ + CSAVE_OFFSET
+ + CURRBD;
+
+#ifdef MCD_INCLUDE_EU
+ realFuncArray = (u32 *)(MCD_taskTable[channel].FDTandFlags
+ & 0xffffff00);
+
+ /*
+ * Modify the LURC's normal and byte-residue-loop functions
+ * according to parameter.
+ */
+ switch (xferSize) {
+ case 4:
+ realFuncArray[(LURC*16)] = funcDesc;
+ break;
+ case 2:
+ realFuncArray[(LURC*16)] = funcDesc & 0xfffff00f;
+ break;
+ case 1:
+ default:
+ realFuncArray[(LURC*16)] = funcDesc & 0xffff000f;
+ break;
+ }
+
+ realFuncArray[(LURC*16 + 1)] = 0
+ | (funcDesc & MCD_BYTE_SWAP_KILLER)
+ | MCD_NO_BYTE_SWAP_ATALL;
+#endif
+
+ /* Write the initiator field in the TCR and
+ * set the initiator-hold bit*/
+ MCD_dmaBar->taskControl[channel] = 0
+ | (initiator << 8)
+ | TASK_CTL_HIPRITSKEN
+ | TASK_CTL_HLDINITNUM;
+
+ /*
+ * Current versions of the MPC8220 MCD have a hardware quirk that could
+ * cause the write to the TCR to collide with an MDE access to the
+ * initiator-register file, so we have to verify that the write occurred
+ * correctly by reading back the value. On MCF547x/8x devices and any
+ * future revisions of the MPC8220, this loop will not be entered.
+ */
+ while (((MCD_dmaBar->taskControl[channel] & 0x1fff) !=
+ ((initiator << 8) | TASK_CTL_HIPRITSKEN
+ | TASK_CTL_HLDINITNUM)) && (tcrCount < 1000)) {
+ tcrCount++;
+ MCD_dmaBar->taskControl[channel] = 0
+ | (initiator << 8)
+ | TASK_CTL_HIPRITSKEN
+ | TASK_CTL_HLDINITNUM;
+ }
+
+ MCD_dmaBar->priority[channel] = (u8)priority & PRIORITY_PRI_MASK;
+
+ if (channel < 8 && channel >= 0) {
+ MCD_dmaBar->taskSize0 &= ~(0xf << (7-channel)*4);
+ MCD_dmaBar->taskSize0
+ |= (xferSize & 3) << (((7 - channel)*4) + 2);
+ MCD_dmaBar->taskSize0
+ |= (xferSize & 3) << ((7 - channel)*4);
+ } else {
+ MCD_dmaBar->taskSize1 &= ~(0xf << (15-channel)*4);
+ MCD_dmaBar->taskSize1
+ |= (xferSize & 3) << (((15 - channel)*4) + 2);
+ MCD_dmaBar->taskSize1
+ |= (xferSize & 3) << ((15 - channel)*4);
+ }
+
+ /* Setup task table flags/options */
+ MCD_taskTable[channel].FDTandFlags &= ~MCD_TT_FLAGS_MASK;
+ MCD_taskTable[channel].FDTandFlags |= (MCD_TT_FLAGS_MASK & flags);
+
+ if (flags & MCD_FECTX_DMA) {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_FECTX].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_FECTX].TDTend;
+ MCD_startDmaENetXmit(srcAddr, srcAddr, destAddr,
+ MCD_taskTable, channel);
+ } else if (flags & MCD_FECRX_DMA) {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_FECRX].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_FECRX].TDTend;
+ MCD_startDmaENetRcv(srcAddr, srcAddr, destAddr,
+ MCD_taskTable, channel);
+ } else if (flags & MCD_SINGLE_DMA) {
+ /*
+ * This buffer descriptor is used for storing off
+ * initial parameters for later progress query
+ * calculation and for the DMA to write the resulting
+ * checksum. The DMA does not use this to determine how
+ * to operate; that info is passed with the init routine
+ */
+ MCD_relocBuffDesc[channel].srcAddr = srcAddr;
+ MCD_relocBuffDesc[channel].destAddr = destAddr;
+ MCD_relocBuffDesc[channel].lastDestAddr = destAddr;
+ MCD_relocBuffDesc[channel].dmaSize = dmaSize;
+ MCD_relocBuffDesc[channel].flags = 0;
+ /* not used */
+ MCD_relocBuffDesc[channel].csumResult = 0;
+ /* not used */
+ MCD_relocBuffDesc[channel].next = 0;
+ /* not used */
+
+ /* Initialize the progress-querying stuff
+ * to show no progress:*/
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ SRCPTR + CSAVE_OFFSET] = (int)srcAddr;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DESTPTR + CSAVE_OFFSET] = (int)destAddr;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DCOUNT + CSAVE_OFFSET] = 0;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ CURRBD + CSAVE_OFFSET] =
+ (u32) &(MCD_relocBuffDesc[channel]);
+
+ if ((funcDesc == MCD_FUNC_NOEU1)
+ || (funcDesc == MCD_FUNC_NOEU2)) {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_SINGLENOEU].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_SINGLENOEU].TDTend;
+ MCD_startDmaSingleNoEu(srcAddr, srcIncr, destAddr,
+ destIncr, dmaSize, xferSizeIncr, flags,
+ (int *)&(MCD_relocBuffDesc[channel]),
+ cSave, MCD_taskTable, channel);
+ } else {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_SINGLEEU].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_SINGLEEU].TDTend;
+ MCD_startDmaSingleEu(srcAddr, srcIncr, destAddr,
+ destIncr, dmaSize, xferSizeIncr, flags,
+ (int *)&(MCD_relocBuffDesc[channel]),
+ cSave, MCD_taskTable, channel);
+ }
+ } else /* Chained DMA */ {
+ /* Initialize the progress-querying
+ * stuff to show no progress:*/
+#if 1 /* (!defined(MCD_NEED_ADDR_TRANS)) */
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ SRCPTR + CSAVE_OFFSET]
+ = (int)((MCD_bufDesc *) srcAddr)->srcAddr;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DESTPTR + CSAVE_OFFSET]
+ = (int)((MCD_bufDesc *) srcAddr)->destAddr;
+#else
+ /* if using address translation, need the
+ * virtual addr of the first buffdesc */
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ SRCPTR + CSAVE_OFFSET]
+ = (int)((MCD_bufDesc *) srcAddrVirt)->srcAddr;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DESTPTR + CSAVE_OFFSET]
+ = (int)((MCD_bufDesc *) srcAddrVirt)->destAddr;
+#endif
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DCOUNT + CSAVE_OFFSET] = 0;
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ CURRBD + CSAVE_OFFSET] = (u32) srcAddr;
+
+ if (funcDesc == MCD_FUNC_NOEU1
+ || funcDesc == MCD_FUNC_NOEU2) {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_CHAINNOEU].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_CHAINNOEU].TDTend;
+ MCD_startDmaChainNoEu((int *)srcAddr, srcIncr,
+ destIncr, xferSize, xferSizeIncr, cSave,
+ MCD_taskTable, channel);
+ } else {
+ /* TDTStart and TDTEnd */
+ MCD_taskTable[channel].TDTstart =
+ MCD_modelTaskTable[TASK_CHAINEU].TDTstart;
+ MCD_taskTable[channel].TDTend =
+ MCD_modelTaskTable[TASK_CHAINEU].TDTend;
+ MCD_startDmaChainEu((int *)srcAddr, srcIncr, destIncr,
+ xferSize, xferSizeIncr, cSave,
+ MCD_taskTable, channel);
+ }
+ }
+
+ MCD_chStatus[channel] = MCD_IDLE;
+ return MCD_OK;
+}
+
+/************************ End of MCD_startDma() *********************/
+
+/********************************************************************/
+/* Function: MCD_XferProgrQuery
+ * Purpose: Returns progress of DMA on requested channel
+ * Arguments: channel - channel to retrieve progress for
+ * progRep - pointer to user supplied MCD_XferProg struct
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ *
+ * Notes:
+ * Upon completing or aborting a DMA, or while the DMA is in
+ * progress, MCD_XferProgrQuery() returns the first DMA-destination
+ * address not (or not yet) used in the DMA. When
+ * encountering a non-ready buffer descriptor, the information for
+ * the last completed descriptor is returned.
+ *
+ * MCD_XferProgrQuery() has to avoid the possibility of getting
+ * partially-updated information in the event that we should happen
+ * to query DMA progress just as the DMA is updating it. It does that
+ * by taking advantage of the fact that, for the most part, context is
+ * not saved frequently. We therefore read it at least twice, until we
+ * get the same information twice in a row.
+ *
+ * Because a small, but not insignificant, amount of time is required
+ * to write out the progress-query information, especially upon
+ * completion of the DMA, it would be wise to guarantee some time lag
+ * between successive readings of the progress-query information.
+ */
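+
+/*
+ * Illustrative polling sketch (not part of the driver): one way a caller
+ * might read progress on a channel. The channel number and the use of
+ * printk() are hypothetical example choices.
+ *
+ *	MCD_XferProg prog;
+ *
+ *	if (MCD_XferProgrQuery(channel, &prog) == MCD_OK)
+ *		printk(KERN_DEBUG "DMA moved %u bytes, dest now %p\n",
+ *		       prog.dmaSize, prog.lastDestAddr);
+ */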
+
+/*
+ * How many iterations of the loop below to execute to stabilize values
+ */
+#define STABTIME 0
+
+int MCD_XferProgrQuery(int channel, MCD_XferProg *progRep)
+{
+ MCD_XferProg prevRep;
+ int again;
+ /* true if we are to try again to get consistent results */
+ int i; /* used as a time-waste counter */
+ int destDiffBytes;
+ /* Total number of bytes that we think actually got xfered. */
+ int numIterations; /* number of iterations */
+ int bytesNotXfered; /* bytes that did not get xfered. */
+ s8 *LWAlignedInitDestAddr, *LWAlignedCurrDestAddr;
+ int subModVal, addModVal;
+ /* Mod values to be added to and subtracted from the final destAddr */
+
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ /* Read a trial value for the progress-reporting values*/
+ prevRep.lastSrcAddr =
+ (s8 *)((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ SRCPTR + CSAVE_OFFSET];
+ prevRep.lastDestAddr =
+ (s8 *)((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DESTPTR + CSAVE_OFFSET];
+ prevRep.dmaSize =
+ ((volatile int *)MCD_taskTable[channel].contextSaveSpace)[
+ DCOUNT + CSAVE_OFFSET];
+ prevRep.currBufDesc =
+ (MCD_bufDesc *)((volatile int *)MCD_taskTable[
+ channel].contextSaveSpace)[CURRBD + CSAVE_OFFSET];
+
+ /* Repeatedly reread those values until
+ * they match previous values: */
+ do {
+ /* Take a little bit of time to ensure stability: */
+ for (i = 0; i < STABTIME; i++)
+ i += i >> 2;
+ /* make sure this loop does something so that it
+ doesn't get optimized out */
+ /* Check them again: */
+ progRep->lastSrcAddr =
+ (s8 *)((volatile int *)MCD_taskTable[
+ channel].contextSaveSpace)[SRCPTR + CSAVE_OFFSET];
+ progRep->lastDestAddr =
+ (s8 *)((volatile int *)MCD_taskTable[
+ channel].contextSaveSpace)[DESTPTR + CSAVE_OFFSET];
+ progRep->dmaSize = ((volatile int *)MCD_taskTable[
+ channel].contextSaveSpace)[DCOUNT + CSAVE_OFFSET];
+ progRep->currBufDesc =
+ (MCD_bufDesc *)((volatile int *)MCD_taskTable[
+ channel].contextSaveSpace)[CURRBD + CSAVE_OFFSET];
+
+ /* See if they match: */
+ if (prevRep.lastSrcAddr != progRep->lastSrcAddr
+ || prevRep.lastDestAddr != progRep->lastDestAddr
+ || prevRep.dmaSize != progRep->dmaSize
+ || prevRep.currBufDesc != progRep->currBufDesc) {
+ /* If they don't match, save the new values
+ as the previous values and try again: */
+ prevRep.lastSrcAddr = progRep->lastSrcAddr;
+ prevRep.lastDestAddr = progRep->lastDestAddr;
+ prevRep.dmaSize = progRep->dmaSize;
+ prevRep.currBufDesc = progRep->currBufDesc;
+ again = MCD_TRUE;
+ } else
+ again = MCD_FALSE;
+ } while (again == MCD_TRUE);
+
+
+ /* Update dmaSize and lastDestAddr */
+ switch (MCD_remVariants.remDestRsdIncr[channel]) {
+ case MINUS1:
+ subModVal = ((int)progRep->lastDestAddr)
+ & ((MCD_remVariants.remXferSize[channel]) - 1);
+ addModVal = ((int)progRep->currBufDesc->destAddr)
+ & ((MCD_remVariants.remXferSize[channel]) - 1);
+ LWAlignedInitDestAddr = (progRep->currBufDesc->destAddr)
+ - addModVal;
+ LWAlignedCurrDestAddr = (progRep->lastDestAddr) - subModVal;
+ destDiffBytes = LWAlignedInitDestAddr - LWAlignedCurrDestAddr;
+ bytesNotXfered =
+ (destDiffBytes/MCD_remVariants.remDestIncr[channel]) *
+ (MCD_remVariants.remDestIncr[channel]
+ + MCD_remVariants.remXferSize[channel]);
+ progRep->dmaSize = destDiffBytes - bytesNotXfered
+ + addModVal - subModVal;
+ break;
+ case ZERO:
+ progRep->lastDestAddr = progRep->currBufDesc->destAddr;
+ break;
+ case PLUS1:
+ /* This value has to be subtracted
+ from the final calculated dmaSize. */
+ subModVal = ((int)progRep->currBufDesc->destAddr)
+ & ((MCD_remVariants.remXferSize[channel]) - 1);
+ /* These bytes are already in lastDestAddr. */
+ addModVal = ((int)progRep->lastDestAddr)
+ & ((MCD_remVariants.remXferSize[channel]) - 1);
+ LWAlignedInitDestAddr = (progRep->currBufDesc->destAddr)
+ - subModVal;
+ LWAlignedCurrDestAddr = (progRep->lastDestAddr) - addModVal;
+ destDiffBytes = (progRep->lastDestAddr - LWAlignedInitDestAddr);
+ numIterations = (LWAlignedCurrDestAddr -
+ LWAlignedInitDestAddr)/MCD_remVariants.remDestIncr[channel];
+ bytesNotXfered = numIterations *
+ (MCD_remVariants.remDestIncr[channel]
+ - MCD_remVariants.remXferSize[channel]);
+ progRep->dmaSize = destDiffBytes - bytesNotXfered - subModVal;
+ break;
+ default:
+ break;
+ }
+
+ /* This covers M1,P1,Z for source */
+ switch (MCD_remVariants.remSrcRsdIncr[channel]) {
+ case MINUS1:
+ progRep->lastSrcAddr =
+ progRep->currBufDesc->srcAddr +
+ (MCD_remVariants.remSrcIncr[channel] *
+ (progRep->dmaSize/MCD_remVariants.remXferSize[channel]));
+ break;
+ case ZERO:
+ progRep->lastSrcAddr = progRep->currBufDesc->srcAddr;
+ break;
+ case PLUS1:
+ progRep->lastSrcAddr =
+ progRep->currBufDesc->srcAddr +
+ (MCD_remVariants.remSrcIncr[channel] *
+ (progRep->dmaSize/MCD_remVariants.remXferSize[channel]));
+ break;
+ default:
+ break;
+ }
+
+ return MCD_OK;
+}
+/******************* End of MCD_XferProgrQuery() ********************/
+
+/********************************************************************/
+/* MCD_resmActions() does the majority of the actions of a DMA resume.
+ * It is called from MCD_killDma() and MCD_resumeDma(). It has to be
+ * a separate function because the kill function has to negate the task
+ * enable before resuming it, but the resume function has to do nothing
+ * if there is no DMA on that channel (i.e., if the enable bit is 0).
+ */
+static void MCD_resmActions(int channel)
+{
+ MCD_dmaBar->debugControl = DBG_CTL_DISABLE;
+ MCD_dmaBar->debugStatus = MCD_dmaBar->debugStatus;
+
+ /* Determine which initiators are asserted */
+ MCD_dmaBar->ptdDebug = PTD_DBG_TSK_VLD_INIT;
+
+ if ((MCD_dmaBar->ptdDebug >> channel) & 0x1)
+ MCD_chStatus[channel] = MCD_RUNNING;
+ else
+ MCD_chStatus[channel] = MCD_IDLE;
+}
+/********************* End of MCD_resmActions() *********************/
+
+/********************************************************************/
+/* Function: MCD_killDma
+ * Purpose: Halt the DMA on the requested channel, without any
+ * intention of resuming the DMA.
+ * Arguments: channel - requested channel
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ *
+ * Notes:
+ * A DMA may be killed from any state, including paused state, and it
+ * always goes to the MCD_HALTED state even if it is killed while in
+ * the MCD_NO_DMA or MCD_IDLE states.
+ */
+int MCD_killDma(int channel)
+{
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ MCD_dmaBar->taskControl[channel] = 0x0;
+
+ /* Clean up after a paused task */
+ if (MCD_chStatus[channel] == MCD_PAUSED) {
+ MCD_dmaBar->debugControl = DBG_CTL_DISABLE;
+ MCD_dmaBar->debugStatus = MCD_dmaBar->debugStatus;
+ }
+
+ MCD_chStatus[channel] = MCD_HALTED;
+
+ return MCD_OK;
+}
+/************************ End of MCD_killDma() **********************/
+
+/********************************************************************/
+/* Function: MCD_continDma
+ * Purpose: Continue a DMA which has stopped due to encountering an
+ * unready buffer descriptor.
+ * Arguments: channel - channel to continue the DMA on
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ *
+ * Notes:
+ * This routine does not check to see if there is a task which can
+ * be continued. Also this routine should not be used with single DMAs.
+ */
+int MCD_continDma(int channel)
+{
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ MCD_dmaBar->taskControl[channel] |= TASK_CTL_EN;
+ MCD_chStatus[channel] = MCD_RUNNING;
+
+ return MCD_OK;
+}
+/********************** End of MCD_continDma() **********************/
+
+/*********************************************************************
+ * MCD_pauseDma() and MCD_resumeDma() below use the DMA's debug unit
+ * to freeze a task and resume it. We freeze a task by breakpointing
+ * on the stated task. That is, not any specific place in the task,
+ * but any time that task executes. In particular, when that task
+ * executes, we want to freeze that task and only that task.
+ *
+ * The bits of the debug control register influence interrupts vs.
+ * breakpoints as follows:
+ * - Bits 14 and 0 enable or disable debug functions. If enabled, you
+ * will get the interrupt but you may or may not get a breakpoint.
+ * - Bits 2 and 1 decide whether you also get a breakpoint in addition
+ * to an interrupt.
+ *
+ * The debug unit can do these actions in response to either internally
+ * detected breakpoint conditions from the comparators, or in response
+ * to the external breakpoint pin, or both.
+ * - Bits 14 and 1 perform the above-described functions for
+ * internally-generated conditions, i.e., the debug comparators.
+ * - Bits 0 and 2 perform the above-described functions for external
+ * conditions, i.e., the breakpoint external pin.
+ *
+ * Note that, although you "always" get the interrupt when you turn
+ * on the debug functions, the interrupt can nevertheless, if desired, be
+ * masked by the corresponding bit in the PTD's IMR. Note also that
+ * this means that bits 14 and 0 must enable debug functions before
+ * bits 1 and 2, respectively, have any effect.
+ *
+ * NOTE: It's extremely important to not pause more than one DMA channel
+ * at a time.
+ ********************************************************************/
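+
+/*
+ * Illustrative sequence (not part of the driver): pausing one channel to
+ * take a stable progress snapshot, then resuming it. The channel number
+ * is a hypothetical example; per the note above, only one channel should
+ * be paused at a time.
+ *
+ *	MCD_pauseDma(3);
+ *	MCD_XferProgrQuery(3, &prog);
+ *	MCD_resumeDma(3);
+ */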
+
+/********************************************************************/
+/* Function: MCD_pauseDma
+ * Purpose: Pauses the DMA on a given channel (if any DMA is running
+ * on that channel).
+ * Arguments: channel
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ */
+int MCD_pauseDma(int channel)
+{
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ if (MCD_dmaBar->taskControl[channel] & TASK_CTL_EN) {
+ MCD_dmaBar->debugComp1 = channel;
+ MCD_dmaBar->debugControl =
+ DBG_CTL_ENABLE | (1 << (channel + 16));
+ MCD_chStatus[channel] = MCD_PAUSED;
+ }
+
+ return MCD_OK;
+}
+/************************* End of MCD_pauseDma() ********************/
+
+/********************************************************************/
+/* Function: MCD_resumeDma
+ * Purpose: Resumes the DMA on a given channel (if any DMA is
+ * running on that channel).
+ * Arguments: channel - channel on which to resume DMA
+ * Returns: MCD_CHANNEL_INVALID if channel is invalid, else MCD_OK
+ */
+int MCD_resumeDma(int channel)
+{
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ if (MCD_dmaBar->taskControl[channel] & TASK_CTL_EN)
+ MCD_resmActions(channel);
+
+ return MCD_OK;
+}
+/************************ End of MCD_resumeDma() ********************/
+
+/********************************************************************/
+/* Function: MCD_csumQuery
+ * Purpose: Provide the checksum after performing a non-chained DMA
+ * Arguments: channel - channel to report on
+ * csum - pointer to where to write the checksum/CRC
+ * Returns: MCD_ERROR if the channel is invalid, else MCD_OK
+ *
+ * Notes:
+ *
+ */
+int MCD_csumQuery(int channel, u32 *csum)
+{
+#ifdef MCD_INCLUDE_EU
+ if ((channel < 0) || (channel >= NCHANNELS))
+ return MCD_CHANNEL_INVALID;
+
+ *csum = MCD_relocBuffDesc[channel].csumResult;
+ return MCD_OK;
+#else
+ return MCD_ERROR;
+#endif
+}
+/*********************** End of MCD_csumQuery() *********************/
+
+/********************************************************************/
+/* Function: MCD_getCodeSize
+ * Purpose: Provide the size requirements of the microcoded tasks
+ * Returns: Size in bytes
+ */
+int MCD_getCodeSize(void)
+{
+#ifdef MCD_INCLUDE_EU
+ return 0x2b64;
+#else
+ return 0x1744;
+#endif
+}
+/********************** End of MCD_getCodeSize() ********************/
+
+/********************************************************************/
+/* Function: MCD_getVersion
+ * Purpose: Provide the version string and number
+ * Arguments: longVersion - user supplied pointer to a pointer to a char
+ * which points to the version string
+ * Returns: Version number and version string (by reference)
+ */
+char MCD_versionString[] = "Multi-channel DMA API v1.0";
+#define MCD_REV_MAJOR 0x01
+#define MCD_REV_MINOR 0x00
+
+int MCD_getVersion(char **longVersion)
+{
+ int ret = 0;
+ *longVersion = MCD_versionString;
+ ret = (MCD_REV_MAJOR << 8) | MCD_REV_MINOR;
+ return ret;
+}
+/********************** End of MCD_getVersion() *********************/
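+
+/*
+ * Illustrative decoding of the MCD_getVersion() return value (not part of
+ * the driver):
+ *
+ *	char *ver_str;
+ *	int ver = MCD_getVersion(&ver_str);
+ *	int major = (ver >> 8) & 0xff;	// MCD_REV_MAJOR
+ *	int minor = ver & 0xff;		// MCD_REV_MINOR
+ */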
+
+/********************************************************************/
+/* Private version of memcpy()
+ * Note that everything this is used for is longword-aligned.
+ */
+static void MCD_memcpy(int *dest, int *src, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i += sizeof(int), dest++, src++)
+ *dest = *src;
+}
+/********************************************************************/
--- /dev/null
+/*
+ * drivers/dma/MCD_progCheck.h
+ *
+ * Copyright (C) 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/* This file is autogenerated. Do not change */
+
+#define CURRBD 4
+#define DCOUNT 6
+#define DESTPTR 5
+#define SRCPTR 7
--- /dev/null
+/*
+ * drivers/dma/MCD_tasks.c
+ *
+ * Copyright (C) 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "MCD_dma.h"
+
+u32 MCD_varTab0[];
+u32 MCD_varTab1[];
+u32 MCD_varTab2[];
+u32 MCD_varTab3[];
+u32 MCD_varTab4[];
+u32 MCD_varTab5[];
+u32 MCD_varTab6[];
+u32 MCD_varTab7[];
+u32 MCD_varTab8[];
+u32 MCD_varTab9[];
+u32 MCD_varTab10[];
+u32 MCD_varTab11[];
+u32 MCD_varTab12[];
+u32 MCD_varTab13[];
+u32 MCD_varTab14[];
+u32 MCD_varTab15[];
+
+u32 MCD_funcDescTab0[];
+#ifdef MCD_INCLUDE_EU
+u32 MCD_funcDescTab1[];
+u32 MCD_funcDescTab2[];
+u32 MCD_funcDescTab3[];
+u32 MCD_funcDescTab4[];
+u32 MCD_funcDescTab5[];
+u32 MCD_funcDescTab6[];
+u32 MCD_funcDescTab7[];
+u32 MCD_funcDescTab8[];
+u32 MCD_funcDescTab9[];
+u32 MCD_funcDescTab10[];
+u32 MCD_funcDescTab11[];
+u32 MCD_funcDescTab12[];
+u32 MCD_funcDescTab13[];
+u32 MCD_funcDescTab14[];
+u32 MCD_funcDescTab15[];
+#endif
+
+u32 MCD_contextSave0[];
+u32 MCD_contextSave1[];
+u32 MCD_contextSave2[];
+u32 MCD_contextSave3[];
+u32 MCD_contextSave4[];
+u32 MCD_contextSave5[];
+u32 MCD_contextSave6[];
+u32 MCD_contextSave7[];
+u32 MCD_contextSave8[];
+u32 MCD_contextSave9[];
+u32 MCD_contextSave10[];
+u32 MCD_contextSave11[];
+u32 MCD_contextSave12[];
+u32 MCD_contextSave13[];
+u32 MCD_contextSave14[];
+u32 MCD_contextSave15[];
+
+u32 MCD_realTaskTableSrc[] =
+{
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab0, /* Task 0 Variable Table */
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave0, /* Task 0 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab1, /* Task 1 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab1, /* Task 1 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave1, /* Task 1 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab2, /* Task 2 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab2, /* Task 2 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave2, /* Task 2 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab3, /* Task 3 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab3, /* Task 3 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave3, /* Task 3 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab4, /* Task 4 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab4, /* Task 4 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave4, /* Task 4 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab5, /* Task 5 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab5, /* Task 5 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave5, /* Task 5 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab6, /* Task 6 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab6, /* Task 6 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave6, /* Task 6 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab7, /* Task 7 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab7, /* Task 7 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave7, /* Task 7 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab8, /* Task 8 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab8, /* Task 8 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave8, /* Task 8 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab9, /* Task 9 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab9, /* Task 9 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave9, /* Task 9 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab10, /* Task 10 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab10, /* Task 10 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave10, /* Task 10 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab11, /* Task 11 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab11, /* Task 11 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave11, /* Task 11 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab12, /* Task 12 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab12, /* Task 12 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave12, /* Task 12 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab13, /* Task 13 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab13, /* Task 13 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave13, /* Task 13 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab14, /* Task 14 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab14, /* Task 14 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave14, /* Task 14 context save space */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_varTab15, /* Task 15 Variable Table */
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_funcDescTab15, /* Task 15 Function Descriptor Table & Flags */
+#else
+ (u32)MCD_funcDescTab0, /* Task 0 Function Descriptor Table & Flags */
+#endif
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_contextSave15, /* Task 15 context save space */
+ 0x00000000,
+};
+
+
+u32 MCD_varTab0[] =
+{ /* Task 0 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+
+u32 MCD_varTab1[] =
+{ /* Task 1 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab2[] =
+{ /* Task 2 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab3[] =
+{ /* Task 3 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab4[] =
+{ /* Task 4 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab5[] =
+{ /* Task 5 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab6[] =
+{ /* Task 6 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab7[] =
+{ /* Task 7 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab8[] =
+{ /* Task 8 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab9[] =
+{ /* Task 9 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab10[] =
+{ /* Task 10 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab11[] =
+{ /* Task 11 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab12[] =
+{ /* Task 12 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab13[] =
+{ /* Task 13 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab14[] =
+{ /* Task 14 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_varTab15[] =
+{ /* Task 15 Variable Table */
+ 0x00000000, /* var[0] */
+ 0x00000000, /* var[1] */
+ 0x00000000, /* var[2] */
+ 0x00000000, /* var[3] */
+ 0x00000000, /* var[4] */
+ 0x00000000, /* var[5] */
+ 0x00000000, /* var[6] */
+ 0x00000000, /* var[7] */
+ 0x00000000, /* var[8] */
+ 0x00000000, /* var[9] */
+ 0x00000000, /* var[10] */
+ 0x00000000, /* var[11] */
+ 0x00000000, /* var[12] */
+ 0x00000000, /* var[13] */
+ 0x00000000, /* var[14] */
+ 0x00000000, /* var[15] */
+ 0x00000000, /* var[16] */
+ 0x00000000, /* var[17] */
+ 0x00000000, /* var[18] */
+ 0x00000000, /* var[19] */
+ 0x00000000, /* var[20] */
+ 0x00000000, /* var[21] */
+ 0x00000000, /* var[22] */
+ 0x00000000, /* var[23] */
+ 0xe0000000, /* inc[0] */
+ 0x20000000, /* inc[1] */
+ 0x2000ffff, /* inc[2] */
+ 0x00000000, /* inc[3] */
+ 0x00000000, /* inc[4] */
+ 0x00000000, /* inc[5] */
+ 0x00000000, /* inc[6] */
+ 0x00000000, /* inc[7] */
+};
+
+u32 MCD_funcDescTab0[] =
+{ /* Task 0 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+#ifdef MCD_INCLUDE_EU
+u32 MCD_funcDescTab1[] =
+{ /* Task 1 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab2[] =
+{ /* Task 2 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab3[] =
+{ /* Task 3 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab4[] =
+{ /* Task 4 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab5[] =
+{ /* Task 5 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab6[] =
+{ /* Task 6 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab7[] =
+{ /* Task 7 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab8[] =
+{ /* Task 8 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab9[] =
+{ /* Task 9 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab10[] =
+{ /* Task 10 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab11[] =
+{ /* Task 11 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab12[] =
+{ /* Task 12 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab13[] =
+{ /* Task 13 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab14[] =
+{ /* Task 14 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+
+u32 MCD_funcDescTab15[] =
+{ /* Task 15 Function Descriptor Table */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xa0045670, /* mainFunc(), EU# 3 */
+ 0xa0000000, /* rsduFunc(), EU# 3 */
+ 0xa0000000, /* crcAccumVal(), EU# 3 */
+ 0x20000000, /* setCrcAccum(), EU# 3 */
+ 0x21800000, /* and(), EU# 3 */
+ 0x21e00000, /* or(), EU# 3 */
+ 0x20400000, /* add(), EU# 3 */
+ 0x20500000, /* sub(), EU# 3 */
+ 0x205a0000, /* andNot(), EU# 3 */
+ 0x20a00000, /* shiftR(), EU# 3 */
+ 0x202fa000, /* andReadyBit(), EU# 3 */
+ 0x202f9000, /* andNotReadyBit(), EU# 3 */
+ 0x202ea000, /* andWrapBit(), EU# 3 */
+ 0x202da000, /* andLastBit(), EU# 3 */
+ 0x202e2000, /* andInterruptBit(), EU# 3 */
+ 0x202f2000, /* andCrcRestartBit(), EU# 3 */
+};
+#endif /*MCD_INCLUDE_EU*/
+
+u32 MCD_contextSave0[128]; /* Task 0 context save space */
+u32 MCD_contextSave1[128]; /* Task 1 context save space */
+u32 MCD_contextSave2[128]; /* Task 2 context save space */
+u32 MCD_contextSave3[128]; /* Task 3 context save space */
+u32 MCD_contextSave4[128]; /* Task 4 context save space */
+u32 MCD_contextSave5[128]; /* Task 5 context save space */
+u32 MCD_contextSave6[128]; /* Task 6 context save space */
+u32 MCD_contextSave7[128]; /* Task 7 context save space */
+u32 MCD_contextSave8[128]; /* Task 8 context save space */
+u32 MCD_contextSave9[128]; /* Task 9 context save space */
+u32 MCD_contextSave10[128]; /* Task 10 context save space */
+u32 MCD_contextSave11[128]; /* Task 11 context save space */
+u32 MCD_contextSave12[128]; /* Task 12 context save space */
+u32 MCD_contextSave13[128]; /* Task 13 context save space */
+u32 MCD_contextSave14[128]; /* Task 14 context save space */
+u32 MCD_contextSave15[128]; /* Task 15 context save space */
+
+u32 MCD_ChainNoEu_TDT[];
+u32 MCD_SingleNoEu_TDT[];
+#ifdef MCD_INCLUDE_EU
+u32 MCD_ChainEu_TDT[];
+u32 MCD_SingleEu_TDT[];
+#endif
+u32 MCD_ENetRcv_TDT[];
+u32 MCD_ENetXmit_TDT[];
+
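+/*
+ * Initial image of the task table, one eight-word entry per task model:
+ * the first two words hold the start and end addresses of that model's
+ * transfer descriptor table (TDT); the remaining six words of each
+ * entry are left zero here.
+ */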
+u32 MCD_modelTaskTableSrc[] =
+{
+ (u32)MCD_ChainNoEu_TDT,
+ (u32)&((u8 *)MCD_ChainNoEu_TDT)[0x0000016c],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_SingleNoEu_TDT,
+ (u32)&((u8 *)MCD_SingleNoEu_TDT)[0x000000d4],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+#ifdef MCD_INCLUDE_EU
+ (u32)MCD_ChainEu_TDT,
+ (u32)&((u8 *)MCD_ChainEu_TDT)[0x000001b4],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_SingleEu_TDT,
+ (u32)&((u8 *)MCD_SingleEu_TDT)[0x00000124],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+#endif
+ (u32)MCD_ENetRcv_TDT,
+ (u32)&((u8 *)MCD_ENetRcv_TDT)[0x000000a4],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ (u32)MCD_ENetXmit_TDT,
+ (u32)&((u8 *)MCD_ENetXmit_TDT)[0x000000d0],
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+u32 MCD_ChainNoEu_TDT[] =
+{
+ 0x80004000,
+ 0x8118801b,
+ 0xb8c60018,
+ 0x10002b10,
+ 0x7000000d,
+ 0x018cf89f,
+ 0x6000000a,
+ 0x080cf89f,
+ 0x000001f8,
+ 0x98180364,
+ 0x8118801b,
+ 0xf8c6001a,
+ 0xb8c6601b,
+ 0x10002710,
+ 0x00000f18,
+ 0xb8c6001d,
+ 0x10001310,
+ 0x60000007,
+ 0x014cf88b,
+ 0x98c6001c,
+ 0x00000710,
+ 0x98c70018,
+ 0x10001f10,
+ 0x0000c818,
+ 0x000001f8, /* 0060(:0): NOP */
+ 0xc1476018,
+ 0xc003231d,
+ 0x811a601b,
+ 0xc1862102,
+ 0x849be009,
+ 0x03fed7b8,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x1000cb20,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb38,
+ 0x0000c728,
+ 0x000001f8, /* 00B0(:0): NOP */
+ 0xc1476018,
+ 0xc003241d,
+ 0x811a601b,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x0000d3a0,
+ 0xc1862102,
+ 0x849be009,
+ 0x0bfed7b8,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x1000cb20,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf88f,
+ 0x1000cb38,
+ 0x0000c728,
+ 0x000001f8, /* 010C(:0): NOP */
+ 0x8118801b,
+ 0xd8c60018,
+ 0x98c6601c,
+ 0x6000000b,
+ 0x0c8cfc9f,
+ 0x000001f8, /* 0124(:0): NOP */
+ 0xa146001e,
+ 0x10000b08,
+ 0x10002050,
+ 0xb8c60018,
+ 0x10002b10,
+ 0x7000000a,
+ 0x080cf89f,
+ 0x6000000d,
+ 0x018cf89f,
+ 0x000001f8, /* 014C(:0): NOP */
+ 0x8618801b,
+ 0x7000000e,
+ 0x084cf21f,
+ 0xd8990336,
+ 0x8019801b,
+ 0x040001f8,
+ 0x000001f8, /* 0168(:0): NOP */
+ 0x000001f8, /* 016C(:0): NOP */
+};
+u32 MCD_SingleNoEu_TDT[] =
+{
+ 0x8198001b,
+ 0x7000000d,
+ 0x080cf81f,
+ 0x8198801b,
+ 0x6000000e,
+ 0x084cf85f,
+ 0x000001f8, /* 0018(:0): NOP */
+ 0x8298001b,
+ 0x7000000d,
+ 0x010cf81f,
+ 0x6000000e,
+ 0x018cf81f,
+ 0xc202601b,
+ 0xc002221c,
+ 0x809a601b,
+ 0xc10420c2,
+ 0x839be009,
+ 0x03fed7b8,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000006,
+ 0x088cf889,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf889,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf889,
+ 0x0000cb38,
+ 0x000001f8, /* 0074(:0): NOP */
+ 0xc202601b,
+ 0xc002229c,
+ 0x809a601b,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x0000d3a0,
+ 0xc10420c2,
+ 0x839be009,
+ 0x0bfed7b8,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000006,
+ 0x088cf889,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf889,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf889,
+ 0x0000cb38,
+ 0x000001f8, /* 00C8(:0): NOP */
+ 0xc318022d,
+ 0x8018801b,
+ 0x040001f8,
+};
+#ifdef MCD_INCLUDE_EU
+u32 MCD_ChainEu_TDT[] =
+{
+ 0x80004000,
+ 0x8198801b,
+ 0xb8c68018,
+ 0x10002f10,
+ 0x7000000d,
+ 0x01ccf89f,
+ 0x6000000a,
+ 0x080cf89f,
+ 0x000001f8,
+ 0x981803a4,
+ 0x8198801b,
+ 0xf8c6801a,
+ 0xb8c6e01b,
+ 0x10002b10,
+ 0x00001318,
+ 0xb8c6801d,
+ 0x10001710,
+ 0x60000007,
+ 0x018cf88c,
+ 0x98c6801c,
+ 0x00000b10,
+ 0x98c78018,
+ 0x10002310,
+ 0x0000c820,
+ 0x000001f8, /* 0060(:0): NOP */
+ 0x8698801b,
+ 0x7000000f,
+ 0x084cf2df,
+ 0xd899042d,
+ 0x8019801b,
+ 0x60000003,
+ 0x2cd7c7df, /* 007C(:979): DRD2B2: EU3(var13) */
+ 0xd8990364,
+ 0x8019801b,
+ 0x60000003,
+ 0x2c17c7df, /* 008C(:981): DRD2B2: EU3(var1) */
+ 0x000001f8, /* 0090(:0): NOP */
+ 0xc1c7e018,
+ 0xc003a35e,
+ 0x819a601b,
+ 0xc206a142,
+ 0x851be009,
+ 0x63fe0000,
+ 0x0d4cfddf,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000002,
+ 0x004cf81f,
+ 0x1000cb20,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb38,
+ 0x0000c728,
+ 0x000001f8, /* 00EC(:0): NOP */
+ 0xc1c7e018,
+ 0xc003a49e,
+ 0x819a601b,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x0000d3a0,
+ 0xc206a142,
+ 0x851be009,
+ 0x6bfe0000,
+ 0x0d4cfddf,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000002,
+ 0x004cf81f,
+ 0x1000cb20,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf891,
+ 0x1000cb38,
+ 0x0000c728,
+ 0x000001f8, /* 0154(:0): NOP */
+ 0x8198801b,
+ 0xd8c68018,
+ 0x98c6e01c,
+ 0x6000000b,
+ 0x0c8cfc9f,
+ 0x0000cc08,
+ 0xa1c6801e,
+ 0x10000f08,
+ 0x10002458,
+ 0xb8c68018,
+ 0x10002f10,
+ 0x7000000a,
+ 0x080cf89f,
+ 0x6000000d,
+ 0x01ccf89f,
+ 0x000001f8, /* 0194(:0): NOP */
+ 0x8698801b,
+ 0x7000000e,
+ 0x084cf25f,
+ 0xd899037f,
+ 0x8019801b,
+ 0x040001f8,
+ 0x000001f8, /* 01B0(:0): NOP */
+ 0x000001f8, /* 01B4(:0): NOP */
+};
+u32 MCD_SingleEu_TDT[] =
+{
+ 0x8218001b,
+ 0x7000000d,
+ 0x080cf81f,
+ 0x8218801b,
+ 0x6000000e,
+ 0x084cf85f,
+ 0x000001f8, /* 0018(:0): NOP */
+ 0x8318001b,
+ 0x7000000d,
+ 0x014cf81f,
+ 0x6000000e,
+ 0x01ccf81f,
+ 0x8498001b,
+ 0x7000000f,
+ 0x080cf19f,
+ 0xd81882a4,
+ 0x8019001b,
+ 0x60000003,
+ 0x2c97c7df,
+ 0xd818826d,
+ 0x8019001b,
+ 0x60000003,
+ 0x2c17c7df,
+ 0x000001f8, /* 005C(:0): NOP */
+ 0xc282e01b,
+ 0xc002a25e,
+ 0x811a601b,
+ 0xc184a102,
+ 0x841be009,
+ 0x63fe0000,
+ 0x0d4cfddf,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000002,
+ 0x004cf99f,
+ 0x70000006,
+ 0x088cf88b,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf88b,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf88b,
+ 0x0000cb38,
+ 0x000001f8, /* 00B0(:0): NOP */
+ 0xc282e01b,
+ 0xc002a31e,
+ 0x811a601b,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x0000d3a0,
+ 0xc184a102,
+ 0x841be009,
+ 0x6bfe0000,
+ 0x0d4cfddf,
+ 0xda9b001b,
+ 0x9b9be01b,
+ 0x70000002,
+ 0x004cf99f,
+ 0x70000006,
+ 0x088cf88b,
+ 0x1000cb28,
+ 0x70000006,
+ 0x088cf88b,
+ 0x1000cb30,
+ 0x70000006,
+ 0x088cf88b,
+ 0x0000cb38,
+ 0x000001f8, /* 0110(:0): NOP */
+ 0x8144801c,
+ 0x0000c008,
+ 0xc398027f,
+ 0x8018801b,
+ 0x040001f8,
+};
+#endif
+u32 MCD_ENetRcv_TDT[] =
+{
+ 0x80004000,
+ 0x81988000,
+ 0x10000788,
+ 0x6000000a,
+ 0x080cf05f,
+ 0x98180209,
+ 0x81c40004,
+ 0x7000000e,
+ 0x010cf05f,
+ 0x7000000c,
+ 0x01ccf05f,
+ 0x70000004,
+ 0x014cf049,
+ 0x70000004,
+ 0x004cf04a,
+ 0x00000b88,
+ 0xc4030150,
+ 0x8119e012,
+ 0x03e0cf90,
+ 0x81188000,
+ 0x000ac788,
+ 0xc4030000,
+ 0x8199e000,
+ 0x63e00004,
+ 0x084cfc8b,
+ 0xd8990000,
+ 0x9999e000,
+ 0x60000005,
+ 0x0cccf841,
+ 0x81c60000,
+ 0xc399021b,
+ 0x80198000,
+ 0x00008400,
+ 0x00000f08,
+ 0x81988000,
+ 0x10000788,
+ 0x6000000a,
+ 0x080cf05f,
+ 0xc2188209,
+ 0x80190000,
+ 0x040001f8,
+ 0x000001f8,
+};
+u32 MCD_ENetXmit_TDT[] =
+{
+ 0x80004000,
+ 0x81988000,
+ 0x10000788,
+ 0x6000000a,
+ 0x080cf05f,
+ 0x98180309,
+ 0x80004003,
+ 0x81c60004,
+ 0x7000000e,
+ 0x014cf05f,
+ 0x7000000c,
+ 0x028cf05f,
+ 0x7000000d,
+ 0x018cf05f,
+ 0x70000004,
+ 0x01ccf04d,
+ 0x10000b90,
+ 0x60000004,
+ 0x020cf0a1,
+ 0xc3188312,
+ 0x83c70000,
+ 0x00001f10,
+ 0xc583a3c3,
+ 0x81042325,
+ 0x03e0c798,
+ 0xd8990000,
+ 0x9999e000,
+ 0x000acf98,
+ 0xd8992306,
+ 0x9999e03f,
+ 0x03eac798,
+ 0xd8990000,
+ 0x9999e000,
+ 0x000acf98,
+ 0xd8990000,
+ 0x99832302,
+ 0x0beac798,
+ 0x81988000,
+ 0x6000000b,
+ 0x0c4cfc5f,
+ 0x81c80000,
+ 0xc5190312,
+ 0x80198000,
+ 0x00008400,
+ 0x00000f08,
+ 0x81988000,
+ 0x10000788,
+ 0x6000000a,
+ 0x080cf05f,
+ 0xc2988309,
+ 0x80190000,
+ 0x040001f8,
+ 0x000001f8,
+};
+
+#ifdef MCD_INCLUDE_EU
+MCD_bufDesc MCD_singleBufDescs[NCHANNELS];
+#endif
--- /dev/null
+/*
+ * drivers/dma/MCD_tasksInit.c
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * Autogenerated - Do not edit!
+ */
+
+#include "MCD_dma.h"
+
+extern dmaRegs *MCD_dmaBar;
+
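+/*
+ * Each MCD_startDma*() routine below programs one task's variable table
+ * through MCD_SET_VAR(): indices 0-23 select var[n] and indices 24-31
+ * select inc[n], matching the MCD_varTab* layout in MCD_tasks.c.  The
+ * inc[n] words pack a 16-bit control code in the upper half and a
+ * 16-bit increment in the lower half, e.g. (0xe000 << 16) | srcIncr.
+ */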
+
+/*
+ * Task 0
+ */
+
+void MCD_startDmaChainNoEu(int *currBD, short srcIncr,
+ short destIncr, int xferSize, short xferSizeIncr,
+ int *cSave, volatile TaskTableEntry *taskTable,
+ int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 2, (u32)currBD); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 25,
+ (u32)(0xe000 << 16) | (0xffff & srcIncr));
+ /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 24,
+ (u32)(0xe000 << 16) | (0xffff & destIncr));
+ /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 11, (u32)xferSize); /* var[11] */
+ MCD_SET_VAR(taskTable+channel, 26,
+ (u32)(0x2000 << 16) | (0xffff & xferSizeIncr));
+ /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 0, (u32)cSave); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)0x00000000); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 3, (u32)0x00000000); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)0x00000000); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)0x00000000); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)0x00000000); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 7, (u32)0x00000000); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 8, (u32)0x00000000); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x00000000); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x00000000); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 12, (u32)0x00000000); /* var[12] */
+ MCD_SET_VAR(taskTable+channel, 13, (u32)0x80000000); /* var[13] */
+ MCD_SET_VAR(taskTable+channel, 14, (u32)0x00000010); /* var[14] */
+ MCD_SET_VAR(taskTable+channel, 15, (u32)0x00000004); /* var[15] */
+ MCD_SET_VAR(taskTable+channel, 16, (u32)0x08000000); /* var[16] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0x00000000); /* inc[3] */
+ MCD_SET_VAR(taskTable+channel, 28, (u32)0x80000000); /* inc[4] */
+ MCD_SET_VAR(taskTable+channel, 29, (u32)0x80000001); /* inc[5] */
+ MCD_SET_VAR(taskTable+channel, 30, (u32)0x40000000); /* inc[6] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
+
+
+/*
+ * Task 1
+ */
+
+void MCD_startDmaSingleNoEu(char *srcAddr, short srcIncr,
+ char *destAddr, short destIncr, int dmaSize,
+ short xferSizeIncr, int flags, int *currBD, int *cSave,
+ volatile TaskTableEntry *taskTable, int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 7, (u32)srcAddr); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 25,
+ (u32)(0xe000 << 16) | (0xffff & srcIncr));
+ /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 2, (u32)destAddr); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 24,
+ (u32)(0xe000 << 16) | (0xffff & destIncr));
+ /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 3, (u32)dmaSize); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 26,
+ (u32)(0x2000 << 16) | (0xffff & xferSizeIncr));
+ /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)flags); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)currBD); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 0, (u32)cSave); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)0x00000000); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)0x00000000); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 8, (u32)0x00000000); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x00000004); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x08000000); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0x00000000); /* inc[3] */
+ MCD_SET_VAR(taskTable+channel, 28, (u32)0x80000001); /* inc[4] */
+ MCD_SET_VAR(taskTable+channel, 29, (u32)0x40000000); /* inc[5] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
+
+
+/*
+ * Task 2
+ */
+
+void MCD_startDmaChainEu(int *currBD, short srcIncr, short destIncr,
+ int xferSize, short xferSizeIncr, int *cSave,
+ volatile TaskTableEntry *taskTable, int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 3, (u32)currBD); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 25,
+ (u32)(0xe000 << 16) | (0xffff & srcIncr));
+ /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 24,
+ (u32)(0xe000 << 16) | (0xffff & destIncr));
+ /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 12, (u32)xferSize);
+ /* var[12] */
+ MCD_SET_VAR(taskTable+channel, 26,
+ (u32)(0x2000 << 16) | (0xffff & xferSizeIncr));
+ /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 0, (u32)cSave); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)0x00000000); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 2, (u32)0x00000000); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)0x00000000); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)0x00000000); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)0x00000000); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 7, (u32)0x00000000); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 8, (u32)0x00000000); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x00000000); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x00000000); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 11, (u32)0x00000000); /* var[11] */
+ MCD_SET_VAR(taskTable+channel, 13, (u32)0x00000000); /* var[13] */
+ MCD_SET_VAR(taskTable+channel, 14, (u32)0x80000000); /* var[14] */
+ MCD_SET_VAR(taskTable+channel, 15, (u32)0x00000010); /* var[15] */
+ MCD_SET_VAR(taskTable+channel, 16, (u32)0x00000001); /* var[16] */
+ MCD_SET_VAR(taskTable+channel, 17, (u32)0x00000004); /* var[17] */
+ MCD_SET_VAR(taskTable+channel, 18, (u32)0x08000000); /* var[18] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0x00000000); /* inc[3] */
+ MCD_SET_VAR(taskTable+channel, 28, (u32)0x80000000); /* inc[4] */
+ MCD_SET_VAR(taskTable+channel, 29, (u32)0xc0000000); /* inc[5] */
+ MCD_SET_VAR(taskTable+channel, 30, (u32)0x80000001); /* inc[6] */
+ MCD_SET_VAR(taskTable+channel, 31, (u32)0x40000000); /* inc[7] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
+
+
+/*
+ * Task 3
+ */
+
+void MCD_startDmaSingleEu(char *srcAddr, short srcIncr,
+ char *destAddr, short destIncr, int dmaSize,
+ short xferSizeIncr, int flags, int *currBD, int *cSave,
+ volatile TaskTableEntry *taskTable, int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 8, (u32)srcAddr); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 25,
+ (u32)(0xe000 << 16) | (0xffff & srcIncr)); /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 3, (u32)destAddr); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 24,
+ (u32)(0xe000 << 16) | (0xffff & destIncr)); /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)dmaSize); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 26,
+ (u32)(0x2000 << 16) | (0xffff & xferSizeIncr)); /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)flags); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 2, (u32)currBD); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 0, (u32)cSave); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)0x00000000); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)0x00000000); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 7, (u32)0x00000000); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x00000000); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x00000001); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 11, (u32)0x00000004); /* var[11] */
+ MCD_SET_VAR(taskTable+channel, 12, (u32)0x08000000); /* var[12] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0x00000000); /* inc[3] */
+ MCD_SET_VAR(taskTable+channel, 28, (u32)0xc0000000); /* inc[4] */
+ MCD_SET_VAR(taskTable+channel, 29, (u32)0x80000000); /* inc[5] */
+ MCD_SET_VAR(taskTable+channel, 30, (u32)0x80000001); /* inc[6] */
+ MCD_SET_VAR(taskTable+channel, 31, (u32)0x40000000); /* inc[7] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
+
+
+/*
+ * Task 4
+ */
+
+void MCD_startDmaENetRcv(char *bDBase, char *currBD, char *rcvFifoPtr,
+ volatile TaskTableEntry *taskTable, int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 0, (u32)bDBase); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 3, (u32)currBD); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)rcvFifoPtr); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)0x00000000); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 2, (u32)0x00000000); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)0x00000000); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)0x00000000); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 7, (u32)0x00000000); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 8, (u32)0x00000000); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x0000ffff); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x30000000); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 11, (u32)0x0fffffff); /* var[11] */
+ MCD_SET_VAR(taskTable+channel, 12, (u32)0x00000008); /* var[12] */
+ MCD_SET_VAR(taskTable+channel, 24, (u32)0x00000000); /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 25, (u32)0x60000000); /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 26, (u32)0x20000004); /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0x40000000); /* inc[3] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
+
+
+/*
+ * Task 5
+ */
+
+void MCD_startDmaENetXmit(char *bDBase, char *currBD,
+ char *xmitFifoPtr, volatile TaskTableEntry *taskTable,
+ int channel)
+{
+
+ MCD_SET_VAR(taskTable+channel, 0, (u32)bDBase); /* var[0] */
+ MCD_SET_VAR(taskTable+channel, 3, (u32)currBD); /* var[3] */
+ MCD_SET_VAR(taskTable+channel, 11, (u32)xmitFifoPtr); /* var[11] */
+ MCD_SET_VAR(taskTable+channel, 1, (u32)0x00000000); /* var[1] */
+ MCD_SET_VAR(taskTable+channel, 2, (u32)0x00000000); /* var[2] */
+ MCD_SET_VAR(taskTable+channel, 4, (u32)0x00000000); /* var[4] */
+ MCD_SET_VAR(taskTable+channel, 5, (u32)0x00000000); /* var[5] */
+ MCD_SET_VAR(taskTable+channel, 6, (u32)0x00000000); /* var[6] */
+ MCD_SET_VAR(taskTable+channel, 7, (u32)0x00000000); /* var[7] */
+ MCD_SET_VAR(taskTable+channel, 8, (u32)0x00000000); /* var[8] */
+ MCD_SET_VAR(taskTable+channel, 9, (u32)0x00000000); /* var[9] */
+ MCD_SET_VAR(taskTable+channel, 10, (u32)0x00000000); /* var[10] */
+ MCD_SET_VAR(taskTable+channel, 12, (u32)0x00000000); /* var[12] */
+ MCD_SET_VAR(taskTable+channel, 13, (u32)0x0000ffff); /* var[13] */
+ MCD_SET_VAR(taskTable+channel, 14, (u32)0xffffffff); /* var[14] */
+ MCD_SET_VAR(taskTable+channel, 15, (u32)0x00000004); /* var[15] */
+ MCD_SET_VAR(taskTable+channel, 16, (u32)0x00000008); /* var[16] */
+ MCD_SET_VAR(taskTable+channel, 24, (u32)0x00000000); /* inc[0] */
+ MCD_SET_VAR(taskTable+channel, 25, (u32)0x60000000); /* inc[1] */
+ MCD_SET_VAR(taskTable+channel, 26, (u32)0x40000000); /* inc[2] */
+ MCD_SET_VAR(taskTable+channel, 27, (u32)0xc000fffc); /* inc[3] */
+ MCD_SET_VAR(taskTable+channel, 28, (u32)0xe0000004); /* inc[4] */
+ MCD_SET_VAR(taskTable+channel, 29, (u32)0x80000000); /* inc[5] */
+ MCD_SET_VAR(taskTable+channel, 30, (u32)0x4000ffff); /* inc[6] */
+ MCD_SET_VAR(taskTable+channel, 31, (u32)0xe0000001); /* inc[7] */
+
+ /* Set the task's Enable bit in its Task Control Register */
+ MCD_dmaBar->taskControl[channel] |= (u16)0x8000;
+}
--- /dev/null
+/*
+ * drivers/dma/MCD_tasksInit.h
+ *
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef MCD_TSK_INIT_H
+#define MCD_TSK_INIT_H 1
+
+/*
+ * Autogenerated - Do not edit!
+ */
+
+/*
+ * Task 0
+ */
+void MCD_startDmaChainNoEu(int *currBD, short srcIncr,
+ short destIncr, int xferSize,
+ short xferSizeIncr, int *cSave,
+ volatile TaskTableEntry *taskTable,
+ int channel);
+
+
+/*
+ * Task 1
+ */
+void MCD_startDmaSingleNoEu(char *srcAddr, short srcIncr,
+ char *destAddr, short destIncr, int dmaSize,
+ short xferSizeIncr, int flags, int *currBD,
+ int *cSave, volatile TaskTableEntry *taskTable,
+ int channel);
+
+
+/*
+ * Task 2
+ */
+void MCD_startDmaChainEu(int *currBD, short srcIncr, short destIncr,
+ int xferSize, short xferSizeIncr, int *cSave,
+ volatile TaskTableEntry *taskTable,
+ int channel);
+
+
+/*
+ * Task 3
+ */
+void MCD_startDmaSingleEu(char *srcAddr, short srcIncr,
+ char *destAddr, short destIncr, int dmaSize,
+ short xferSizeIncr, int flags, int *currBD,
+ int *cSave, volatile TaskTableEntry *taskTable,
+ int channel);
+
+
+/*
+ * Task 4
+ */
+void MCD_startDmaENetRcv(char *bDBase, char *currBD,
+ char *rcvFifoPtr,
+ volatile TaskTableEntry *taskTable, int channel);
+
+
+/*
+ * Task 5
+ */
+void MCD_startDmaENetXmit(char *bDBase, char *currBD,
+ char *xmitFifoPtr,
+ volatile TaskTableEntry *taskTable, int channel);
+
+#endif /* MCD_TSK_INIT_H */
--- /dev/null
+#
+# Makefile for Linux arch/m68k/coldfire/m547x source directory
+#
+
+obj-$(CONFIG_M547X_8X) += config.o mcf548x-devices.o devices.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MCD_DMA) += dma.o MCD_tasksInit.o MCD_dmaApi.o MCD_tasks.o
--- /dev/null
+/*
+ * linux/arch/m68k/coldfire/config.c
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan kmahan@freescale.com
+ * Matt Waddel Matt.Waddel@freescale.com
+ * Shrek Wu b16972@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/clockchips.h>
+#include <asm/bootinfo.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/cfcache.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <asm/cfmmu.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/movs.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+#include <asm/mcfsim.h>
+
+#define UBOOT_PCI
+#include <asm/bootinfo.h>
+#include <asm/m5485gpt.h>
+
+extern int get_irq_list(struct seq_file *p, void *v);
+extern char _text, _end;
+extern char _etext, _edata, __init_begin, __init_end;
+extern struct console mcfrs_console;
+extern char m68k_command_line[CL_SIZE];
+extern unsigned long availmem;
+
+static int irq_enable[NR_IRQS];
+unsigned long num_pages;
+
+/* cf dma physical addresses */
+unsigned long cf_dma_base;
+unsigned long cf_dma_end;
+unsigned long cf_dma_size;
+EXPORT_SYMBOL(cf_dma_base);
+EXPORT_SYMBOL(cf_dma_end);
+EXPORT_SYMBOL(cf_dma_size);
+
+/* ethernet mac addresses from uboot */
+unsigned char uboot_enet0[6];
+unsigned char uboot_enet1[6];
+
+void coldfire_sort_memrec(void)
+{
+ int i, j;
+
+ /* Sort the m68k_memory records by address */
+ for (i = 0; i < m68k_num_memory; ++i) {
+ for (j = i + 1; j < m68k_num_memory; ++j) {
+ if (m68k_memory[i].addr > m68k_memory[j].addr) {
+ struct mem_info tmp;
+ tmp = m68k_memory[i];
+ m68k_memory[i] = m68k_memory[j];
+ m68k_memory[j] = tmp;
+ }
+ }
+ }
+ /* Trim off discontiguous bits */
+ for (i = 1; i < m68k_num_memory; ++i) {
+ if ((m68k_memory[i-1].addr + m68k_memory[i-1].size) !=
+ m68k_memory[i].addr) {
+ printk(KERN_DEBUG "m68k_parse_bootinfo: "
+ "addr gap between 0x%lx & 0x%lx\n",
+ m68k_memory[i-1].addr+m68k_memory[i-1].size,
+ m68k_memory[i].addr);
+ m68k_num_memory = i;
+ break;
+ }
+ }
+}
+
+/*
+ * UBoot Handler
+ */
+int __init uboot_commandline(char *bootargs)
+{
+ int len = 0, cmd_line_len;
+ static struct uboot_record uboot_info;
+ u32 offset = PAGE_OFFSET_RAW - PHYS_OFFSET;
+
+ extern unsigned long uboot_info_stk;
+
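+	/*
+	 * uboot_info_stk points at five consecutive 32-bit words: the
+	 * bd_info pointer, the initrd start/end and the command line
+	 * start/stop addresses.  The stored values are pre-remap
+	 * addresses, so 'offset' rebases them into the kernel mapping.
+	 */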
+ /* validate address */
+ if ((uboot_info_stk < PAGE_OFFSET_RAW) ||
+ (uboot_info_stk >= (PAGE_OFFSET_RAW + CONFIG_SDRAM_SIZE)))
+ return 0;
+
+ /* Add offset to get post-remapped kernel memory location */
+ uboot_info.bdi = (struct bd_info *)((*(u32 *)(uboot_info_stk))
+ + offset);
+ uboot_info.initrd_start = (*(u32 *)(uboot_info_stk+4)) + offset;
+ uboot_info.initrd_end = (*(u32 *)(uboot_info_stk+8)) + offset;
+ uboot_info.cmd_line_start = (*(u32 *)(uboot_info_stk+12)) + offset;
+ uboot_info.cmd_line_stop = (*(u32 *)(uboot_info_stk+16)) + offset;
+
+ /* copy over mac addresses */
+ memcpy(uboot_enet0, uboot_info.bdi->bi_enet0addr, 6);
+ memcpy(uboot_enet1, uboot_info.bdi->bi_enet1addr, 6);
+
+ /* copy command line */
+ cmd_line_len = uboot_info.cmd_line_stop - uboot_info.cmd_line_start;
+	if ((cmd_line_len > 0) && (cmd_line_len < CL_SIZE-1)) {
+		strncpy(bootargs, (char *)uboot_info.cmd_line_start,
+			cmd_line_len);
+		bootargs[cmd_line_len] = '\0';
+		len = cmd_line_len;
+	}
+
+ return len;
+}
+
+/*
+ * This routine does things not done in the bootloader.
+ */
+#define DEFAULT_COMMAND_LINE \
+	"debug root=/dev/nfs rw " \
+	"nfsroot=172.27.155.1:/tftpboot/rigo/rootfs/ " \
+	"ip=172.27.155.75:172.27.155.1"
+
+asmlinkage void __init cf_early_init(void)
+{
+ struct bi_record *record = (struct bi_record *) &_end;
+
+ SET_VBR((void *)MCF_RAMBAR0);
+
+ /* Mask all interrupts */
+ MCF_IMRL = 0xFFFFFFFF;
+ MCF_IMRH = 0xFFFFFFFF;
+
+ m68k_machtype = MACH_CFMMU;
+ m68k_fputype = FPU_CFV4E;
+ m68k_mmutype = MMU_CFV4E;
+ m68k_cputype = CPU_CFV4E;
+
+ m68k_num_memory = 0;
+ m68k_memory[m68k_num_memory].addr = CONFIG_SDRAM_BASE;
+ m68k_memory[m68k_num_memory++].size = CONFIG_SDRAM_SIZE;
+
+ if (!uboot_commandline(m68k_command_line)) {
+#if defined(CONFIG_BOOTPARAM)
+ strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE-1);
+#else
+ strcpy(m68k_command_line, DEFAULT_COMMAND_LINE);
+#endif
+ }
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+ /* add initrd image */
+ record = (struct bi_record *) ((void *)record + record->size);
+ record->tag = BI_RAMDISK;
+ record->size = sizeof(record->tag) + sizeof(record->size)
+ + sizeof(record->data[0]) + sizeof(record->data[1]);
+#endif
+
+ /* Mark end of tags. */
+ record = (struct bi_record *) ((void *) record + record->size);
+ record->tag = 0;
+ record->data[0] = 0;
+ record->data[1] = 0;
+ record->size = sizeof(record->tag) + sizeof(record->size)
+ + sizeof(record->data[0]) + sizeof(record->data[1]);
+
+ /* Invalidate caches via CACR */
+ flush_bcache();
+ cacr_set(CACHE_DISABLE_MODE);
+
+ /* Turn on caches via CACR, enable EUSP */
+ cacr_set(CACHE_INITIAL_MODE);
+
+}
+
+/* Assembler routines */
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void system_call(void);
+asmlinkage void inthandler(void);
+
+void __init coldfire_trap_init(void)
+{
+ int i = 0;
+ e_vector *vectors;
+
+ vectors = (e_vector *)MCF_RAMBAR0;
+ /*
+ * There is a common trap handler and common interrupt
+ * handler that handle almost every vector. We treat
+ * the system call and bus error special, they get their
+ * own first level handlers.
+ */
+ for (i = 3; (i <= 23); i++)
+ vectors[i] = trap;
+ for (i = 33; (i <= 63); i++)
+ vectors[i] = trap;
+ for (i = 24; (i <= 31); i++)
+ vectors[i] = inthandler;
+ for (i = 64; (i < 255); i++)
+ vectors[i] = inthandler;
+
+ vectors[255] = 0;
+ vectors[2] = buserr;
+ vectors[32] = system_call;
+}
+
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+void coldfire_tick(void)
+{
+ /* Reset the ColdFire timer */
+ MCF_SSR(0) = MCF_SSR_ST;
+}
+
+void __init coldfire_sched_init(irq_handler_t handler)
+{
+ int irq = ISC_SLTn(0);
+
+ MCF_SCR(0) = 0;
+ MCF_ICR(irq) = ILP_SLT0;
+ request_irq(64 + irq, handler, IRQF_DISABLED, "ColdFire Timer 0", NULL);
+ MCF_SLTCNT(0) = MCF_BUSCLK / HZ;
+ MCF_SCR(0) |= MCF_SCR_TEN | MCF_SCR_IEN | MCF_SCR_RUN;
+}
+
+unsigned long coldfire_gettimeoffset(void)
+{
+ volatile unsigned long trr, tcn, offset;
+ trr = MCF_SLTCNT(0);
+ tcn = MCF_SCNT(0);
+
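+	/*
+	 * Scale the elapsed slice-timer ticks (trr - tcn) to microseconds:
+	 * a full period of trr ticks corresponds to 1000000/HZ us.  The
+	 * >> 3 on both the per-tick factor and the divisor keeps the
+	 * 32-bit multiply below from overflowing.
+	 */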
+ offset = (trr - tcn) * ((1000000 >> 3) / HZ) / (trr >> 3);
+ if (MCF_SSR(0) & MCF_SSR_ST)
+ offset += 1000000 / HZ;
+
+ return offset;
+}
+#else
+static unsigned long long sched_dtim_clk_val;
+
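+/*
+ * sched_clock()/sys_dtim2_read() report accumulated bus-clock ticks:
+ * sched_dtim_clk_val advances by MCF_BUSCLK/HZ on every timer interrupt
+ * (see coldfire_dtim_clk_irq) and the current slice-timer progress
+ * (trr - tcn) is added on top.
+ */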
+unsigned long long sched_clock(void)
+{
+ unsigned long flags;
+ unsigned long long cycles;
+ volatile unsigned long trr, tcn, offset;
+
+ local_irq_save(flags);
+ trr = MCF_SLTCNT(0);
+ tcn = MCF_SCNT(0);
+ offset = (trr - tcn);
+ cycles = sched_dtim_clk_val;
+ local_irq_restore(flags);
+
+ return cycles + offset;
+}
+
+unsigned long long sys_dtim2_read(void)
+{
+ unsigned long flags;
+ unsigned long long cycles;
+ volatile unsigned long trr, tcn, offset;
+
+ local_irq_save(flags);
+ trr = MCF_SLTCNT(0);
+ tcn = MCF_SCNT(0);
+ offset = (trr - tcn);
+ cycles = sched_dtim_clk_val;
+ local_irq_restore(flags);
+
+ return cycles + offset;
+}
+
+static irqreturn_t coldfire_dtim_clk_irq(int irq, void *dev)
+{
+ struct clock_event_device *evt =
+ (struct clock_event_device *)dev;
+
+ MCF_SSR(0) = MCF_SSR_ST;
+	sched_dtim_clk_val += (MCF_BUSCLK) / HZ;
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+void sys_dtim2_init(struct clock_event_device *evt)
+{
+ int irq = ISC_SLTn(0);
+
+ sched_dtim_clk_val = 0;
+ MCF_SCR(0) = 0;
+ MCF_ICR(irq) = ILP_SLT0;
+ request_irq(64 + irq, coldfire_dtim_clk_irq, IRQF_DISABLED,
+ "ColdFire Timer 0", (void *)evt);
+ MCF_SLTCNT(0) = MCF_BUSCLK / HZ;
+ MCF_SCR(0) |= MCF_SCR_TEN | MCF_SCR_IEN | MCF_SCR_RUN;
+}
+#endif
+
+void coldfire_reboot(void)
+{
+ /* disable interrupts and enable the watchdog */
+ printk(KERN_INFO "Rebooting\n");
+ asm("movew #0x2700, %sr\n");
+ MCF_GPT_GMS0 = MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE | MCF_GPT_GMS_TMS(4);
+}
+
+static void coldfire_get_model(char *model)
+{
+ sprintf(model, "Version 4 ColdFire");
+}
+
+static void __init
+coldfire_bootmem_alloc(unsigned long memory_start, unsigned long memory_end)
+{
+ unsigned long base_pfn;
+
+ /* compute total pages in system */
+ num_pages = PAGE_ALIGN(memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
+
+	/* align start/end to page boundaries */
+ memory_start = PAGE_ALIGN(memory_start);
+ memory_end = memory_end & PAGE_MASK;
+
+ /* page numbers */
+ base_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
+ min_low_pfn = __pa(memory_start) >> PAGE_SHIFT;
+ max_low_pfn = __pa(memory_end) >> PAGE_SHIFT;
+
+ high_memory = (void *)memory_end;
+ availmem = memory_start;
+
+ /* setup bootmem data */
+ m68k_setup_node(0);
+ availmem += init_bootmem_node(NODE_DATA(0), min_low_pfn,
+ base_pfn, max_low_pfn);
+ availmem = PAGE_ALIGN(availmem);
+
+ printk(KERN_INFO "** availmem=0x%lx pa(am)=0x%lx\n",
+ availmem, __pa(availmem));
+ printk(KERN_INFO "** mstart=0x%lx mend=0x%lx\n",
+ memory_start, memory_end);
+ printk(KERN_INFO "bpfn=0x%lx minpfn=0x%lx maxpfn=0x%lx\n",
+ base_pfn, min_low_pfn, max_low_pfn);
+
+ /* turn over physram */
+ free_bootmem(__pa(availmem), memory_end - (availmem));
+
+ /* configure physical dma area */
+ cf_dma_base = __pa(PAGE_ALIGN(memory_start));
+ cf_dma_size = CONFIG_DMA_SIZE;
+ cf_dma_end = CONFIG_SDRAM_BASE + cf_dma_size - 1;
+
+ printk(KERN_INFO "dma: phys base=0x%lx phys end=0x%lx virt base=0x%x\n",
+ cf_dma_base, cf_dma_end, CONFIG_DMA_BASE);
+
+ printk(KERN_INFO "mdma=0x%x pa(mdma)=0x%lx\n",
+ MAX_DMA_ADDRESS, __pa(MAX_DMA_ADDRESS));
+}
+
+void __init config_coldfire(void)
+{
+ unsigned long endmem, startmem;
+ int i;
+
+ /*
+ * Calculate endmem from m68k_memory, assume all are contiguous
+ */
+ startmem = ((((int) &_end) + (PAGE_SIZE - 1)) & PAGE_MASK);
+ endmem = PAGE_OFFSET;
+ for (i = 0; i < m68k_num_memory; ++i)
+ endmem += m68k_memory[i].size;
+
+	printk(KERN_INFO "starting up linux startmem 0x%lx, endmem 0x%lx, "
+		"size %luMB\n", startmem, endmem, (endmem - startmem) >> 20);
+
+ memset(irq_enable, 0, sizeof(irq_enable));
+
+ /*
+ * Setup coldfire mach-specific handlers
+ */
+ mach_max_dma_address = 0xffffffff;
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+ mach_sched_init = coldfire_sched_init;
+ mach_tick = coldfire_tick;
+ mach_gettimeoffset = coldfire_gettimeoffset;
+#endif
+ mach_reset = coldfire_reboot;
+/* mach_hwclk = coldfire_hwclk; to be done */
+ mach_get_model = coldfire_get_model;
+
+ coldfire_bootmem_alloc(startmem, endmem-1);
+
+ /*
+ * initrd setup
+ */
+/* #ifdef CONFIG_BLK_DEV_INITRD
+ if (m68k_ramdisk.size) {
+ reserve_bootmem (__pa(m68k_ramdisk.addr), m68k_ramdisk.size);
+ initrd_start = (unsigned long) m68k_ramdisk.addr;
+ initrd_end = initrd_start + m68k_ramdisk.size;
+ printk (KERN_DEBUG "initrd: %08lx - %08lx\n", initrd_start,
+ initrd_end);
+ }
+#endif */
+
+#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_FRAMEBUFFER_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+#if defined(CONFIG_SERIAL_COLDFIRE)
+ /*
+ * This causes trouble when it is re-registered later.
+ * Currently this is fixed by conditionally commenting
+ * out the register_console in mcf_serial.c
+ */
+ register_console(&mcfrs_console);
+#endif
+}
--- /dev/null
+/*
+ * arch/m68k/coldfire/m547x/devices.c
+ *
+ * Coldfire M547x/M548x Platform Device Configuration
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Kurt Mahan <kmahan@freescale.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+#ifdef CONFIG_SPI
+
+#include <asm/mcfqspi.h>
+/*
+ *
+ * DSPI
+ *
+ */
+
+/* number of supported SPI selects */
+#define SPI_NUM_CHIPSELECTS 8
+
+void coldfire_spi_cs_control(u8 cs, u8 command)
+{
+ /* nothing special required */
+}
+
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+static struct coldfire_spi_chip spidev_chip_info = {
+ .bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info spi_board_info[] = {
+#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
+ {
+ .modalias = "spidev",
+ .max_speed_hz = 16000000, /* max clk (SCK) speed in HZ */
+ .bus_num = 1,
+ .chip_select = 0, /* CS0 */
+ .controller_data = &spidev_chip_info,
+ }
+#endif
+};
+
+static int spi_irq_list[] = {
+ /* IRQ, ICR Offset, ICR Val,Mask */
+ 64 + ISC_DSPI_OVRFW, ISC_DSPI_OVRFW, 0x18, 0,
+ 64 + ISC_DSPI_RFOF, ISC_DSPI_RFOF, 0x18, 0,
+ 64 + ISC_DSPI_RFDF, ISC_DSPI_RFDF, 0x18, 0,
+ 64 + ISC_DSPI_TFUF, ISC_DSPI_TFUF, 0x18, 0,
+ 64 + ISC_DSPI_TCF, ISC_DSPI_TCF, 0x18, 0,
+ 64 + ISC_DSPI_TFFF, ISC_DSPI_TFFF, 0x18, 0,
+ 64 + ISC_DSPI_EOQF, ISC_DSPI_EOQF, 0x18, 0,
+ 0,0,0,0,
+};
+
+static struct coldfire_spi_master coldfire_master_info = {
+ .bus_num = 1,
+ .num_chipselect = SPI_NUM_CHIPSELECTS,
+ .irq_list = spi_irq_list,
+ .irq_source = 0, /* not used */
+ .irq_vector = 0, /* not used */
+ .irq_mask = 0, /* not used */
+ .irq_lp = 0, /* not used */
+ .par_val = 0, /* not used */
+ .cs_control = coldfire_spi_cs_control,
+};
+
+static struct resource coldfire_spi_resources[] = {
+ [0] = {
+ .name = "spi-par",
+ .start = MCF_MBAR + 0x00000a50, /* PAR_DSPI */
+ .end = MCF_MBAR + 0x00000a50, /* PAR_DSPI */
+ .flags = IORESOURCE_MEM
+ },
+
+ [1] = {
+ .name = "spi-module",
+ .start = MCF_MBAR + 0x00008a00, /* DSPI MCR Base */
+ .end = MCF_MBAR + 0x00008ab8, /* DSPI mem map end */
+ .flags = IORESOURCE_MEM
+ },
+
+ [2] = {
+ .name = "spi-int-level",
+ .start = MCF_MBAR + 0x740, /* ICR start */
+ .end = MCF_MBAR + 0x740 + ISC_DSPI_EOQF, /* ICR end */
+ .flags = IORESOURCE_MEM
+ },
+
+ [3] = {
+ .name = "spi-int-mask",
+ .start = MCF_MBAR + 0x70c, /* IMRL */
+ .end = MCF_MBAR + 0x70c, /* IMRL */
+ .flags = IORESOURCE_MEM
+ }
+};
+
+static struct platform_device coldfire_spi = {
+ .name = "spi_coldfire",
+ .id = -1,
+ .resource = coldfire_spi_resources,
+ .num_resources = ARRAY_SIZE(coldfire_spi_resources),
+ .dev = {
+ .platform_data = &coldfire_master_info,
+ }
+};
+
+/**
+ * m547x_8x_spi_init - Initialize SPI
+ */
+static int __init m547x_8x_spi_init(void)
+{
+ int retval;
+
+ /* initialize the DSPI PAR */
+ MCF_GPIO_PAR_DSPI = (MCF_GPIO_PAR_DSPI_PAR_CS5 |
+ MCF_GPIO_PAR_DSPI_PAR_CS3_DSPICS |
+ MCF_GPIO_PAR_DSPI_PAR_CS2_DSPICS |
+ MCF_GPIO_PAR_DSPI_PAR_CS0_DSPICS |
+ MCF_GPIO_PAR_DSPI_PAR_SCK_SCK |
+ MCF_GPIO_PAR_DSPI_PAR_SIN_SIN |
+ MCF_GPIO_PAR_DSPI_PAR_SOUT_SOUT);
+
+ /* register device */
+ retval = platform_device_register(&coldfire_spi);
+ if (retval < 0) {
+ goto out;
+ }
+
+ /* register board info */
+ if (ARRAY_SIZE(spi_board_info))
+		retval = spi_register_board_info(spi_board_info,
+						 ARRAY_SIZE(spi_board_info));
+
+out:
+ return retval;
+}
+#endif
+
+#ifdef CONFIG_I2C_BOARDINFO
+static struct i2c_board_info mcf_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("rv5c387a", 0x32),
+ },
+};
+#endif
+
+/**
+ * m547x_8x_init_devices - Initialize M547X_8X devices
+ *
+ * Returns 0 on success.
+ */
+static int __init m547x_8x_init_devices(void)
+{
+#ifdef CONFIG_SPI
+ m547x_8x_spi_init();
+#endif
+#ifdef CONFIG_I2C_BOARDINFO
+ i2c_register_board_info(0, mcf_i2c_devices,
+ ARRAY_SIZE(mcf_i2c_devices));
+#endif
+
+ return 0;
+}
+arch_initcall(m547x_8x_init_devices);
--- /dev/null
+/*
+ * arch/m68k/coldfire/m547x/dma.c
+ *
+ * Coldfire M547x/M548x DMA
+ *
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan <kmahan@freescale.com>
+ * Shrek Wu b16972@freescale.com
+ *
+ * This code is based on patches from the Freescale M547x_8x BSP
+ * release mcf547x_8x-20070107-ltib.iso
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/coldfire.h>
+#include <asm/m5485sram.h>
+#include <asm/mcfsim.h>
+#include <asm/MCD_dma.h>
+
+/*
+ * This global keeps track of which initiators have been
+ * used from the available assignments. Initiators 0-15 are
+ * hardwired. Initiators 16-31 are multiplexed and controlled
+ * via the Initiator Mux Control Register (IMCR). The
+ * assigned requestor is stored with the associated initiator
+ * number.
+ */
+static int used_reqs[32] = {
+ DMA_ALWAYS, DMA_DSPI_RX, DMA_DSPI_TX, DMA_DREQ0,
+ DMA_PSC0_RX, DMA_PSC0_TX, DMA_USBEP0, DMA_USBEP1,
+ DMA_USBEP2, DMA_USBEP3, DMA_PCI_TX, DMA_PCI_RX,
+ DMA_PSC1_RX, DMA_PSC1_TX, DMA_I2C_RX, DMA_I2C_TX,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0
+};
+
+/*
+ * This global keeps track of which channels have been assigned
+ * to tasks. This methodology assumes that no single initiator
+ * will be tied to more than one task/channel.
+ */
+static char used_channel[16] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+unsigned int connected_channel[16] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
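+
+/*
+ * Illustrative usage sketch (not part of the BSP sources): a driver
+ * wanting an MCD DMA channel for, e.g., FEC0 receive could use the
+ * helpers below roughly as follows (my_dma_done is a hypothetical
+ * "void (*)(void)" completion callback):
+ *
+ *	if (dma_set_initiator(DMA_FEC0_RX))
+ *		return -EBUSY;
+ *	initiator = dma_get_initiator(DMA_FEC0_RX);
+ *	channel = dma_set_channel_fec(DMA_FEC0_RX);
+ *	if (channel < 0)
+ *		return -EBUSY;
+ *	dma_connect(channel, (int)my_dma_done);
+ *
+ * and on teardown call dma_disconnect(channel),
+ * dma_remove_channel_by_number(channel) and
+ * dma_remove_initiator(DMA_FEC0_RX).
+ */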
+
+/**
+ * dma_set_initiator - enable initiator
+ * @initiator: initiator identifier
+ *
+ * Returns 0 of successful, non-zero otherwise
+ *
+ * Attempt to enable the provided Initiator in the Initiator
+ * Mux Control Register.
+ */
+int dma_set_initiator(int initiator)
+{
+ switch (initiator) {
+ case DMA_ALWAYS:
+ case DMA_DSPI_RX:
+ case DMA_DSPI_TX:
+ case DMA_DREQ0:
+ case DMA_PSC0_RX:
+ case DMA_PSC0_TX:
+ case DMA_USBEP0:
+ case DMA_USBEP1:
+ case DMA_USBEP2:
+ case DMA_USBEP3:
+ case DMA_PCI_TX:
+ case DMA_PCI_RX:
+ case DMA_PSC1_RX:
+ case DMA_PSC1_TX:
+ case DMA_I2C_RX:
+ case DMA_I2C_TX:
+ /*
+ * These initiators are always active
+ */
+ break;
+
+ case DMA_FEC0_RX:
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC16(3))
+ | MCF_DMA_IMCR_SRC16_FEC0RX;
+ used_reqs[16] = DMA_FEC0_RX;
+ break;
+
+ case DMA_FEC0_TX:
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC17(3))
+ | MCF_DMA_IMCR_SRC17_FEC0TX;
+ used_reqs[17] = DMA_FEC0_TX;
+ break;
+
+ case DMA_FEC1_RX:
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC20(3))
+ | MCF_DMA_IMCR_SRC20_FEC1RX;
+ used_reqs[20] = DMA_FEC1_RX;
+ break;
+
+ case DMA_FEC1_TX:
+ if (used_reqs[21] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC21(3))
+ | MCF_DMA_IMCR_SRC21_FEC1TX;
+ used_reqs[21] = DMA_FEC1_TX;
+ } else if (used_reqs[25] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC25(3))
+ | MCF_DMA_IMCR_SRC25_FEC1TX;
+ used_reqs[25] = DMA_FEC1_TX;
+ } else if (used_reqs[31] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC31(3))
+ | MCF_DMA_IMCR_SRC31_FEC1TX;
+ used_reqs[31] = DMA_FEC1_TX;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_DREQ1:
+ if (used_reqs[29] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC29(3))
+ | MCF_DMA_IMCR_SRC29_DREQ1;
+ used_reqs[29] = DMA_DREQ1;
+ } else if (used_reqs[21] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC21(3))
+ | MCF_DMA_IMCR_SRC21_DREQ1;
+ used_reqs[21] = DMA_DREQ1;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM0:
+ if (used_reqs[24] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC24(3))
+ | MCF_DMA_IMCR_SRC24_CTM0;
+ used_reqs[24] = DMA_CTM0;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM1:
+ if (used_reqs[25] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC25(3))
+ | MCF_DMA_IMCR_SRC25_CTM1;
+ used_reqs[25] = DMA_CTM1;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM2:
+ if (used_reqs[26] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC26(3))
+ | MCF_DMA_IMCR_SRC26_CTM2;
+ used_reqs[26] = DMA_CTM2;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM3:
+ if (used_reqs[27] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC27(3))
+ | MCF_DMA_IMCR_SRC27_CTM3;
+ used_reqs[27] = DMA_CTM3;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM4:
+ if (used_reqs[28] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC28(3))
+ | MCF_DMA_IMCR_SRC28_CTM4;
+ used_reqs[28] = DMA_CTM4;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM5:
+ if (used_reqs[29] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC29(3))
+ | MCF_DMA_IMCR_SRC29_CTM5;
+ used_reqs[29] = DMA_CTM5;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM6:
+ if (used_reqs[30] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC30(3))
+ | MCF_DMA_IMCR_SRC30_CTM6;
+ used_reqs[30] = DMA_CTM6;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_CTM7:
+ if (used_reqs[31] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC31(3))
+ | MCF_DMA_IMCR_SRC31_CTM7;
+ used_reqs[31] = DMA_CTM7;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_USBEP4:
+ if (used_reqs[26] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC26(3))
+ | MCF_DMA_IMCR_SRC26_USBEP4;
+ used_reqs[26] = DMA_USBEP4;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_USBEP5:
+ if (used_reqs[27] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC27(3))
+ | MCF_DMA_IMCR_SRC27_USBEP5;
+ used_reqs[27] = DMA_USBEP5;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_USBEP6:
+ if (used_reqs[28] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC28(3))
+ | MCF_DMA_IMCR_SRC28_USBEP6;
+ used_reqs[28] = DMA_USBEP6;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_PSC2_RX:
+ if (used_reqs[28] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC28(3))
+ | MCF_DMA_IMCR_SRC28_PSC2RX;
+ used_reqs[28] = DMA_PSC2_RX;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_PSC2_TX:
+ if (used_reqs[29] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC29(3))
+ | MCF_DMA_IMCR_SRC29_PSC2TX;
+ used_reqs[29] = DMA_PSC2_TX;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_PSC3_RX:
+ if (used_reqs[30] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC30(3))
+ | MCF_DMA_IMCR_SRC30_PSC3RX;
+ used_reqs[30] = DMA_PSC3_RX;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ case DMA_PSC3_TX:
+ if (used_reqs[31] == 0) {
+ MCF_DMA_IMCR = (MCF_DMA_IMCR & ~MCF_DMA_IMCR_SRC31(3))
+ | MCF_DMA_IMCR_SRC31_PSC3TX;
+ used_reqs[31] = DMA_PSC3_TX;
+ } else /* No empty slots */
+ return 1;
+ break;
+
+ default:
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * dma_get_initiator - get the initiator for the given requestor
+ * @requestor: initiator identifier
+ *
+ * Returns initiator number (0-31) if assigned or just 0
+ */
+unsigned int dma_get_initiator(int requestor)
+{
+ u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(used_reqs); ++i) {
+ if (used_reqs[i] == requestor)
+ return i;
+ }
+ return 0;
+}
+
+/**
+ * dma_remove_initiator - remove the given initiator from active list
+ * @requestor: requestor to remove
+ */
+void dma_remove_initiator(int requestor)
+{
+ u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(used_reqs); ++i) {
+ if (used_reqs[i] == requestor) {
+			used_reqs[i] = 0;	/* 0 marks the slot free again */
+ break;
+ }
+ }
+}
+
+/**
+ * dma_set_channel_fec: find available channel for fec and mark
+ * @requestor: initiator/requestor identifier
+ *
+ * Returns the first available FEC channel (0-1, or 0-3 with FEC2) or -1 if all occupied
+ */
+int dma_set_channel_fec(int requestor)
+{
+ u32 i, t;
+
+#ifdef CONFIG_FEC_548x_ENABLE_FEC2
+ t = 4;
+#else
+ t = 2;
+#endif
+
+ for (i = 0; i < t ; ++i) {
+ if (used_channel[i] == -1) {
+ used_channel[i] = requestor;
+ return i;
+ }
+ }
+ /* All channels taken */
+ return -1;
+}
+
+/**
+ * dma_set_channel - find an available channel and mark as used
+ * @requestor: initiator/requestor identifier
+ *
+ * Returns the first available non-FEC channel (up to 15) or -1 if all occupied
+ */
+int dma_set_channel(int requestor)
+{
+ u32 i;
+#ifdef CONFIG_NET_FEC2
+ i = 4;
+#else
+ i = 2;
+#endif
+
+ for (; i < 16; ++i)
+ if (used_channel[i] == -1) {
+ used_channel[i] = requestor;
+ return i;
+ }
+
+ /* All channels taken */
+ return -1;
+}
+
+/**
+ * dma_get_channel - get the channel being initiated by the requestor
+ * @requestor: initiator/requestor identifier
+ *
+ * Returns the channel assigned to the requestor or -1 if not found
+ */
+int dma_get_channel(int requestor)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(used_channel); ++i) {
+ if (used_channel[i] == requestor)
+ return i;
+ }
+ return -1;
+}
+
+/**
+ * dma_connect - connect a channel with reference on data
+ * @channel: channel number
+ * @address: reference address of data
+ *
+ * Returns 0 if success or -1 if invalid channel
+ */
+int dma_connect(int channel, int address)
+{
+ if ((channel < 16) && (channel >= 0)) {
+ connected_channel[channel] = address;
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * dma_disconnect - disconnect a channel
+ * @channel: channel number
+ *
+ * Returns 0 if success or -1 if invalid channel
+ */
+int dma_disconnect(int channel)
+{
+ if ((channel < 16) && (channel >= 0)) {
+ connected_channel[channel] = 0;
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * dma_remove_channel - remove channel from the active list
+ * @requestor: initiator/requestor identifier
+ */
+void dma_remove_channel(int requestor)
+{
+ u32 i;
+
+ for (i = 0; i < sizeof(used_channel); ++i) {
+ if (used_channel[i] == requestor) {
+ used_channel[i] = -1;
+ break;
+ }
+ }
+}
+
+/**
+ * dma_interrupt_handler - dma interrupt handler
+ * @irq: interrupt number
+ * @dev_id: data
+ *
+ * Returns IRQ_HANDLED
+ */
+irqreturn_t dma_interrupt_handler(int irq, void *dev_id)
+{
+ u32 i, interrupts;
+
+ /*
+ * Determine which interrupt(s) triggered by AND'ing the
+ * pending interrupts with those that aren't masked.
+ */
+ interrupts = MCF_DMA_DIPR;
+ MCF_DMA_DIPR = interrupts;
+
+ for (i = 0; i < 16; ++i, interrupts >>= 1) {
+ if (interrupts & 0x1)
+ if (connected_channel[i] != 0)
+ ((void (*)(void)) (connected_channel[i])) ();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * dma_remove_channel_by_number - clear dma channel
+ * @channel: channel number to clear
+ */
+void dma_remove_channel_by_number(int channel)
+{
+ if ((channel < sizeof(used_channel)) && (channel >= 0))
+ used_channel[channel] = -1;
+}
+
+/**
+ * dma_init - initialize the dma subsystem
+ *
+ * Returns 0 if success non-zero if failure
+ *
+ * Handles the DMA initialization during device setup.
+ */
+int __devinit dma_init(void)
+{
+ int result;
+ char *dma_version_str;
+
+ MCD_getVersion(&dma_version_str);
+ printk(KERN_INFO "m547x_8x DMA: Initialize %s\n", dma_version_str);
+
+ /* attempt to setup dma interrupt handler */
+ if (request_irq(64 + ISC_DMA, dma_interrupt_handler, IRQF_DISABLED,
+ "MCD-DMA", NULL)) {
+ printk(KERN_ERR "MCD-DMA: Cannot allocate the DMA IRQ(48)\n");
+ return 1;
+ }
+
+ MCF_DMA_DIMR = 0;
+ MCF_DMA_DIPR = 0xFFFFFFFF;
+
+ MCF_ICR(ISC_DMA) = ILP_DMA;
+
+ result = MCD_initDma((dmaRegs *) (MCF_MBAR + 0x8000),
+ (void *) SYS_SRAM_DMA_START, MCD_RELOC_TASKS);
+ if (result != MCD_OK) {
+ printk(KERN_ERR "MCD-DMA: Cannot perform DMA initialization\n");
+ free_irq(64 + ISC_DMA, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+device_initcall(dma_init);
--- /dev/null
+/*
+ * arch/m68k/coldfire/m547x/mcf548x-devices.c
+ *
+ * Coldfire M548x Platform Device Configuration
+ *
+ * Based on the Freescale MXC devices.c
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Kurt Mahan <kmahan@freescale.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfuart.h>
+
+static struct resource coldfire_i2c_resources[] = {
+ { /* I/O */
+ .start = MCF_MBAR + 0x008F00,
+ .end = MCF_MBAR + 0x008F20,
+ .flags = IORESOURCE_MEM,
+ },
+ { /* IRQ */
+ .start = 40,
+ .end = 40,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device coldfire_i2c_device = {
+ .name = "mcf-i2c",
+ .id = 0, /*bus number*/
+ .num_resources = ARRAY_SIZE(coldfire_i2c_resources),
+ .resource = coldfire_i2c_resources,
+};
+
+static struct resource coldfire_sec_resources[] = {
+ [0] = { /* I/O */
+ .start = MCF_MBAR + 0x00020000,
+ .end = MCF_MBAR + 0x00033000,
+ .flags = IORESOURCE_MEM,
+ },
+	[1] = { /* IRQ */
+ .start = ISC_SEC,
+ .end = ISC_SEC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device coldfire_sec_device = {
+ .name = "fsl-sec1",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(coldfire_sec_resources),
+ .resource = coldfire_sec_resources,
+};
+
+static struct physmap_flash_data mcf5485_flash_data = {
+ .width = 2,
+};
+
+static struct resource mcf5485_flash_resource = {
+ .start = 0xff800000,
+ .end = 0xffbfffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device mcf5485_flash_device = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &mcf5485_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &mcf5485_flash_resource,
+};
+
+static int __init mcf5485_init_devices(void)
+{
+ printk(KERN_INFO "MCF5485x INIT_DEVICES\n");
+
+ platform_device_register(&coldfire_i2c_device);
+ platform_device_register(&coldfire_sec_device);
+ platform_device_register(&mcf5485_flash_device);
+ return 0;
+}
+arch_initcall(mcf5485_init_devices);
+
+static struct mcf_platform_uart m548x_uart_platform[] = {
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE1,
+ .irq = MCFINT_VECBASE + MCFINT_UART0,
+ },
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE2,
+ .irq = MCFINT_VECBASE + MCFINT_UART1,
+ },
+ {
+ .mapbase = MCF_MBAR + MCFUART_BASE3,
+ .irq = MCFINT_VECBASE + MCFINT_UART2,
+ },
+ { },
+};
+
+static struct platform_device m548x_uart = {
+ .name = "mcfuart",
+ .id = 0,
+ .dev.platform_data = m548x_uart_platform,
+};
+
+static struct platform_device *m548x_devices[] __initdata = {
+ &m548x_uart,
+};
+
+void m548x_uarts_init(void)
+{
+ const int nrlines = ARRAY_SIZE(m548x_uart_platform);
+ int line;
+
+ /* Set GPIO port register to enable PSC(port) signals */
+ for (line = 0; (line < nrlines); line++) {
+ MCF_PAR_PSCn(line) = (0
+ | MCF_PAR_PSC_TXD
+ | MCF_PAR_PSC_RXD);
+
+ MCF_ICR(m548x_uart_platform[line].irq - 64) = ILP_PSCn(line);
+ }
+}
+/***************************************************************************/
+
+static int __init init_BSP(void)
+{
+ m548x_uarts_init();
+ platform_add_devices(m548x_devices, ARRAY_SIZE(m548x_devices));
+ return 0;
+}
+
+arch_initcall(init_BSP);
--- /dev/null
+/*********************************************************************
+ *
+ * Copyright (C) 2004 Motorola, Inc.
+ * MOTOROLA, INC. All Rights Reserved.
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ * Shrek Wu b16972@freescale.com
+ *
+ * You are hereby granted a copyright license to use
+ * the SOFTWARE so long as this entire notice is
+ * retained without alteration in any modified and/or redistributed
+ * versions, and that such modified versions are clearly identified
+ * as such. No licenses are granted by implication, estoppel or
+ * otherwise under any patents or trademarks of Motorola, Inc. This
+ * software is provided on an "AS IS" basis and without warranty.
+ *
+ * To the maximum extent permitted by applicable law, MOTOROLA
+ * DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED, INCLUDING
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR
+ * PURPOSE AND ANY WARRANTY AGAINST INFRINGEMENT WITH REGARD TO THE
+ * SOFTWARE (INCLUDING ANY MODIFIED VERSIONS THEREOF) AND ANY
+ * ACCOMPANYING WRITTEN MATERIALS.
+ *
+ * To the maximum extent permitted by applicable law, IN NO EVENT
+ * SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER (INCLUDING
+ * WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS, BUSINESS
+ * INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY
+ * LOSS) ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+ *
+ * Motorola assumes no responsibility for the maintenance and support
+ * of this software
+ ********************************************************************/
+
+/*
+ * File: MCD_dma.h
+ * Purpose: Main header file for multi-channel DMA API.
+ *
+ * Notes:
+ *
+ * Modifications:
+ */
+#ifndef _MCD_API_H
+#define _MCD_API_H
+
+#include <asm/types.h>
+
+/*
+ * Turn Execution Unit tasks ON (#define) or OFF (#undef)
+ */
+#undef MCD_INCLUDE_EU
+
+/*
+ * Number of DMA channels
+ */
+#define NCHANNELS 16
+
+/*
+ * Total number of variants
+ */
+#ifdef MCD_INCLUDE_EU
+#define NUMOFVARIANTS 6
+#else
+#define NUMOFVARIANTS 4
+#endif
+
+/*
+ * Define sizes of the various tables
+ */
+#define TASK_TABLE_SIZE (NCHANNELS*32)
+#define VAR_TAB_SIZE (128)
+#define CONTEXT_SAVE_SIZE (128)
+#define FUNCDESC_TAB_SIZE (256)
+
+#ifdef MCD_INCLUDE_EU
+#define FUNCDESC_TAB_NUM 16
+#else
+#define FUNCDESC_TAB_NUM 1
+#endif
+
+
+#ifndef DEFINESONLY
+
+/*
+ * Portability typedefs
+ */
+ /*
+#ifndef s32
+typedef int s32;
+#endif
+#ifndef u32
+typedef unsigned int u32;
+#endif
+#ifndef s16
+typedef short s16;
+#endif
+#ifndef u16
+typedef unsigned short u16;
+#endif
+#ifndef s8
+typedef char s8;
+#endif
+#ifndef u8
+typedef unsigned char u8;
+#endif
+*/
+/*
+ * These structures represent the internal registers of the
+ * multi-channel DMA
+ */
+struct dmaRegs_s {
+ u32 taskbar; /* task table base address register */
+ u32 currPtr;
+ u32 endPtr;
+ u32 varTablePtr;
+ u16 dma_rsvd0;
+ u16 ptdControl; /* ptd control */
+ u32 intPending; /* interrupt pending register */
+ u32 intMask; /* interrupt mask register */
+ u16 taskControl[16]; /* task control registers */
+ u8 priority[32]; /* priority registers */
+ u32 initiatorMux; /* initiator mux control */
+ u32 taskSize0; /* task size control register 0. */
+ u32 taskSize1; /* task size control register 1. */
+ u32 dma_rsvd1; /* reserved */
+ u32 dma_rsvd2; /* reserved */
+ u32 debugComp1; /* debug comparator 1 */
+ u32 debugComp2; /* debug comparator 2 */
+ u32 debugControl; /* debug control */
+ u32 debugStatus; /* debug status */
+ u32 ptdDebug; /* priority task decode debug */
+ u32 dma_rsvd3[31]; /* reserved */
+};
+typedef volatile struct dmaRegs_s dmaRegs;
+
+#endif
+
+/*
+ * PTD control reg bits
+ */
+#define PTD_CTL_TSK_PRI 0x8000
+#define PTD_CTL_COMM_PREFETCH 0x0001
+
+/*
+ * Task Control reg bits and field masks
+ */
+#define TASK_CTL_EN 0x8000
+#define TASK_CTL_VALID 0x4000
+#define TASK_CTL_ALWAYS 0x2000
+#define TASK_CTL_INIT_MASK 0x1f00
+#define TASK_CTL_ASTRT 0x0080
+#define TASK_CTL_HIPRITSKEN 0x0040
+#define TASK_CTL_HLDINITNUM 0x0020
+#define TASK_CTL_ASTSKNUM_MASK 0x000f
+
+/*
+ * Priority reg bits and field masks
+ */
+#define PRIORITY_HLD 0x80
+#define PRIORITY_PRI_MASK 0x07
+
+/*
+ * Debug Control reg bits and field masks
+ */
+#define DBG_CTL_BLOCK_TASKS_MASK 0xffff0000
+#define DBG_CTL_AUTO_ARM 0x00008000
+#define DBG_CTL_BREAK 0x00004000
+#define DBG_CTL_COMP1_TYP_MASK 0x00003800
+#define DBG_CTL_COMP2_TYP_MASK 0x00000070
+#define DBG_CTL_EXT_BREAK 0x00000004
+#define DBG_CTL_INT_BREAK 0x00000002
+
+/*
+ * PTD Debug reg selector addresses
+ * This reg must be written with a value to show the contents of
+ * one of the desired internal registers.
+ */
+#define PTD_DBG_REQ 0x00 /* shows the state of 31 initiators */
+#define PTD_DBG_TSK_VLD_INIT 0x01 /* shows which 16 tasks are valid and
+ have initiators asserted */
+
+
+/*
+ * General return values
+ */
+#define MCD_OK 0
+#define MCD_ERROR -1
+#define MCD_TABLE_UNALIGNED -2
+#define MCD_CHANNEL_INVALID -3
+
+/*
+ * MCD_initDma input flags
+ */
+#define MCD_RELOC_TASKS 0x00000001
+#define MCD_NO_RELOC_TASKS 0x00000000
+#define MCD_COMM_PREFETCH_EN 0x00000002
+/* Commbus Prefetching - MCF547x/548x ONLY */
+
+/*
+ * MCD_dmaStatus Status Values for each channel
+ */
+#define MCD_NO_DMA 1 /* No DMA has been requested since reset */
+#define MCD_IDLE 2 /* DMA active, but the initiator is currently inactive */
+#define MCD_RUNNING 3 /* DMA active, and the initiator is currently active */
+#define MCD_PAUSED 4 /* DMA active but it is currently paused */
+#define MCD_HALTED 5
+/* the most recent DMA has been killed with MCD_killDma() */
+#define MCD_DONE 6 /* the most recent DMA has completed. */
+
+
+/*
+ * MCD_startDma parameter defines
+ */
+
+/*
+ * Constants for the funcDesc parameter
+ */
+/* Byte swapping: */
+#define MCD_NO_BYTE_SWAP 0x00045670 /* to disable byte swapping. */
+#define MCD_BYTE_REVERSE 0x00076540
+/* to reverse the bytes of each u32 of the DMAed data. */
+#define MCD_U16_REVERSE 0x00067450 /* to reverse the 16-bit halves of
+ each 32-bit data value being DMAed.*/
+#define MCD_U16_BYTE_REVERSE 0x00054760 /* to reverse the byte halves of each
+ 16-bit half of each 32-bit data value DMAed */
+#define MCD_NO_BIT_REV 0x00000000
+/* do not reverse the bits of each byte DMAed. */
+#define MCD_BIT_REV 0x00088880 /* reverse the bits of each byte DMAed */
+/* CRCing: */
+#define MCD_CRC16 0xc0100000 /* to perform CRC-16 on DMAed data. */
+#define MCD_CRCCCITT 0xc0200000 /* to perform CRC-CCITT on DMAed data. */
+#define MCD_CRC32 0xc0300000 /* to perform CRC-32 on DMAed data. */
+#define MCD_CSUMINET 0xc0400000
+/* to perform internet checksums on DMAed data.*/
+#define MCD_NO_CSUM 0xa0000000 /* to perform no checksumming. */
+
+#define MCD_FUNC_NOEU1 (MCD_NO_BYTE_SWAP | MCD_NO_BIT_REV | MCD_NO_CSUM)
+#define MCD_FUNC_NOEU2 (MCD_NO_BYTE_SWAP | MCD_NO_CSUM)
+
+/*
+ * Constants for the flags parameter
+ */
+#define MCD_TT_FLAGS_RL 0x00000001 /* Read line */
+#define MCD_TT_FLAGS_CW 0x00000002 /* Combine Writes */
+#define MCD_TT_FLAGS_SP 0x00000004
+/* Speculative prefetch(XLB) MCF547x/548x ONLY */
+#define MCD_TT_FLAGS_MASK 0x000000ff
+#define MCD_TT_FLAGS_DEF (MCD_TT_FLAGS_RL | MCD_TT_FLAGS_CW)
+
+#define MCD_SINGLE_DMA 0x00000100 /* Unchained DMA */
+#define MCD_CHAIN_DMA /* TBD */
+#define MCD_EU_DMA /* TBD */
+#define MCD_FECTX_DMA 0x00001000 /* FEC TX ring DMA */
+#define MCD_FECRX_DMA 0x00002000 /* FEC RX ring DMA */
+
+
+/* these flags are valid for MCD_startDma and the chained buffer descriptors */
+#define MCD_BUF_READY 0x80000000
+/* indicates that this buffer is now under the DMA's control */
+#define MCD_WRAP 0x20000000
+/* to tell the FEC Dmas to wrap to the first BD */
+#define MCD_INTERRUPT 0x10000000
+/* to generate an interrupt after completion of the DMA. */
+#define MCD_END_FRAME 0x08000000
+/* tell the DMA to end the frame when transferring
+ last byte of data in buffer */
+#define MCD_CRC_RESTART 0x40000000 /* to empty out the accumulated checksum
+ prior to performing the DMA. */
+
+/* Defines for the FEC buffer descriptor control/status word*/
+#define MCD_FEC_BUF_READY 0x8000
+#define MCD_FEC_WRAP 0x2000
+#define MCD_FEC_INTERRUPT 0x1000
+#define MCD_FEC_END_FRAME 0x0800
+
+
+/*
+ * Defines for general intuitiveness
+ */
+
+#define MCD_TRUE 1
+#define MCD_FALSE 0
+
+/*
+ * Three different cases for destination and source.
+ */
+#define MINUS1 -1
+#define ZERO 0
+#define PLUS1 1
+
+#ifndef DEFINESONLY
+
+/* Task Table Entry struct*/
+typedef struct {
+ u32 TDTstart; /* task descriptor table start */
+ u32 TDTend; /* task descriptor table end */
+ u32 varTab; /* variable table start */
+ u32 FDTandFlags; /* function descriptor table start and flags */
+ volatile u32 descAddrAndStatus;
+ volatile u32 modifiedVarTab;
+ u32 contextSaveSpace; /* context save space start */
+ u32 literalBases;
+} TaskTableEntry;
+
+
+/* Chained buffer descriptor */
+typedef volatile struct MCD_bufDesc_struct MCD_bufDesc;
+struct MCD_bufDesc_struct {
+ u32 flags; /* flags describing the DMA */
+ u32 csumResult;
+ /* checksum from checksumming performed since last checksum reset */
+ s8 *srcAddr; /* the address to move data from */
+ s8 *destAddr; /* the address to move data to */
+ s8 *lastDestAddr; /* the last address written to */
+ u32 dmaSize;
+ /* the number of bytes to transfer independent of the transfer size */
+ MCD_bufDesc *next; /* next buffer descriptor in chain */
+ u32 info;
+ /* private information about this descriptor; DMA does not affect it */
+};
+
+/* Progress Query struct */
+typedef volatile struct MCD_XferProg_struct {
+ s8 *lastSrcAddr;
+ /* the most-recent or last, post-increment source address */
+ s8 *lastDestAddr;
+ /* the most-recent or last, post-increment destination address */
+ u32 dmaSize;
+ /* the amount of data transferred for the current buffer */
+ MCD_bufDesc *currBufDesc;
+ /* pointer to the current buffer descriptor being DMAed */
+} MCD_XferProg;
+
+
+/* FEC buffer descriptor */
+typedef volatile struct MCD_bufDescFec_struct {
+ u16 statCtrl;
+ u16 length;
+ u32 dataPointer;
+} MCD_bufDescFec;
+
+
+/*************************************************************************/
+/*
+ * API function Prototypes - see MCD_dmaApi.c for further notes
+ */
+
+/*
+ * MCD_startDma starts a particular kind of DMA.
+ */
+int MCD_startDma(
+ int channel, /* the channel on which to run the DMA */
+ s8 *srcAddr,
+ /* the address to move data from, or buffer-descriptor address */
+ s16 srcIncr, /* the amount to increment the source address per transfer */
+ s8 *destAddr, /* the address to move data to */
+ s16 destIncr,
+ /* the amount to increment the destination address per transfer */
+ u32 dmaSize,
+ /* the number of bytes to transfer independent of the transfer size */
+	u32 xferSize, /* the number of bytes in each data movement (1, 2, or 4) */
+ u32 initiator, /* what device initiates the DMA */
+ int priority, /* priority of the DMA */
+ u32 flags, /* flags describing the DMA */
+ u32 funcDesc
+/* a description of byte swapping, bit swapping, and CRC actions */
+);
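+
+/*
+ * Illustrative call (not part of the original header): a single,
+ * unchained memory-to-memory copy of "size" bytes on "channel" using
+ * 32-bit transfers and no execution-unit processing might look like
+ *
+ *	MCD_startDma(channel, src, 4, dst, 4, size, 4, initiator,
+ *		     priority, MCD_SINGLE_DMA | MCD_TT_FLAGS_DEF,
+ *		     MCD_FUNC_NOEU1);
+ *
+ * where channel/src/dst/size/initiator/priority are caller-supplied
+ * (hypothetical names).
+ */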
+
+/*
+ * MCD_initDma() initializes the DMA API by setting up a pointer to the DMA
+ * registers, relocating and creating the appropriate task structures, and
+ * setting up some global settings
+ */
+int MCD_initDma(dmaRegs *sDmaBarAddr, void *taskTableDest, u32 flags);
+
+/*
+ * MCD_dmaStatus() returns the status of the DMA on the requested channel.
+ */
+int MCD_dmaStatus(int channel);
+
+/*
+ * MCD_XferProgrQuery() returns progress of DMA on requested channel
+ */
+int MCD_XferProgrQuery(int channel, MCD_XferProg *progRep);
+
+/*
+ * MCD_killDma() halts the DMA on the requested channel, without any
+ * intention of resuming the DMA.
+ */
+int MCD_killDma(int channel);
+
+/*
+ * MCD_continDma() continues a DMA which has stopped due to encountering an
+ * unready buffer descriptor.
+ */
+int MCD_continDma(int channel);
+
+/*
+ * MCD_pauseDma() pauses the DMA on the given channel (if any DMA is
+ * running on that channel).
+ */
+int MCD_pauseDma(int channel);
+
+/*
+ * MCD_resumeDma() resumes the DMA on a given channel (if any DMA is
+ * running on that channel).
+ */
+int MCD_resumeDma(int channel);
+
+/*
+ * MCD_csumQuery provides the checksum/CRC after performing a non-chained DMA
+ */
+int MCD_csumQuery(int channel, u32 *csum);
+
+/*
+ * MCD_getCodeSize provides the packed size required by the microcoded task
+ * and structures.
+ */
+int MCD_getCodeSize(void);
+
+/*
+ * MCD_getVersion provides a pointer to a version string and returns a
+ * version number.
+ */
+int MCD_getVersion(char **longVersion);
+
+/* macro for setting a location in the variable table */
+#define MCD_SET_VAR(taskTab, idx, value) ((u32 *)(taskTab)->varTab)[idx] = value
+ /* Note that MCD_SET_VAR() is invoked many times in firing up a DMA function,
+ so I'm avoiding surrounding it with "do {} while(0)" */
+
+#endif /* DEFINESONLY */
+
+#endif /* _MCD_API_H */
--- /dev/null
+#ifndef __ASM_OFFSETS_H__
+#define __ASM_OFFSETS_H__
+/*
+ * DO NOT MODIFY.
+ *
+ * This file was generated by Kbuild
+ *
+ */
+
+#define TASK_STATE 0 /* offsetof(struct task_struct, state) | */
+#define TASK_FLAGS 12 /* offsetof(struct task_struct, flags) | */
+#define TASK_PTRACE 16 /* offsetof(struct task_struct, ptrace) | */
+#define TASK_THREAD 476 /* offsetof(struct task_struct, thread) | */
+#define TASK_INFO 638 /* offsetof(struct task_struct, thread.info) | */
+#define TASK_MM 178 /* offsetof(struct task_struct, mm) | */
+#define TASK_ACTIVE_MM 182 /* offsetof(struct task_struct, active_mm) | */
+#define THREAD_KSP 0 /* offsetof(struct thread_struct, ksp) | */
+#define THREAD_USP 4 /* offsetof(struct thread_struct, usp) | */
+#define THREAD_SR 8 /* offsetof(struct thread_struct, sr) | */
+#define THREAD_FS 10 /* offsetof(struct thread_struct, fs) | */
+#define THREAD_CRP 14 /* offsetof(struct thread_struct, crp) | */
+#define THREAD_ESP0 22 /* offsetof(struct thread_struct, esp0) | */
+#define THREAD_FPREG 38 /* offsetof(struct thread_struct, fp) | */
+#define THREAD_FPCNTL 134 /* offsetof(struct thread_struct, fpcntl) | */
+#define THREAD_FPSTATE 146 /* offsetof(struct thread_struct, fpstate) | */
+#define TINFO_PREEMPT 12 /* offsetof(struct thread_info, preempt_count) | */
+#define TINFO_FLAGS 4 /* offsetof(struct thread_info, flags) | */
+#define PT_D0 32 /* offsetof(struct pt_regs, d0) | */
+#define PT_ORIG_D0 36 /* offsetof(struct pt_regs, orig_d0) | */
+#define PT_D1 0 /* offsetof(struct pt_regs, d1) | */
+#define PT_D2 4 /* offsetof(struct pt_regs, d2) | */
+#define PT_D3 8 /* offsetof(struct pt_regs, d3) | */
+#define PT_D4 12 /* offsetof(struct pt_regs, d4) | */
+#define PT_D5 16 /* offsetof(struct pt_regs, d5) | */
+#define PT_A0 20 /* offsetof(struct pt_regs, a0) | */
+#define PT_A1 24 /* offsetof(struct pt_regs, a1) | */
+#define PT_A2 28 /* offsetof(struct pt_regs, a2) | */
+#define PT_PC 56 /* offsetof(struct pt_regs, pc) | */
+#define PT_SR 54 /* offsetof(struct pt_regs, sr) | */
+#define MM_CONTEXT 328 /* offsetof(struct mm_struct, context) | */
+#define PT_VECTOR 52 /* offsetof(struct pt_regs, pc) - 4 | */
+#define IRQ_HANDLER 0 /* offsetof(struct irq_node, handler) | */
+#define IRQ_DEVID 4 /* offsetof(struct irq_node, dev_id) | */
+#define IRQ_NEXT 8 /* offsetof(struct irq_node, next) | */
+#define STAT_IRQ 72 /* offsetof(struct kernel_stat, irqs) | */
+#define CPUSTAT_SOFTIRQ_PENDING 0 /* offsetof(irq_cpustat_t, __softirq_pending) | */
+#define BIR_TAG 0 /* offsetof(struct bi_record, tag) | */
+#define BIR_SIZE 2 /* offsetof(struct bi_record, size) | */
+#define BIR_DATA 4 /* offsetof(struct bi_record, data) | */
+#define FONT_DESC_IDX 0 /* offsetof(struct font_desc, idx) | */
+#define FONT_DESC_NAME 4 /* offsetof(struct font_desc, name) | */
+#define FONT_DESC_WIDTH 8 /* offsetof(struct font_desc, width) | */
+#define FONT_DESC_HEIGHT 12 /* offsetof(struct font_desc, height) | */
+#define FONT_DESC_DATA 16 /* offsetof(struct font_desc, data) | */
+#define FONT_DESC_PREF 20 /* offsetof(struct font_desc, pref) | */
+#define SIGSEGV 11 /* SIGSEGV | */
+#define SEGV_MAPERR 196609 /* SEGV_MAPERR | */
+#define SIGTRAP 5 /* SIGTRAP | */
+#define TRAP_TRACE 196610 /* TRAP_TRACE | */
+#define CUSTOMBASE -2132807680 /* &amiga_custom | */
+#define C_INTENAR 28 /* offsetof(struct CUSTOM, intenar) | */
+#define C_INTREQR 30 /* offsetof(struct CUSTOM, intreqr) | */
+#define C_INTENA 154 /* offsetof(struct CUSTOM, intena) | */
+#define C_INTREQ 156 /* offsetof(struct CUSTOM, intreq) | */
+#define C_SERDATR 24 /* offsetof(struct CUSTOM, serdatr) | */
+#define C_SERDAT 48 /* offsetof(struct CUSTOM, serdat) | */
+#define C_SERPER 50 /* offsetof(struct CUSTOM, serper) | */
+#define CIAABASE -2134908927 /* &ciaa | */
+#define CIABBASE -2134913024 /* &ciab | */
+#define C_PRA 0 /* offsetof(struct CIA, pra) | */
+#define ZTWOBASE -2147483648 /* zTwoBase | */
+
+#endif
--- /dev/null
+/*
+ * arch/m68k/include/asm/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Kurt Mahan kmahan@freescale.com
+ * Shrek Wu b16972@freescale.com
+ *
+ * Based on include/asm-m68k/cacheflush.h
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_548x_CACHEFLUSH_H
+#define M68K_CF_548x_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+/*
+ * Cache handling functions
+ */
+
+#define flush_icache() \
+({ \
+ unsigned long set; \
+ unsigned long start_set; \
+ unsigned long end_set; \
+ \
+ start_set = 0; \
+ end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+ for (set = start_set; set <= end_set; set += (0x10 - 3)) {\
+ asm volatile("cpushl %%ic,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%ic,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%ic,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
+ } \
+})
+
+#define flush_dcache() \
+({ \
+ unsigned long set; \
+ unsigned long start_set; \
+ unsigned long end_set; \
+ \
+ start_set = 0; \
+ end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+ for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
+ asm volatile("cpushl %%dc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%dc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%dc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
+ } \
+})
+
+#define flush_bcache() \
+({ \
+ unsigned long set; \
+ unsigned long start_set; \
+ unsigned long end_set; \
+ \
+ start_set = 0; \
+ end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+ for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
+ asm volatile("cpushl %%bc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%bc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%bc,(%0)\n" \
+ "\taddq%.l #1,%0\n" \
+ "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
+ } \
+})
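+
+/*
+ * Note on the loops above (explanatory; assumes the V4e cache layout):
+ * cpushl pushes the cache line selected by the set and way bits of the
+ * address register.  The four cpushl/addq pairs step through the four
+ * ways of one set, and the "+ (0x10 - 3)" then advances to the next
+ * set, allowing for the three addq increments already applied.
+ */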
+
+/*
+ * invalidate the cache for the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_clear(unsigned long paddr, int len);
+/*
+ * push any dirty cache in the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_push(unsigned long paddr, int len);
+
+/*
+ * push and invalidate pages in the specified user virtual
+ * memory range.
+ */
+extern void cache_push_v(unsigned long vaddr, int len);
+
+/* This is needed whenever the virtual mapping of the current
+ process changes. */
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+ if (mm == current->mm)
+ flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_all() flush_bcache()
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_mm == current->mm)
+ flush_bcache();
+/*cf_cache_flush_range(start, end);*/
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr: user virtual address of the page
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long vmaddr, unsigned long pfn)
+{
+ if (vma->vm_mm == current->mm)
+ flush_bcache();
+/*cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);*/
+}
+
+/* Push the page at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+ unsigned long addr = (unsigned long) address;
+
+ addr &= ~(PAGE_SIZE - 1);
+ /* round down to page start address */
+
+ start_set = addr & _ICACHE_SET_MASK;
+ end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+ if (start_set > end_set) {
+		/* the range wraps: flush from set 0 up to the wrapped end first */
+ for (set = 0; set <= end_set; set += (0x10 - 3)) {
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+ }
+		/* the next loop finishes the rest of the cache, i.e. the part past the wrap */
+ end_set = LAST_ICACHE_ADDR;
+ }
+
+ for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+ asm volatile("cpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+ }
+}
+
+/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
+#define flush_dcache_page(page) \
+ __flush_page_to_ram((void *) page_address(page))
+#define flush_icache_page(vma, pg) \
+ __flush_page_to_ram((void *) page_address(pg))
+#define flush_icache_user_range(adr, len) \
+ do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma, page, addr, len) \
+ do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+extern inline void flush_icache_range(unsigned long address,
+ unsigned long endaddr)
+{
+ unsigned long set;
+ unsigned long start_set;
+ unsigned long end_set;
+
+ start_set = address & _ICACHE_SET_MASK;
+ end_set = endaddr & _ICACHE_SET_MASK;
+
+ if (start_set > end_set) {
+		/* the range wraps: flush from set 0 up to the wrapped end first */
+ for (set = 0; set <= end_set; set += (0x10 - 3)) {
+ asm volatile("cpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+ }
+		/* the next loop finishes the rest of the cache, i.e. the part past the wrap */
+ end_set = LAST_ICACHE_ADDR;
+ }
+ for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+ asm volatile("cpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)\n"
+ "\taddq%.l #1,%0\n"
+ "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+ }
+}
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+ flush_icache_user_page(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long vaddr,
+ void *dst, void *src, int len)
+{
+ memcpy(dst, src, len);
+}
+
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#endif /* M68K_CF_548x_CACHEFLUSH_H */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#ifndef __CF_BITOPS__
+#define __CF_BITOPS__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+#define test_and_set_bit(nr,vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_test_and_set_bit(nr, vaddr) : \
+ __generic_coldfire_test_and_set_bit(nr, vaddr))
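+
+/*
+ * Explanatory note: when "nr" is a compile-time constant the variant
+ * below can encode the bit number as an immediate ("di" constraint)
+ * and address the byte through an address register; otherwise the
+ * generic variant passes the bit number in a data register.
+ */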
+
+static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+ __asm__ __volatile__ ("bset %2,(%4); sne %0"
+ : "=d" (retval), "=m" (*p)
+ : "di" (nr & 7), "m" (*p), "a" (p));
+ return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bset %2,%1; sne %0"
+ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "d" (nr)
+ : "memory");
+ return retval;
+}
+#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
+
+#define set_bit(nr,vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_set_bit(nr, vaddr) : \
+ __generic_coldfire_set_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_set_bit(int nr,
+ volatile void *vaddr)
+{
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+ __asm__ __volatile__ ("bset %1,(%3)"
+ : "=m" (*p) : "di" (nr & 7), "m" (*p), "a" (p));
+}
+
+static __inline__ void __generic_coldfire_set_bit(int nr,
+ volatile void *vaddr)
+{
+ __asm__ __volatile__ ("bset %1,%0"
+ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "d" (nr)
+ : "memory");
+}
+#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
+
+#define test_and_clear_bit(nr, vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_test_and_clear_bit(nr, vaddr) : \
+ __generic_coldfire_test_and_clear_bit(nr, vaddr))
+
+static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+
+ __asm__ __volatile__ ("bclr %2,(%4); sne %0"
+ : "=d" (retval), "=m" (*p)
+ : "id" (nr & 7), "m" (*p), "a" (p));
+
+ return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bclr %2,%1; sne %0"
+ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "d" (nr & 7)
+ : "memory");
+
+ return retval;
+}
+#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#define clear_bit(nr,vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_clear_bit(nr, vaddr) : \
+ __generic_coldfire_clear_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_clear_bit(int nr,
+ volatile void *vaddr)
+{
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+ __asm__ __volatile__ ("bclr %1,(%3)"
+ : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
+}
+
+static __inline__ void __generic_coldfire_clear_bit(int nr,
+ volatile void *vaddr)
+{
+ __asm__ __volatile__ ("bclr %1,%0"
+ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "d" (nr)
+ : "memory");
+}
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+
+#define test_and_change_bit(nr, vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_test_and_change_bit(nr, vaddr) : \
+ __generic_coldfire_test_and_change_bit(nr, vaddr))
+
+static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+
+ __asm__ __volatile__ ("bchg %2,(%4); sne %0"
+ : "=d" (retval), "=m" (*p)
+ : "id" (nr & 7), "m" (*p), "a" (p));
+
+ return retval;
+}
+
+static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
+ volatile void *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bchg %2,%1; sne %0"
+ : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "id" (nr)
+ : "memory");
+
+ return retval;
+}
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
+
+#define change_bit(nr,vaddr) \
+ (__builtin_constant_p(nr) ? \
+ __constant_coldfire_change_bit(nr, vaddr) : \
+ __generic_coldfire_change_bit(nr, vaddr))
+
+static __inline__ void __constant_coldfire_change_bit(int nr,
+ volatile void *vaddr)
+{
+ volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
+ __asm__ __volatile__ ("bchg %1,(%3)"
+ : "=m" (*p) : "id" (nr & 7), "m" (*p), "a" (p));
+}
+
+static __inline__ void __generic_coldfire_change_bit(int nr,
+ volatile void *vaddr)
+{
+ __asm__ __volatile__ ("bchg %1,%0"
+ : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
+ : "d" (nr)
+ : "memory");
+}
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+ return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ unsigned long result = 0;
+
+ while (word & 1) {
+ result++;
+ word >>= 1;
+ }
+ return result;
+}
+
+/* find_next_zero_bit() finds the first zero bit in a bit string of length
+ * 'size' bits, starting the search at bit 'offset'. This is largely based
+ * on Linus's ALPHA routines.
+ */
+static __inline__ unsigned long find_next_zero_bit(void *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (32-offset);
+ if (size < 32)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size & ~31UL) {
+ tmp = *(p++);
+ if (~tmp)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+	tmp |= ~0UL << size;	/* force bits beyond "size" to 1 so ffz ignores them */
+found_middle:
+ return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
+ (size), 0)
+
+/* Ported from included/linux/bitops.h */
+static __inline__ int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/* find_next_bit - find the next set bit in a memory region
+ * (from asm-ppc/bitops.h)
+ */
+static __inline__ unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
+ unsigned int result = offset & ~31UL;
+ unsigned int tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *p++;
+ tmp &= ~0UL << offset;
+ if (size < 32)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size >= 32) {
+ tmp = *p++;
+ if (tmp != 0)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= ~0UL >> (32 - size);
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifdef __KERNEL__
+
+/* Ported from include/linux/bitops.h */
+static __inline__ int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+static inline int __fls(int x)
+{
+ return fls(x) - 1;
+}
+
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
+ (size), 0)
+#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
+ (unsigned long *)(addr))
+#define minix_set_bit(nr, addr) set_bit((nr), \
+ (unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
+ (unsigned long *)(addr))
+
+static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
+{
+ int *a = (int *)vaddr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *a) != 0);
+}
+
+#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
+ (unsigned long *)(addr))
+#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
+ (unsigned long *)(addr))
+#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
+ (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
+ (unsigned long *)(addr))
+
+static inline int ext2_test_bit(int nr, const void *vaddr)
+{
+ const unsigned char *p = vaddr;
+ return (p[nr >> 3] & (1U << (nr & 7))) != 0;
+}
+
+static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
+{
+ const unsigned long *p = vaddr, *addr = vaddr;
+ int res;
+
+ if (!size)
+ return 0;
+
+ size = (size >> 5) + ((size & 31) > 0);
+ while (*p++ == ~0UL) {
+ if (--size == 0)
+ return (p - addr) << 5;
+ }
+
+ --p;
+ for (res = 0; res < 32; res++)
+ if (!ext2_test_bit (res, p))
+ break;
+ return (p - addr) * 32 + res;
+}
+
+static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
+ unsigned offset)
+{
+ const unsigned long *addr = vaddr;
+ const unsigned long *p = addr + (offset >> 5);
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ /* Look for zero in first longword */
+ for (res = bit; res < 32; res++)
+ if (!ext2_test_bit (res, p))
+ return (p - addr) * 32 + res;
+ p++;
+ }
+ /* No zero yet, search remaining full bytes for a zero */
+ res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
+ return (p - addr) * 32 + res;
+}
+
+#endif /* KERNEL */
+
+#endif /* __CF_BITOPS__ */
--- /dev/null
+#ifndef M68K_CF_CACHEFLUSH_H
+#define M68K_CF_CACHEFLUSH_H
+
+#ifdef CONFIG_M5445X
+#include "cf_5445x_cacheflush.h"
+#else
+#include "cf_548x_cacheflush.h"
+#endif
+
+#endif /* M68K_CF_CACHEFLUSH_H */
--- /dev/null
+#ifndef __CF_M68K_ENTRY_H
+#define __CF_M68K_ENTRY_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/coldfire.h>
+#include <asm/cfmmu.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Stack layout in 'ret_from_exception':
+ *
+ * This allows access to the syscall arguments in registers d1-d5
+ *
+ * 0(sp) - d1
+ * 4(sp) - d2
+ * 8(sp) - d3
+ * C(sp) - d4
+ * 10(sp) - d5
+ * 14(sp) - a0
+ * 18(sp) - a1
+ * 1C(sp) - a2
+ * 20(sp) - d0
+ * 24(sp) - orig_d0
+ * 28(sp) - stack adjustment
+ * 2C(sp) - sr
+ * 2E(sp) - pc
+ * 32(sp) - format & vector
+ * 36(sp) - MMUSR
+ * 3A(sp) - MMUAR
+ */
+
+/*
+ * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
+ * the whole kernel.
+ */
+
+/* the following macro is used when enabling interrupts */
+/* portable version */
+#define ALLOWINT (~0x700)
+#define MAX_NOINT_IPL 0
+
+#ifdef __ASSEMBLY__
+
+#define curptr a2
+
+LFLUSH_I_AND_D = 0x00000808
+LSIGTRAP = 5
+
+/* process bits for task_struct.ptrace */
+PT_TRACESYS_OFF = 3
+PT_TRACESYS_BIT = 1
+PT_PTRACED_OFF = 3
+PT_PTRACED_BIT = 0
+PT_DTRACE_OFF = 3
+PT_DTRACE_BIT = 2
+
+#define SAVE_ALL_INT save_all_int
+#define SAVE_ALL_SYS save_all_sys
+#define RESTORE_ALL restore_all
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs a3-a6 and d6-d7 are preserved by C code
+ * the kernel doesn't mess with usp unless it needs to
+ */
+
+/*
+ * a -1 in the orig_d0 field signifies
+ * that the stack frame is NOT for a syscall
+ */
+.macro save_all_int
+ movel MMUSR,%sp@-
+ movel MMUAR,%sp@-
+ clrl %sp@- | stk_adj
+ pea -1:w | orig d0
+ movel %d0,%sp@- | d0
+ subal #(8*4), %sp
+ moveml %d1-%d5/%a0-%a1/%curptr,%sp@
+.endm
+
+.macro save_all_sys
+ movel MMUSR,%sp@-
+ movel MMUAR,%sp@-
+ clrl %sp@- | stk_adj
+ movel %d0,%sp@- | orig d0
+ movel %d0,%sp@- | d0
+ subal #(8*4), %sp
+ moveml %d1-%d5/%a0-%a1/%curptr,%sp@
+.endm
+
+.macro restore_all
+ moveml %sp@,%a0-%a1/%curptr/%d1-%d5
+ addal #(8*4), %sp
+ movel %sp@+,%d0 | d0
+ addql #4,%sp | orig d0
+ addl %sp@+,%sp | stk_adj
+ addql #8,%sp | MMUAR & MMUSR
+ rte
+.endm
+
+#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
+
+#define SAVE_SWITCH_STACK save_switch_stack
+#define RESTORE_SWITCH_STACK restore_switch_stack
+#define GET_CURRENT(tmp) get_current tmp
+
+.macro save_switch_stack
+ subal #(6*4), %sp
+ moveml %a3-%a6/%d6-%d7,%sp@
+.endm
+
+.macro restore_switch_stack
+ moveml %sp@,%a3-%a6/%d6-%d7
+ addal #(6*4), %sp
+.endm
+
+.macro get_current reg=%d0
+ movel %sp,\reg
+ andl #-THREAD_SIZE,\reg
+ movel \reg,%curptr
+ movel %curptr@,%curptr
+.endm
+
+#else /* C source */
+
+#define STR(X) STR1(X)
+#define STR1(X) #X
+
+#define PT_OFF_ORIG_D0 0x24
+#define PT_OFF_FORMATVEC 0x32
+#define PT_OFF_SR 0x2C
+#define SAVE_ALL_INT \
+ "clrl %%sp@-;" /* stk_adj */ \
+ "pea -1:w;" /* orig d0 = -1 */ \
+ "movel %%d0,%%sp@-;" /* d0 */ \
+ "subal #(8*4),%sp" \
+ "moveml %%d1-%%d5/%%a0-%%a2,%%sp@"
+#define GET_CURRENT(tmp) \
+ "movel %%sp,"#tmp"\n\t" \
+ "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
+ "movel "#tmp",%%a2\n\t"
+
+#endif
+
+#endif /* __CF_M68K_ENTRY_H */
--- /dev/null
+/*
+ * linux/include/asm-m68k/cf_io.h
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * 9/30/08 JKM - Separated Coldfire pieces out from m68k.
+ */
+
+#ifndef __CF_IO__
+#define __CF_IO__
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/raw_io.h>
+#include <asm/virtconvert.h>
+
+#include <asm-generic/iomap.h>
+
+/*
+ * These should be valid on any ioremap()ed region
+ */
+#define readb(addr) in_8(addr)
+#define writeb(val,addr) out_8((addr),(val))
+#define readw(addr) in_le16(addr)
+#define writew(val,addr) out_le16((addr),(val))
+#define readl(addr) in_le32(addr)
+#define writel(val,addr) out_le32((addr),(val))
+
+#define readb_relaxed(addr) readb(addr)
+#define readw_relaxed(addr) readw(addr)
+#define readl_relaxed(addr) readl(addr)
+
+#ifdef CONFIG_PCI
+
+/*
+ * IO space in Coldfire
+ */
+/*#define HAVE_ARCH_PIO_SIZE 1
+#define PIO_OFFSET 0x00000000UL
+#define PIO_RESERVED 0x100000000UL
+#define PIO_MASK (PIO_RESERVED - 1)
+*/
+#define inb_p inb
+#define inw_p inw
+#define inl_p inl
+#define outb_p outb
+#define outw_p outw
+#define outl_p outl
+
+#ifndef CONFIG_COLDFIRE
+#define inb(port) in_8(port)
+#define outb(val,port) out_8((port),(val))
+#define inw(port) in_le16(port)
+#define outw(val,port) out_le16((port),(val))
+#define inl(port) in_le32(port)
+#define outl(val,port) out_le32((port),(val))
+#define insb(port, buf, nr) \
+ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
+#define outsb(port, buf, nr) \
+ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
+#define insw(port, buf, nr) \
+ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
+#define outsw(port, buf, nr) \
+ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
+#define insl(port, buf, nr) \
+ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
+#define outsl(port, buf, nr) \
+ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
+#else
+#define inb(port) pci_inb(port)
+#define outb(val, port) pci_outb((val), (port))
+#define inw(port) pci_inw(port)
+#define outw(val, port) pci_outw((val), (port))
+#define insb(a, b, c) \
+ pci_insb((volatile unsigned char *)a, (unsigned char *)b, c)
+#define insw(a, b, c) \
+ pci_insw((volatile unsigned short *)a, (const unsigned short *)b, c)
+#define insl(a, b, c) \
+ pci_insl((volatile unsigned long *)a, (const unsigned long *)b, c)
+#define outsb(a, b, c) \
+ pci_outsb((volatile unsigned char *)a, (const unsigned char *)b, c)
+#define outsw(a, b, c) \
+ pci_outsw((volatile unsigned short *)a, (const unsigned short *)b, c)
+#define outsl(a, b, c) \
+ pci_outsl((volatile unsigned long *)a, (const unsigned long *)b, c)
+#define inl(port) pci_inl(port)
+#define outl(val, port) pci_outl((val), (port))
+#endif
+
+#else
+/* no pci */
+
+#define inb(port) in_8(port)
+#define outb(val, port) out_8((port), (val))
+#define inw(port) in_le16(port)
+#define outw(val, port) out_le16((port), (val))
+#define inl(port) in_le32(port)
+#define outl(val, port) out_le32((port), (val))
+#define insb(port, buf, nr) \
+ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
+#define outsb(port, buf, nr) \
+ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
+#define insw(port, buf, nr) \
+ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
+#define outsw(port, buf, nr) \
+ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
+#define insl(port, buf, nr) \
+ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
+#define outsl(port, buf, nr) \
+ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
+
+#endif /* CONFIG_PCI */
+
+#define mmiowb()
+
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
+}
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
+}
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
+ unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
+}
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
+ unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+ __builtin_memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+ __builtin_memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+ __builtin_memcpy((void __force *) dst, src, count);
+}
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#endif /* __KERNEL__ */
+
+#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#define __raw_readb(addr) \
+ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
+#define __raw_readw(addr) \
+ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
+#define __raw_readl(addr) \
+ ({ unsigned long __v = (*(volatile unsigned long *) (addr)); __v; })
+#define __raw_writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
+#define __raw_writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
+#define __raw_writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))
+
+#define memset_io(a, b, c) memset((void *)(a), (b), (c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
+
+#if !defined(readb)
+#define readb(addr) \
+ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
+#define readw(addr) \
+ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
+#define readl(addr) \
+ ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
+#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
+#endif /* readb */
+
+#endif /* __CF_IO__ */
--- /dev/null
+/*
+ * linux/include/asm-m68k/cf_page.h
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Based on linux/include/asm-m68k/page.h
+ *
+ * 10/09/08 JKM: split Coldfire pieces into separate file
+ */
+#ifndef __CF_PAGE__
+#define __CF_PAGE__
+
+#include <linux/const.h>
+#include <asm/setup.h>
+#include <asm/page_offset.h>
+
+/* Virtual base page location */
+#define PAGE_OFFSET (PAGE_OFFSET_RAW)
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT (13) /* 8K pages */
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define THREAD_SIZE PAGE_SIZE
+
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <asm/module.h>
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, page) \
+ do { clear_page(addr); \
+ flush_dcache_page(page); \
+ } while (0)
+
+#define copy_user_page(to, from, vaddr, page) \
+ do { copy_page(to, from); \
+ flush_dcache_page(page); \
+ } while (0)
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd[16]; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((&x)->pmd[0])
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/* to align the pointer to the (next) page boundary */
+/*Defined in linux/mm.h*/
+/*#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)*/
+
+extern unsigned long m68k_memoffset;
+
+#define WANT_PAGE_VIRTUAL
+
+extern unsigned long cf_dma_base;
+extern unsigned long cf_dma_end;
+
+/*
+ * Convert a virt to a phys
+ */
+static inline unsigned long ___pa(void *vaddr)
+{
+#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
+#else
+ if ((unsigned long)vaddr >= CONFIG_DMA_BASE &&
+ (unsigned long)vaddr < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
+ /* address is in carved out DMA range */
+ return ((unsigned long)vaddr - CONFIG_DMA_BASE) + CONFIG_SDRAM_BASE;
+ }
+ else if ((unsigned long)vaddr >= PAGE_OFFSET &&
+ (unsigned long)vaddr < (PAGE_OFFSET + CONFIG_SDRAM_SIZE)) {
+ /* normal mapping */
+ return ((unsigned long)vaddr - PAGE_OFFSET) + CONFIG_SDRAM_BASE;
+ }
+
+ return (unsigned long)vaddr;
+#endif
+}
+#define __pa(vaddr) ___pa((void *)(vaddr))
+
+/*
+ * Convert a phys to a virt
+ */
+static inline void *__va(unsigned long paddr)
+{
+#if CONFIG_SDRAM_BASE != PAGE_OFFSET
+ return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
+#else
+ if (paddr >= cf_dma_base && paddr <= cf_dma_end) {
+ /* mapped address for DMA */
+ return (void *)((paddr - CONFIG_SDRAM_BASE) + CONFIG_DMA_BASE);
+ }
+ else if (paddr >= cf_dma_end &&
+ paddr < (CONFIG_SDRAM_BASE + CONFIG_SDRAM_SIZE)) {
+ /* normal mapping */
+ return (void *)((paddr - CONFIG_SDRAM_BASE) + PAGE_OFFSET);
+ }
+ return (void *)paddr;
+#endif
+}
+
+/*
+ * NOTE: "virtual" isn't really correct; it should actually be the offset into
+ * the memory node, but we have no highmem, so that works for now.
+ *
+ * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
+ * of the shifts unnecessary.
+ *
+ * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr.
+ */
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+extern int m68k_virt_to_node_shift;
+
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define __virt_to_node(addr) (&pg_data_map[0])
+#else
+extern struct pglist_data *pg_data_table[];
+
+static inline __attribute_const__ int __virt_to_node_shift(void)
+{
+ return m68k_virt_to_node_shift;
+}
+
+#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
+#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
+
+#define virt_to_page(addr) ({ \
+ pfn_to_page(virt_to_pfn(addr)); \
+})
+#define page_to_virt(page) ({ \
+ pfn_to_virt(page_to_pfn(page)); \
+})
+
+#define pfn_to_page(pfn) ({ \
+ unsigned long __pfn = (pfn); \
+ struct pglist_data *pgdat; \
+ pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
+ pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
+})
+#define page_to_pfn(_page) ({ \
+ struct page *__p = (_page); \
+ struct pglist_data *pgdat; \
+ pgdat = &pg_data_map[page_to_nid(__p)]; \
+ ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
+})
+
+#define virt_addr_valid(kaddr) ( ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
+ (void *)(kaddr) < high_memory) || \
+ ((void *)(kaddr) >= (void*)CONFIG_DMA_BASE && \
+ (void *)(kaddr) < (void*)(CONFIG_DMA_BASE+CONFIG_DMA_SIZE)))
+
+#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/getorder.h>
+
+#ifdef CONFIG_VDSO
+#define __HAVE_ARCH_GATE_AREA
+#endif
+
+#endif /* __CF_PAGE__ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#ifndef M68K_CF_PGALLOC_H
+#define M68K_CF_PGALLOC_H
+#include <linux/highmem.h>
+#include <asm/coldfire.h>
+#include <asm/page.h>
+#include <asm/cf_tlbflush.h>
+#include <asm/cf_cacheflush.h>
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+
+ if (!page)
+ return NULL;
+
+ memset((void *)page, 0, PAGE_SIZE);
+ return (pte_t *) (page);
+}
+
+extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
+
+#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+
+#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+ (unsigned long)(page_address(page)))
+
+#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page, unsigned long address)
+{
+ __free_page(page);
+}
+
+#define __pmd_free_tlb(tlb, pmd, addr) do { } while (0)
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+ pte_t *pte;
+
+ if (!page)
+ return NULL;
+
+ pte = kmap(page);
+// if (pte) {
+ clear_page(pte);
+ __flush_page_to_ram(pte);
+ flush_tlb_kernel_page(pte);
+ nocache_page(pte);
+// }
+ kunmap(page);
+
+ return page;
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *page)
+{
+ __free_page(page);
+}
+
+/*
+ * In our implementation, each pgd entry contains 1 pmd that is never allocated
+ * or freed. pgd_present is always 1, so this should never be called. -NL
+ */
+#define pmd_free(mm, pmd) BUG()
+
+extern inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_page((unsigned long) pgd);
+}
+
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *new_pgd;
+
+ new_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!new_pgd)
+ return NULL;
+ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+ memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
+ return new_pgd;
+}
+
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif /* M68K_CF_PGALLOC_H */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#ifndef _CF_PGTABLE_H
+#define _CF_PGTABLE_H
+
+#include <asm/cfmmu.h>
+#include <asm/page.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/virtconvert.h>
+#include <linux/linkage.h>
+
+/* For virtual address to physical address conversion */
+#define VTOP(addr) __pa(addr)
+#define PTOV(addr) __va(addr)
+
+
+#endif /* !__ASSEMBLY__ */
+
+/* Page protection values within PTE. */
+
+/* MMUDR bits, in proper place. */
+#define CF_PAGE_LOCKED (0x00000002)
+#define CF_PAGE_EXEC (0x00000004)
+#define CF_PAGE_WRITABLE (0x00000008)
+#define CF_PAGE_READABLE (0x00000010)
+#define CF_PAGE_SYSTEM (0x00000020)
+#define CF_PAGE_COPYBACK (0x00000040)
+#define CF_PAGE_NOCACHE (0x00000080)
+
+#define CF_CACHEMASK (~0x00000040)
+#define CF_PAGE_MMUDR_MASK (0x000000fe)
+
+#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE)
+
+/* MMUTR bits, need shifting down. */
+#define CF_PAGE_VALID (0x00000400)
+#define CF_PAGE_SHARED (0x00000800)
+
+#define CF_PAGE_MMUTR_MASK (0x00000c00)
+#define CF_PAGE_MMUTR_SHIFT (10)
+#define CF_ASID_MMU_SHIFT (2)
+
+/* Fake bits, not implemented in CF, will get masked out before
+ hitting hardware, and might go away altogether once this port is
+ complete. */
+#if PAGE_SHIFT < 13
+#error COLDFIRE Error: Pages must be at least 8k in size
+#endif
+#define CF_PAGE_ACCESSED (0x00001000)
+#define CF_PAGE_FILE (0x00000200)
+#define CF_PAGE_DIRTY (0x00000001)
+
+#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
+#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
+#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
+#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
+#define _DESCTYPE_MASK 0x003
+#define _CACHEMASK040 (~0x060)
+#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
+
+
+/* Externally used page protection values. */
+#define _PAGE_PRESENT (CF_PAGE_VALID)
+#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
+#define _PAGE_DIRTY (CF_PAGE_DIRTY)
+#define _PAGE_READWRITE (CF_PAGE_WRITABLE \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_SHARED \
+ | CF_PAGE_SYSTEM)
+
+/* Compound page protection values. */
+#define PAGE_NONE __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_ACCESSED)
+
+#define PAGE_INIT __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC \
+ | CF_PAGE_SYSTEM \
+ | CF_PAGE_SHARED)
+
+#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC \
+ | CF_PAGE_SYSTEM \
+ | CF_PAGE_SHARED \
+ | CF_PAGE_ACCESSED)
+
+#define PAGE_COPY __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE)
+
+/*
+ * Page protections for initialising protection_map. See mm/mmap.c
+ * for use. In general, the bit positions are xwr; the P-items are
+ * private and the S-items are shared.
+ */
+
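+/*
+ * For example, __P101 (private, r-x) below is CF_PAGE_VALID | CF_PAGE_ACCESSED
+ * | CF_PAGE_READABLE | CF_PAGE_EXEC: the page is present, readable and
+ * executable, but not writable.
+ */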
+#define __P000 PAGE_NONE
+#define __P100 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_EXEC)
+#define __P010 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_ACCESSED)
+#define __P110 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC)
+#define __P001 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE)
+#define __P101 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+#define __P011 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_ACCESSED)
+#define __P111 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+
+#define __S000 PAGE_NONE
+#define __S100 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_EXEC)
+#define __S010 PAGE_SHARED
+#define __S110 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC)
+#define __S001 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE)
+#define __S101 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_EXEC)
+#define __S011 PAGE_SHARED
+#define __S111 __pgprot(CF_PAGE_VALID \
+ | CF_PAGE_ACCESSED \
+ | CF_PAGE_READABLE \
+ | CF_PAGE_WRITABLE \
+ | CF_PAGE_EXEC)
+
+#define PTE_MASK PAGE_MASK
+#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+#define pmd_set(pmdp, ptep) do {} while (0)
+
+static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+ pgd_val(*pgdp) = virt_to_phys(pmdp);
+}
+
+#define __pte_page(pte) \
+ ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET))
+#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
+
+extern inline int pte_none(pte_t pte)
+{
+ return !pte_val(pte);
+}
+extern inline int pte_present(pte_t pte)
+{
+ return pte_val(pte) & CF_PAGE_VALID;
+}
+extern inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_val(*ptep) = 0;
+}
+
+#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define pte_page(pte) virt_to_page(__pte_page(pte))
+
+extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+#define pmd_none(pmd) pmd_none2(&(pmd))
+extern inline int pmd_bad2(pmd_t *pmd) { return 0; }
+#define pmd_bad(pmd) pmd_bad2(&(pmd))
+#define pmd_present(pmd) (!pmd_none2(&(pmd)))
+extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+extern inline int pgd_none(pgd_t pgd) { return 0; }
+extern inline int pgd_bad(pgd_t pgd) { return 0; }
+extern inline int pgd_present(pgd_t pgd) { return 1; }
+extern inline void pgd_clear(pgd_t *pgdp) {}
+
+
+#define pte_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
+ __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
+ __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
+ __FILE__, __LINE__, pgd_val(e))
+
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not...
+ * [we have the full set here even if they don't change from m68k]
+ */
+extern inline int pte_read(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_READABLE; }
+extern inline int pte_write(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_WRITABLE; }
+extern inline int pte_exec(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_EXEC; }
+extern inline int pte_dirty(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_DIRTY; }
+extern inline int pte_young(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_ACCESSED; }
+extern inline int pte_file(pte_t pte) \
+ { return pte_val(pte) & CF_PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
+
+extern inline pte_t pte_wrprotect(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; }
+extern inline pte_t pte_rdprotect(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; }
+extern inline pte_t pte_exprotect(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; }
+extern inline pte_t pte_mkclean(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkold(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte) \
+ { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; }
+extern inline pte_t pte_mkread(pte_t pte) \
+ { pte_val(pte) |= CF_PAGE_READABLE; return pte; }
+extern inline pte_t pte_mkexec(pte_t pte) \
+ { pte_val(pte) |= CF_PAGE_EXEC; return pte; }
+extern inline pte_t pte_mkdirty(pte_t pte) \
+ { pte_val(pte) |= CF_PAGE_DIRTY; return pte; }
+extern inline pte_t pte_mkyoung(pte_t pte) \
+ { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; }
+extern inline pte_t pte_mknocache(pte_t pte) \
+ { pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); return pte; }
+extern inline pte_t pte_mkcache(pte_t pte) \
+ { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+
+#define swapper_pg_dir kernel_pg_dir
+extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
+
+/* Find an entry in a pagetable directory. */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in a kernel pagetable directory. */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* Find an entry in the second-level pagetable. */
+extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+/* Find an entry in the third-level pagetable. */
+#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
+ __pte_offset(address))
+
+/* Disable caching for page at given kernel virtual address. */
+static inline void nocache_page(void *vaddr)
+{
+ pgd_t *dir;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long addr = (unsigned long)vaddr;
+
+ dir = pgd_offset_k(addr);
+ pmdp = pmd_offset(dir, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+ *ptep = pte_mknocache(*ptep);
+}
+
+/* Enable caching for page at given kernel virtual address. */
+static inline void cache_page(void *vaddr)
+{
+ pgd_t *dir;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long addr = (unsigned long)vaddr;
+
+ dir = pgd_offset_k(addr);
+ pmdp = pmd_offset(dir, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
+ *ptep = pte_mkcache(*ptep);
+}
+
+#define PTE_FILE_MAX_BITS 21
+#define PTE_FILE_SHIFT 11
+
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+ return pte_val(pte) >> PTE_FILE_SHIFT;
+}
+
+static inline pte_t pgoff_to_pte(unsigned pgoff)
+{
+ pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+ return pte;
+}
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
+ (offset << PTE_FILE_SHIFT) })
+#define __swp_type(x) ((x).val & 0xFF)
+#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) (__pte((x).val))
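+/*
+ * In this encoding the swap type occupies the low byte of the pte value and
+ * the swap offset starts at bit PTE_FILE_SHIFT (11), as the macros above show.
+ */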
+
+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \
+ __pte_offset(address))
+#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
+#define pte_unmap(pte) kunmap(pte)
+#define pte_unmap_nested(pte) kunmap(pte)
+
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !_CF_PGTABLE_H */
--- /dev/null
+/*
+ * linux/include/asm-m68k/cf_raw_io.h
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * 09/30/08 JKM: split Coldfire pieces into separate file
+ */
+#ifndef __CF_RAW_IO__
+#define __CF_RAW_IO__
+
+#ifdef __KERNEL__
+
+#include <asm/types.h>
+
+/* Values for nocacheflag and cmode */
+#define IOMAP_FULL_CACHING 0
+#define IOMAP_NOCACHE_SER 1
+#define IOMAP_NOCACHE_NONSER 2
+#define IOMAP_WRITETHROUGH 3
+
+extern void iounmap(void __iomem *addr);
+
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
+ int cacheflag);
+extern void __iounmap(void *addr, unsigned long size);
+
+
+/* ++roman: The assignments to temporary variables prevent gcc from sometimes
+ * generating two accesses to memory, which may be undesirable for some devices.
+ */
+#define in_8(addr) \
+ ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+#define in_be16(addr) \
+ ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+#define in_be32(addr) \
+ ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+#define in_le16(addr) \
+ ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+#define in_le32(addr) \
+ ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+
+
+#ifdef CONFIG_PCI
+/* pci */
+unsigned char pci_inb(long addr);
+unsigned short pci_inw(long addr);
+unsigned long pci_inl(long addr);
+
+void pci_outb(unsigned char val, long addr);
+void pci_outw(unsigned short val, long addr);
+void pci_outl(unsigned long val, long addr);
+
+void pci_insb(volatile unsigned char *addr,
+ unsigned char *buf, int len);
+void pci_insw(volatile unsigned short *addr,
+ unsigned short *buf, int len);
+void pci_insl(volatile unsigned long *addr,
+ unsigned long *buf, int len);
+
+void pci_outsb(volatile unsigned char *addr,
+ const unsigned char *buf, int len);
+void pci_outsw(volatile unsigned short *addr,
+ const unsigned short *buf, int len);
+void pci_outsl(volatile unsigned long *addr,
+ const unsigned long *buf, int len);
+
+unsigned short pci_raw_inw(long addr);
+unsigned long pci_raw_inl(long addr);
+void pci_raw_outw(unsigned short val, long addr);
+void pci_raw_outl(unsigned long val, long addr);
+
+#define raw_inb(port) pci_inb((long)((volatile unsigned char *)(port)))
+#define raw_inw(port) pci_raw_inw((long)((volatile unsigned short *)(port)))
+#define raw_inl(port) pci_raw_inl((long)((volatile unsigned long *)(port)))
+
+#define raw_outb(val, port) \
+ pci_outb((val), (long)((volatile unsigned char *)(port)))
+#define raw_outw(val, port) \
+ pci_raw_outw((val), (long)((volatile unsigned short *)(port)))
+#define raw_outl(val, port) \
+ pci_raw_outl((val), (long)((volatile unsigned long *)(port)))
+
+#define swap_inw(port) pci_inw((long)((volatile unsigned short *)(port)))
+#define swap_outw(val, port) \
+ pci_outw((val), (long)((volatile unsigned short *)(port)))
+
+#else
+/* non-pci */
+#define raw_inb in_8
+#define raw_inw in_be16
+#define raw_inl in_be32
+
+#define raw_outb(val,port) out_8((port),(val))
+#define raw_outw(val,port) out_be16((port),(val))
+#define raw_outl(val,port) out_be32((port),(val))
+
+#define swap_inw(port) in_le16((port))
+#define swap_outw(val,port) out_le16((port),(val))
+#endif
+
+static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ *buf++ = in_8(port);
+}
+
+static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
+ unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ out_8(port, *buf++);
+}
+
+static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++)
+ *buf++ = raw_inw(port);
+}
+
+static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+ unsigned int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++, buf++)
+ raw_outw(*buf, port);
+}
+
+static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++)
+ *buf++ = raw_inl(port);
+}
+
+static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+ unsigned int nr)
+{
+ unsigned int i;
+
+ for (i = 0; i < nr; i++, buf++)
+ raw_outl(*buf, port);
+}
+
+static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+ unsigned int nr)
+{
+#ifdef UNDEF
+ unsigned int i;
+
+ for (i = 0; i < nr; i++)
+ *buf++ = in_le16(port);
+#endif
+}
+
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
+ unsigned int nr)
+{
+#ifdef UNDEF
+ unsigned int i;
+
+ for (i = 0; i < nr; i++, buf++)
+ out_le16(port, *buf);
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __CF_RAW_IO__ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef M68K_CF_TLBFLUSH_H
+#define M68K_CF_TLBFLUSH_H
+
+#include <asm/coldfire.h>
+
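+/*
+ * All of the flush helpers below take the simple approach of writing
+ * MMUOR_CNL, which clears the non-locked TLB entries, rather than flushing
+ * individual pages or ranges; flush_tlb_pgtables() is a no-op.
+ */
+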
+/* Flush all userspace mappings. */
+static inline void flush_tlb_all(void)
+{
+ preempt_disable();
+ *MMUOR = MMUOR_CNL;
+ preempt_enable();
+}
+
+/* Clear user TLB entries within the context named in mm */
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ preempt_disable();
+ *MMUOR = MMUOR_CNL;
+ preempt_enable();
+}
+
+/* Flush a single TLB page. */
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ preempt_disable();
+ *MMUOR = MMUOR_CNL;
+ preempt_enable();
+}
+
+/* Flush a range of pages from TLB. */
+static inline void flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ preempt_disable();
+ *MMUOR = MMUOR_CNL;
+ preempt_enable();
+}
+
+/* Flush kernel page from TLB. */
+static inline void flush_tlb_kernel_page(void *addr)
+{
+ preempt_disable();
+ *MMUOR = MMUOR_CNL;
+ preempt_enable();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+#endif /* M68K_CF_TLBFLUSH_H */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef __M68K_CF_UACCESS_H
+#define __M68K_CF_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+
+/* The "moves" command is not available in the CF instruction set. */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <asm/segment.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/* We let the MMU do all checking */
+#define access_ok(type, addr, size) 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
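+/*
+ * The asm helpers below emit their own __ex_table entries: each faulting
+ * move instruction (labels 1:, 2:, ...) is paired with local fixup code
+ * (label 10:) that loads the error value and resumes after the access.
+ */
+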
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+asm volatile ("\n" \
+ "1: move."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: moveq.l %3,%0\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=m" (*(ptr)) \
+ : #reg (x), "i" (err))
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
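+/*
+ * For example, put_user(v, (u32 __user *)p) selects the 4-byte case and
+ * compiles down to a single move.l plus an __ex_table fixup that yields
+ * -EFAULT if the store faults.
+ */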
+#define __put_user(x, ptr) \
+({ \
+ typeof(*(ptr)) __pu_val = (x); \
+ int __pu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+ break; \
+ case 8: \
+ { \
+ const void __user *__pu_ptr = (ptr); \
+ asm volatile ("\n" \
+ "1: move.l %2,(%1)+\n" \
+ "2: move.l %R2,(%1)\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: movel %3,%0\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .long 3b,10b\n" \
+ " .previous" \
+ : "+d" (__pu_err), "+a" (__pu_ptr) \
+ : "r" (__pu_val), "i" (-EFAULT) \
+ : "memory"); \
+ break; \
+ } \
+ default: \
+ __pu_err = __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+#define put_user(x, ptr) __put_user(x, ptr)
+
+
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+ type __gu_val; \
+ asm volatile ("\n" \
+ "1: move."#bwl" %2,%1\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " subl %1,%1\n" \
+ " jra 2b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=&" #reg (__gu_val) \
+ : "m" (*(ptr)), "i" (err)); \
+ (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
+})
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = 0; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
+ break; \
+ case 4: \
+ __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+ break; \
+/* case 8: disabled because gcc-4.1 has a broken typeof \
+ { \
+ const void *__gu_ptr = (ptr); \
+ u64 __gu_val; \
+ asm volatile ("\n" \
+ "1: move.l (%2)+,%1\n" \
+ "2: move.l (%2),%R1\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " subl %1,%1\n" \
+ " subl %R1,%R1\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (__gu_err), "=&r" (__gu_val), \
+ "+a" (__gu_ptr) \
+ : "i" (-EFAULT) \
+ : "memory"); \
+ (x) = (typeof(*(ptr)))__gu_val; \
+ break; \
+ } */ \
+ default: \
+ __gu_err = __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+#define get_user(x, ptr) __get_user(x, ptr)
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+ asm volatile ("\n" \
+ "1: move."#s1" (%2)+,%3\n" \
+ " move."#s1" %3,(%1)+\n" \
+ "2: move."#s2" (%2)+,%3\n" \
+ " move."#s2" %3,(%1)+\n" \
+ " .ifnc \""#s3"\",\"\"\n" \
+ "3: move."#s3" (%2)+,%3\n" \
+ " move."#s3" %3,(%1)+\n" \
+ " .endif\n" \
+ "4:\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10f\n" \
+ " .long 2b,20f\n" \
+ " .ifnc \""#s3"\",\"\"\n" \
+ " .long 3b,30f\n" \
+ " .endif\n" \
+ " .previous\n" \
+ "\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: clr."#s1" (%1)+\n" \
+ "20: clr."#s2" (%1)+\n" \
+ " .ifnc \""#s3"\",\"\"\n" \
+ "30: clr."#s3" (%1)+\n" \
+ " .endif\n" \
+ " moveq.l #"#n",%0\n" \
+ " jra 4b\n" \
+ " .previous\n" \
+ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
+ : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = 0, tmp;
+
+ switch (n) {
+ case 1:
+ __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+ break;
+ case 2:
+ __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w,
+ d, 2);
+ break;
+ case 3:
+ __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+ break;
+ case 4:
+ __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l,
+ r, 4);
+ break;
+ case 5:
+ __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+ break;
+ case 6:
+ __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+ break;
+ case 7:
+ __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+ break;
+ case 8:
+ __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+ break;
+ case 9:
+ __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+ break;
+ case 10:
+ __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+ break;
+ case 12:
+ __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+ break;
+ default:
+ /* we limit the inlined version to 3 moves */
+ return __generic_copy_from_user(to, from, n);
+ }
+
+ return res;
+}
+
+#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
+ asm volatile ("\n" \
+ " move."#s1" (%2)+,%3\n" \
+ "11: move."#s1" %3,(%1)+\n" \
+ "12: move."#s2" (%2)+,%3\n" \
+ "21: move."#s2" %3,(%1)+\n" \
+ "22:\n" \
+ " .ifnc \""#s3"\",\"\"\n" \
+ " move."#s3" (%2)+,%3\n" \
+ "31: move."#s3" %3,(%1)+\n" \
+ "32:\n" \
+ " .endif\n" \
+ "4:\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 11b,5f\n" \
+ " .long 12b,5f\n" \
+ " .long 21b,5f\n" \
+ " .long 22b,5f\n" \
+ " .ifnc \""#s3"\",\"\"\n" \
+ " .long 31b,5f\n" \
+ " .long 32b,5f\n" \
+ " .endif\n" \
+ " .previous\n" \
+ "\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "5: moveq.l #"#n",%0\n" \
+ " jra 4b\n" \
+ " .previous\n" \
+ : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
+ : : "memory")
+
+static __always_inline unsigned long
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned long res = 0, tmp;
+
+ switch (n) {
+ case 1:
+ __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+ break;
+ case 2:
+ __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
+ break;
+ case 3:
+ __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
+ break;
+ case 4:
+ __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+ break;
+ case 5:
+ __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
+ break;
+ case 6:
+ __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
+ break;
+ case 7:
+ __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
+ break;
+ case 8:
+ __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
+ break;
+ case 9:
+ __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
+ break;
+ case 10:
+ __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
+ break;
+ case 12:
+ __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
+ break;
+ default:
+ /* limit the inlined version to 3 moves */
+ return __generic_copy_to_user(to, from, n);
+ }
+
+ return res;
+}
+
+#define __copy_from_user(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_copy_from_user(to, from, n) : \
+ __generic_copy_from_user(to, from, n))
+
+#define __copy_to_user(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_copy_to_user(to, from, n) : \
+ __generic_copy_to_user(to, from, n))
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
+#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
+
+long strncpy_from_user(char *dst, const char __user *src, long count);
+long strnlen_user(const char __user *src, long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user __clear_user
+
+#define strlen_user(str) strnlen_user(str, 32767)
+
+#endif /* __M68K_CF_UACCESS_H */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef __CF_VIRTCONVERT__
+#define __CF_VIRTCONVERT__
+
+/*
+ * Macros used for converting between virtual and physical mappings.
+ *
+ * Coldfire Specific
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/mmzone.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+
+/*
+ * Change virtual addresses to physical addresses and vice versa.
+ */
+static inline unsigned long virt_to_phys(void *address)
+{
+ return __pa(address);
+}
+
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/* Permanent address of a page. */
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define page_to_phys(page) \
+ __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
+#else
+#define page_to_phys(_page) ({ \
+ struct page *__page = _page; \
+ struct pglist_data *pgdat; \
+ pgdat = pg_data_table[page_to_nid(__page)]; \
+ page_to_pfn(__page) << PAGE_SHIFT; \
+})
+#endif
+
+/*
+ * IO bus memory addresses map 1:1 to physical addresses.
+ */
+#ifdef CONFIG_PCI
+#define virt_to_bus(a) (a + PCI_DMA_BASE)
+#define bus_to_virt(a) (a - PCI_DMA_BASE)
+#else
+#define virt_to_bus(a) (a)
+#define bus_to_virt(a) (a)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __CF_VIRTCONVERT__ */
--- /dev/null
+/*
+ * include/asm-m68k/cfcache.h - Coldfire Cache Controller
+ *
+ * Kurt Mahan kmahan@freescale.com
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef CF_CFCACHE_H
+#define CF_CFCACHE_H
+
+/*
+ * CACR Cache Control Register
+ */
+#define CF_CACR_DEC (0x80000000) /* Data Cache Enable */
+#define CF_CACR_DW (0x40000000) /* Data default Write-protect */
+#define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */
+#define CF_CACR_DPI (0x10000000) /* Data Disable CPUSHL Invalidate */
+#define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */
+#define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */
+#define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */
+#define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */
+#define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */
+#define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */
+#define CF_CACR_DDSP (0x00800000) /* Data default supervisor-protect */
+#define CF_CACR_IVO (0x00100000) /* Invalidate only */
+#define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */
+#define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */
+#define CF_CACR_IEC (0x00008000) /* Instruction Cache Enable */
+#define CF_CACR_SPA (0x00004000) /* Search by Physical Address */
+#define CF_CACR_DNFB (0x00002000) /* Default cache-inhibited fill buf */
+#define CF_CACR_IDPI (0x00001000) /* Instr Disable CPUSHL Invalidate */
+#define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */
+#define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */
+#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */
+#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */
+#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
+
+#ifdef CONFIG_M5445X
+/*
+ * M5445x Cache Configuration
+ * - cache line size is 16 bytes
+ * - cache is 4-way set associative
+ * - each cache has 256 sets (16k / 16bytes / 4way)
+ * - I-Cache size is 16KB
+ * - D-Cache size is 16KB
+ */
+#define ICACHE_SIZE 0x4000 /* instruction - 16k */
+#define DCACHE_SIZE 0x4000 /* data - 16k */
+
+#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
+#define CACHE_SETS 0x0100 /* 256 sets */
+#define CACHE_WAYS 0x0004 /* 4 way */
+
+#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
+ CF_CACR_BCINVA+ \
+ CF_CACR_ICINVA)
+
+#ifndef CONFIG_M5445X_DISABLE_CACHE
+#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
+ CF_CACR_BEC+ \
+ CF_CACR_IEC+ \
+ CF_CACR_DESB+ \
+ CF_CACR_EUSP)
+#else
+/* cache disabled for testing */
+#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
+#endif /* CONFIG_M5445X_DISABLE_CACHE */
+
+#elif defined(CONFIG_M547X_8X)
+/*
+ * M547x/M548x Cache Configuration
+ * - cache line size is 16 bytes
+ * - cache is 4-way set associative
+ * - each cache has 512 sets (32k / 16bytes / 4way)
+ * - I-Cache size is 32KB
+ * - D-Cache size is 32KB
+ */
+#define ICACHE_SIZE 0x8000 /* instruction - 32k */
+#define DCACHE_SIZE 0x8000 /* data - 32k */
+
+#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
+#define CACHE_SETS 0x0200 /* 512 sets */
+#define CACHE_WAYS 0x0004 /* 4 way */
+
+/* in for the old cpushl caching code */
+#define _DCACHE_SET_MASK ((DCACHE_SIZE/64-1)<<CACHE_WAYS)
+#define _ICACHE_SET_MASK ((ICACHE_SIZE/64-1)<<CACHE_WAYS)
+#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
+#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
+
+#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
+ CF_CACR_BCINVA+ \
+ CF_CACR_ICINVA)
+
+#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
+ CF_CACR_BEC+ \
+ CF_CACR_IEC+ \
+ CF_CACR_DESB+ \
+ CF_CACR_EUSP)
+#endif /* CONFIG_M547X_8X */
+
+#ifndef __ASSEMBLY__
+
+extern unsigned long shadow_cacr;
+extern void cacr_set(unsigned long x);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CF_CFCACHE_H */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Definitions for Coldfire V4e MMU
+ */
+#include <asm/movs.h>
+
+#ifndef __CF_MMU_H__
+#define __CF_MMU_H__
+
+
+#define MMU_BASE 0xE8000000
+
+
+#define MMUCR (MMU_BASE+0x00)
+#define MMUCR_ASMN 1
+#define MMUCR_ASM (1<<MMUCR_ASMN)
+#define MMUCR_ENN 0
+#define MMUCR_EN (1<<MMUCR_ENN)
+
+#define MMUOR REG16(MMU_BASE+0x04+0x02)
+#define MMUOR_AAN 16
+#define MMUOR_AA (0xffff<<MMUOR_AAN)
+#define MMUOR_STLBN 8
+#define MMUOR_STLB (1<<MMUOR_STLBN)
+#define MMUOR_CAN 7
+#define MMUOR_CA (1<<MMUOR_CAN)
+#define MMUOR_CNLN 6
+#define MMUOR_CNL (1<<MMUOR_CNLN)
+#define MMUOR_CASN 5
+#define MMUOR_CAS (1<<MMUOR_CASN)
+#define MMUOR_ITLBN 4
+#define MMUOR_ITLB (1<<MMUOR_ITLBN)
+#define MMUOR_ADRN 3
+#define MMUOR_ADR (1<<MMUOR_ADRN)
+#define MMUOR_RWN 2
+#define MMUOR_RW (1<<MMUOR_RWN)
+#define MMUOR_ACCN 1
+#define MMUOR_ACC (1<<MMUOR_ACCN)
+#define MMUOR_UAAN 0
+#define MMUOR_UAA (1<<MMUOR_UAAN)
+
+#define MMUSR REG32(MMU_BASE+0x08)
+#define MMUSR_SPFN 5
+#define MMUSR_SPF (1<<MMUSR_SPFN)
+#define MMUSR_RFN 4
+#define MMUSR_RF (1<<MMUSR_RFN)
+#define MMUSR_WFN 3
+#define MMUSR_WF (1<<MMUSR_WFN)
+#define MMUSR_HITN 1
+#define MMUSR_HIT (1<<MMUSR_HITN)
+
+#define MMUAR REG32(MMU_BASE+0x10)
+#define MMUAR_VPN 1
+#define MMUAR_VP (0xfffffffe)
+#define MMUAR_SN 0
+#define MMUAR_S (1<<MMUAR_SN)
+
+#define MMUTR REG32(MMU_BASE+0x14)
+#define MMUTR_VAN 10
+#define MMUTR_VA (0xfffffc00)
+#define MMUTR_IDN 2
+#define MMUTR_ID (0xff<<MMUTR_IDN)
+#define MMUTR_SGN 1
+#define MMUTR_SG (1<<MMUTR_SGN)
+#define MMUTR_VN 0
+#define MMUTR_V (1<<MMUTR_VN)
+
+#define MMUDR REG32(MMU_BASE+0x18)
+#define MMUDR_PAN 10
+#define MMUDR_PA (0xfffffc00)
+#define MMUDR_SZN 8
+#define MMUDR_SZ_MASK (0x2<<MMUDR_SZN)
+#define MMUDR_SZ1M (0<<MMUDR_SZN)
+#define MMUDR_SZ4K (1<<MMUDR_SZN)
+#define MMUDR_SZ8K (2<<MMUDR_SZN)
+#define MMUDR_SZ16M (3<<MMUDR_SZN)
+#define MMUDR_CMN 6
+#define MMUDR_INC (2<<MMUDR_CMN)
+#define MMUDR_IC (0<<MMUDR_CMN)
+#define MMUDR_DWT (0<<MMUDR_CMN)
+#define MMUDR_DCB (1<<MMUDR_CMN)
+#define MMUDR_DNCP (2<<MMUDR_CMN)
+#define MMUDR_DNCIP (3<<MMUDR_CMN)
+#define MMUDR_SPN 5
+#define MMUDR_SP (1<<MMUDR_SPN)
+#define MMUDR_RN 4
+#define MMUDR_R (1<<MMUDR_RN)
+#define MMUDR_WN 3
+#define MMUDR_W (1<<MMUDR_WN)
+#define MMUDR_XN 2
+#define MMUDR_X (1<<MMUDR_XN)
+#define MMUDR_LKN 1
+#define MMUDR_LK (1<<MMUDR_LKN)
+
+
+#ifndef __ASSEMBLY__
+#define CF_PMEGS_NUM 256
+#define CF_INVALID_CONTEXT 255
+#define CF_PAGE_PGNUM_MASK (PAGE_MASK)
+
+extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb,
+ int extension_word);
+#endif /* __ASSEMBLY__*/
+
+#endif /* !__CF_MMU_H__ */
--- /dev/null
+/*
+ * m5485dma.h -- ColdFire 547x/548x DMA controller support.
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef __MCF548X_DMA_H__
+#define __MCF548X_DMA_H__
+
+
+/* Register read/write macros */
+#define MCF_DMA_DIPR MCF_REG32(0x008014)
+#define MCF_DMA_DIMR MCF_REG32(0x008018)
+#define MCF_DMA_IMCR MCF_REG32(0x00805C)
+
+/* Bit definitions and macros for MCF_DMA_DIPR */
+#define MCF_DMA_DIPR_TASK0 (0x00000001)
+#define MCF_DMA_DIPR_TASK1 (0x00000002)
+#define MCF_DMA_DIPR_TASK2 (0x00000004)
+#define MCF_DMA_DIPR_TASK3 (0x00000008)
+#define MCF_DMA_DIPR_TASK4 (0x00000010)
+#define MCF_DMA_DIPR_TASK5 (0x00000020)
+#define MCF_DMA_DIPR_TASK6 (0x00000040)
+#define MCF_DMA_DIPR_TASK7 (0x00000080)
+#define MCF_DMA_DIPR_TASK8 (0x00000100)
+#define MCF_DMA_DIPR_TASK9 (0x00000200)
+#define MCF_DMA_DIPR_TASK10 (0x00000400)
+#define MCF_DMA_DIPR_TASK11 (0x00000800)
+#define MCF_DMA_DIPR_TASK12 (0x00001000)
+#define MCF_DMA_DIPR_TASK13 (0x00002000)
+#define MCF_DMA_DIPR_TASK14 (0x00004000)
+#define MCF_DMA_DIPR_TASK15 (0x00008000)
+
+/* Bit definitions and macros for MCF_DMA_DIMR */
+#define MCF_DMA_DIMR_TASK0 (0x00000001)
+#define MCF_DMA_DIMR_TASK1 (0x00000002)
+#define MCF_DMA_DIMR_TASK2 (0x00000004)
+#define MCF_DMA_DIMR_TASK3 (0x00000008)
+#define MCF_DMA_DIMR_TASK4 (0x00000010)
+#define MCF_DMA_DIMR_TASK5 (0x00000020)
+#define MCF_DMA_DIMR_TASK6 (0x00000040)
+#define MCF_DMA_DIMR_TASK7 (0x00000080)
+#define MCF_DMA_DIMR_TASK8 (0x00000100)
+#define MCF_DMA_DIMR_TASK9 (0x00000200)
+#define MCF_DMA_DIMR_TASK10 (0x00000400)
+#define MCF_DMA_DIMR_TASK11 (0x00000800)
+#define MCF_DMA_DIMR_TASK12 (0x00001000)
+#define MCF_DMA_DIMR_TASK13 (0x00002000)
+#define MCF_DMA_DIMR_TASK14 (0x00004000)
+#define MCF_DMA_DIMR_TASK15 (0x00008000)
+
+/* Bit definitions and macros for MCF_DMA_IMCR */
+#define MCF_DMA_IMCR_SRC16(x) (((x)&0x00000003)<<0)
+#define MCF_DMA_IMCR_SRC17(x) (((x)&0x00000003)<<2)
+#define MCF_DMA_IMCR_SRC18(x) (((x)&0x00000003)<<4)
+#define MCF_DMA_IMCR_SRC19(x) (((x)&0x00000003)<<6)
+#define MCF_DMA_IMCR_SRC20(x) (((x)&0x00000003)<<8)
+#define MCF_DMA_IMCR_SRC21(x) (((x)&0x00000003)<<10)
+#define MCF_DMA_IMCR_SRC22(x) (((x)&0x00000003)<<12)
+#define MCF_DMA_IMCR_SRC23(x) (((x)&0x00000003)<<14)
+#define MCF_DMA_IMCR_SRC24(x) (((x)&0x00000003)<<16)
+#define MCF_DMA_IMCR_SRC25(x) (((x)&0x00000003)<<18)
+#define MCF_DMA_IMCR_SRC26(x) (((x)&0x00000003)<<20)
+#define MCF_DMA_IMCR_SRC27(x) (((x)&0x00000003)<<22)
+#define MCF_DMA_IMCR_SRC28(x) (((x)&0x00000003)<<24)
+#define MCF_DMA_IMCR_SRC29(x) (((x)&0x00000003)<<26)
+#define MCF_DMA_IMCR_SRC30(x) (((x)&0x00000003)<<28)
+#define MCF_DMA_IMCR_SRC31(x) (((x)&0x00000003)<<30)
+#define MCF_DMA_IMCR_SRC16_FEC0RX (0x00000000)
+#define MCF_DMA_IMCR_SRC17_FEC0TX (0x00000000)
+#define MCF_DMA_IMCR_SRC18_FEC0RX (0x00000020)
+#define MCF_DMA_IMCR_SRC19_FEC0TX (0x00000080)
+#define MCF_DMA_IMCR_SRC20_FEC1RX (0x00000100)
+#define MCF_DMA_IMCR_SRC21_DREQ1 (0x00000000)
+#define MCF_DMA_IMCR_SRC21_FEC1TX (0x00000400)
+#define MCF_DMA_IMCR_SRC22_FEC0RX (0x00001000)
+#define MCF_DMA_IMCR_SRC23_FEC0TX (0x00004000)
+#define MCF_DMA_IMCR_SRC24_CTM0 (0x00010000)
+#define MCF_DMA_IMCR_SRC24_FEC1RX (0x00020000)
+#define MCF_DMA_IMCR_SRC25_CTM1 (0x00040000)
+#define MCF_DMA_IMCR_SRC25_FEC1TX (0x00080000)
+#define MCF_DMA_IMCR_SRC26_USBEP4 (0x00000000)
+#define MCF_DMA_IMCR_SRC26_CTM2 (0x00200000)
+#define MCF_DMA_IMCR_SRC27_USBEP5 (0x00000000)
+#define MCF_DMA_IMCR_SRC27_CTM3 (0x00800000)
+#define MCF_DMA_IMCR_SRC28_USBEP6 (0x00000000)
+#define MCF_DMA_IMCR_SRC28_CTM4 (0x01000000)
+#define MCF_DMA_IMCR_SRC28_DREQ1 (0x02000000)
+#define MCF_DMA_IMCR_SRC28_PSC2RX (0x03000000)
+#define MCF_DMA_IMCR_SRC29_DREQ1 (0x04000000)
+#define MCF_DMA_IMCR_SRC29_CTM5 (0x08000000)
+#define MCF_DMA_IMCR_SRC29_PSC2TX (0x0C000000)
+#define MCF_DMA_IMCR_SRC30_FEC1RX (0x00000000)
+#define MCF_DMA_IMCR_SRC30_CTM6 (0x10000000)
+#define MCF_DMA_IMCR_SRC30_PSC3RX (0x30000000)
+#define MCF_DMA_IMCR_SRC31_FEC1TX (0x00000000)
+#define MCF_DMA_IMCR_SRC31_CTM7 (0x80000000)
+#define MCF_DMA_IMCR_SRC31_PSC3TX (0xC0000000)
+
+#endif /* __MCF548X_DMA_H__ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * File: mcf548x_dspi.h
+ * Purpose: Register and bit definitions for the MCF548X
+ *
+ * Notes:
+ *
+ */
+
+#ifndef _M5485DSPI_H_
+#define _M5485DSPI_H_
+
+/*
+ *
+ * DMA Serial Peripheral Interface (DSPI)
+ *
+ */
+
+/* Register read/write macros */
+#define MCF_DSPI_DMCR MCF_REG32(0x008A00)
+#define MCF_DSPI_DTCR MCF_REG32(0x008A08)
+#define MCF_DSPI_DCTAR0 MCF_REG32(0x008A0C)
+#define MCF_DSPI_DCTAR1 MCF_REG32(0x008A10)
+#define MCF_DSPI_DCTAR2 MCF_REG32(0x008A14)
+#define MCF_DSPI_DCTAR3 MCF_REG32(0x008A18)
+#define MCF_DSPI_DCTAR4 MCF_REG32(0x008A1C)
+#define MCF_DSPI_DCTAR5 MCF_REG32(0x008A20)
+#define MCF_DSPI_DCTAR6 MCF_REG32(0x008A24)
+#define MCF_DSPI_DCTAR7 MCF_REG32(0x008A28)
+#define MCF_DSPI_DCTARn(x) MCF_REG32(0x008A0C+(x*4))
+#define MCF_DSPI_DSR MCF_REG32(0x008A2C)
+#define MCF_DSPI_DRSER MCF_REG32(0x008A30)
+#define MCF_DSPI_DTFR MCF_REG32(0x008A34)
+#define MCF_DSPI_DRFR MCF_REG32(0x008A38)
+#define MCF_DSPI_DTFDR0 MCF_REG32(0x008A3C)
+#define MCF_DSPI_DTFDR1 MCF_REG32(0x008A40)
+#define MCF_DSPI_DTFDR2 MCF_REG32(0x008A44)
+#define MCF_DSPI_DTFDR3 MCF_REG32(0x008A48)
+#define MCF_DSPI_DTFDRn(x) MCF_REG32(0x008A3C+(x*4))
+#define MCF_DSPI_DRFDR0 MCF_REG32(0x008A7C)
+#define MCF_DSPI_DRFDR1 MCF_REG32(0x008A80)
+#define MCF_DSPI_DRFDR2 MCF_REG32(0x008A84)
+#define MCF_DSPI_DRFDR3 MCF_REG32(0x008A88)
+#define MCF_DSPI_DRFDRn(x) MCF_REG32(0x008A7C+(x*4))
+
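+/*
+ * Editorial note, not part of the original Freescale header: the DCTARn(x),
+ * DTFDRn(x) and DRFDRn(x) macros simply index the numbered registers above,
+ * e.g. MCF_DSPI_DCTARn(2) resolves to the same address as MCF_DSPI_DCTAR2.
+ * Valid indices are 0-7 for DCTARn and 0-3 for the FIFO data registers.
+ */
+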
+/* Bit definitions and macros for MCF_DSPI_DMCR */
+#define MCF_DSPI_DMCR_HALT (0x00000001)
+#define MCF_DSPI_DMCR_SMPL_PT(x) (((x)&0x00000003)<<8)
+#define MCF_DSPI_DMCR_CRXF (0x00000400)
+#define MCF_DSPI_DMCR_CTXF (0x00000800)
+#define MCF_DSPI_DMCR_DRXF (0x00001000)
+#define MCF_DSPI_DMCR_DTXF (0x00002000)
+#define MCF_DSPI_DMCR_CSIS0 (0x00010000)
+#define MCF_DSPI_DMCR_CSIS2 (0x00040000)
+#define MCF_DSPI_DMCR_CSIS3 (0x00080000)
+#define MCF_DSPI_DMCR_CSIS5 (0x00200000)
+#define MCF_DSPI_DMCR_ROOE (0x01000000)
+#define MCF_DSPI_DMCR_PCSSE (0x02000000)
+#define MCF_DSPI_DMCR_MTFE (0x04000000)
+#define MCF_DSPI_DMCR_FRZ (0x08000000)
+#define MCF_DSPI_DMCR_DCONF(x) (((x)&0x00000003)<<28)
+#define MCF_DSPI_DMCR_CSCK (0x40000000)
+#define MCF_DSPI_DMCR_MSTR (0x80000000)
+
+/* Bit definitions and macros for MCF_DSPI_DTCR */
+#define MCF_DSPI_DTCR_SPI_TCNT(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_DSPI_DCTARn */
+#define MCF_DSPI_DCTAR_BR(x) (((x)&0x0000000F)<<0)
+#define MCF_DSPI_DCTAR_DT(x) (((x)&0x0000000F)<<4)
+#define MCF_DSPI_DCTAR_ASC(x) (((x)&0x0000000F)<<8)
+#define MCF_DSPI_DCTAR_CSSCK(x) (((x)&0x0000000F)<<12)
+#define MCF_DSPI_DCTAR_PBR(x) (((x)&0x00000003)<<16)
+#define MCF_DSPI_DCTAR_PDT(x) (((x)&0x00000003)<<18)
+#define MCF_DSPI_DCTAR_PASC(x) (((x)&0x00000003)<<20)
+#define MCF_DSPI_DCTAR_PCSSCK(x) (((x)&0x00000003)<<22)
+#define MCF_DSPI_DCTAR_LSBFE (0x01000000)
+#define MCF_DSPI_DCTAR_CPHA (0x02000000)
+#define MCF_DSPI_DCTAR_CPOL (0x04000000)
+/* #define MCF_DSPI_DCTAR_TRSZ(x) (((x)&0x0000000F)<<27) */
+#define MCF_DSPI_DCTAR_FMSZ(x) (((x)&0x0000000F)<<27)
+#define MCF_DSPI_DCTAR_PCSSCK_1CLK (0x00000000)
+#define MCF_DSPI_DCTAR_PCSSCK_3CLK (0x00400000)
+#define MCF_DSPI_DCTAR_PCSSCK_5CLK (0x00800000)
+#define MCF_DSPI_DCTAR_PCSSCK_7CLK (0x00A00000)
+#define MCF_DSPI_DCTAR_PASC_1CLK (0x00000000)
+#define MCF_DSPI_DCTAR_PASC_3CLK (0x00100000)
+#define MCF_DSPI_DCTAR_PASC_5CLK (0x00200000)
+#define MCF_DSPI_DCTAR_PASC_7CLK (0x00300000)
+#define MCF_DSPI_DCTAR_PDT_1CLK (0x00000000)
+#define MCF_DSPI_DCTAR_PDT_3CLK (0x00040000)
+#define MCF_DSPI_DCTAR_PDT_5CLK (0x00080000)
+#define MCF_DSPI_DCTAR_PDT_7CLK (0x000A0000)
+#define MCF_DSPI_DCTAR_PBR_1CLK (0x00000000)
+#define MCF_DSPI_DCTAR_PBR_3CLK (0x00010000)
+#define MCF_DSPI_DCTAR_PBR_5CLK (0x00020000)
+#define MCF_DSPI_DCTAR_PBR_7CLK (0x00030000)
+
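+/*
+ * Illustrative sketch (editorial addition): a transfer-attribute word is
+ * typically built by OR-ing the field macros above, e.g.
+ *
+ *   MCF_DSPI_DCTARn(0) = MCF_DSPI_DCTAR_FMSZ(7) | MCF_DSPI_DCTAR_CPOL |
+ *                        MCF_DSPI_DCTAR_PBR(1) | MCF_DSPI_DCTAR_BR(4);
+ *
+ * The field values shown are placeholders, not a recommended configuration.
+ */
+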
+/* Bit definitions and macros for MCF_DSPI_DSR */
+#define MCF_DSPI_DSR_RXPTR(x) (((x)&0x0000000F)<<0)
+#define MCF_DSPI_DSR_RXCTR(x) (((x)&0x0000000F)<<4)
+#define MCF_DSPI_DSR_TXPTR(x) (((x)&0x0000000F)<<8)
+#define MCF_DSPI_DSR_TXCTR(x) (((x)&0x0000000F)<<12)
+#define MCF_DSPI_DSR_RFDF (0x00020000)
+#define MCF_DSPI_DSR_RFOF (0x00080000)
+#define MCF_DSPI_DSR_TFFF (0x02000000)
+#define MCF_DSPI_DSR_TFUF (0x08000000)
+#define MCF_DSPI_DSR_EOQF (0x10000000)
+#define MCF_DSPI_DSR_TXRXS (0x40000000)
+#define MCF_DSPI_DSR_TCF (0x80000000)
+
+/* Bit definitions and macros for MCF_DSPI_DRSER */
+#define MCF_DSPI_DRSER_RFDFS (0x00010000)
+#define MCF_DSPI_DRSER_RFDFE (0x00020000)
+#define MCF_DSPI_DRSER_RFOFE (0x00080000)
+#define MCF_DSPI_DRSER_TFFFS (0x01000000)
+#define MCF_DSPI_DRSER_TFFFE (0x02000000)
+#define MCF_DSPI_DRSER_TFUFE (0x08000000)
+#define MCF_DSPI_DRSER_EOQFE (0x10000000)
+#define MCF_DSPI_DRSER_TCFE (0x80000000)
+
+/* Bit definitions and macros for MCF_DSPI_DTFR */
+#define MCF_DSPI_DTFR_TXDATA(x) (((x)&0x0000FFFF)<<0)
+#define MCF_DSPI_DTFR_CS0 (0x00010000)
+#define MCF_DSPI_DTFR_CS2 (0x00040000)
+#define MCF_DSPI_DTFR_CS3 (0x00080000)
+#define MCF_DSPI_DTFR_CS5 (0x00200000)
+#define MCF_DSPI_DTFR_CTCNT (0x04000000)
+#define MCF_DSPI_DTFR_EOQ (0x08000000)
+#define MCF_DSPI_DTFR_CTAS(x) (((x)&0x00000007)<<28)
+#define MCF_DSPI_DTFR_CONT (0x80000000)
+
+/* Bit definitions and macros for MCF_DSPI_DRFR */
+#define MCF_DSPI_DRFR_RXDATA(x) (((x)&0x0000FFFF)<<0)
+
+/* Bit definitions and macros for MCF_DSPI_DTFDRn */
+#define MCF_DSPI_DTFDRn_TXDATA(x) (((x)&0x0000FFFF)<<0)
+#define MCF_DSPI_DTFDRn_TXCMD(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_DSPI_DRFDRn */
+#define MCF_DSPI_DRFDRn_RXDATA(x) (((x)&0x0000FFFF)<<0)
+
+/********************************************************************/
+
+#endif /* _M5485DSPI_H_ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * File: mcf548x_gpio.h
+ * Purpose: Register and bit definitions for the MCF548X
+ *
+ * Notes:
+ *
+ */
+
+#ifndef _M5485GPIO_H_
+#define _M5485GPIO_H_
+
+/*********************************************************************
+*
+* General Purpose I/O (GPIO)
+*
+*********************************************************************/
+
+/* Register read/write macros */
+#define MCF_GPIO_PODR_FBCTL MCF_REG08(0x000A00)
+#define MCF_GPIO_PODR_FBCS MCF_REG08(0x000A01)
+#define MCF_GPIO_PODR_DMA MCF_REG08(0x000A02)
+#define MCF_GPIO_PODR_FEC0H MCF_REG08(0x000A04)
+#define MCF_GPIO_PODR_FEC0L MCF_REG08(0x000A05)
+#define MCF_GPIO_PODR_FEC1H MCF_REG08(0x000A06)
+#define MCF_GPIO_PODR_FEC1L MCF_REG08(0x000A07)
+#define MCF_GPIO_PODR_FECI2C MCF_REG08(0x000A08)
+#define MCF_GPIO_PODR_PCIBG MCF_REG08(0x000A09)
+#define MCF_GPIO_PODR_PCIBR MCF_REG08(0x000A0A)
+#define MCF_GPIO_PODR_PSC3PSC2 MCF_REG08(0x000A0C)
+#define MCF_GPIO_PODR_PSC1PSC0 MCF_REG08(0x000A0D)
+#define MCF_GPIO_PODR_DSPI MCF_REG08(0x000A0E)
+#define MCF_GPIO_PDDR_FBCTL MCF_REG08(0x000A10)
+#define MCF_GPIO_PDDR_FBCS MCF_REG08(0x000A11)
+#define MCF_GPIO_PDDR_DMA MCF_REG08(0x000A12)
+#define MCF_GPIO_PDDR_FEC0H MCF_REG08(0x000A14)
+#define MCF_GPIO_PDDR_FEC0L MCF_REG08(0x000A15)
+#define MCF_GPIO_PDDR_FEC1H MCF_REG08(0x000A16)
+#define MCF_GPIO_PDDR_FEC1L MCF_REG08(0x000A17)
+#define MCF_GPIO_PDDR_FECI2C MCF_REG08(0x000A18)
+#define MCF_GPIO_PDDR_PCIBG MCF_REG08(0x000A19)
+#define MCF_GPIO_PDDR_PCIBR MCF_REG08(0x000A1A)
+#define MCF_GPIO_PDDR_PSC3PSC2 MCF_REG08(0x000A1C)
+#define MCF_GPIO_PDDR_PSC1PSC0 MCF_REG08(0x000A1D)
+#define MCF_GPIO_PDDR_DSPI MCF_REG08(0x000A1E)
+#define MCF_GPIO_PPDSDR_FBCTL MCF_REG08(0x000A20)
+#define MCF_GPIO_PPDSDR_FBCS MCF_REG08(0x000A21)
+#define MCF_GPIO_PPDSDR_DMA MCF_REG08(0x000A22)
+#define MCF_GPIO_PPDSDR_FEC0H MCF_REG08(0x000A24)
+#define MCF_GPIO_PPDSDR_FEC0L MCF_REG08(0x000A25)
+#define MCF_GPIO_PPDSDR_FEC1H MCF_REG08(0x000A26)
+#define MCF_GPIO_PPDSDR_FEC1L MCF_REG08(0x000A27)
+#define MCF_GPIO_PPDSDR_FECI2C MCF_REG08(0x000A28)
+#define MCF_GPIO_PPDSDR_PCIBG MCF_REG08(0x000A29)
+#define MCF_GPIO_PPDSDR_PCIBR MCF_REG08(0x000A2A)
+#define MCF_GPIO_PPDSDR_PSC3PSC2 MCF_REG08(0x000A2C)
+#define MCF_GPIO_PPDSDR_PSC1PSC0 MCF_REG08(0x000A2D)
+#define MCF_GPIO_PPDSDR_DSPI MCF_REG08(0x000A2E)
+#define MCF_GPIO_PCLRR_FBCTL MCF_REG08(0x000A30)
+#define MCF_GPIO_PCLRR_FBCS MCF_REG08(0x000A31)
+#define MCF_GPIO_PCLRR_DMA MCF_REG08(0x000A32)
+#define MCF_GPIO_PCLRR_FEC0H MCF_REG08(0x000A34)
+#define MCF_GPIO_PCLRR_FEC0L MCF_REG08(0x000A35)
+#define MCF_GPIO_PCLRR_FEC1H MCF_REG08(0x000A36)
+#define MCF_GPIO_PCLRR_FEC1L MCF_REG08(0x000A37)
+#define MCF_GPIO_PCLRR_FECI2C MCF_REG08(0x000A38)
+#define MCF_GPIO_PCLRR_PCIBG MCF_REG08(0x000A39)
+#define MCF_GPIO_PCLRR_PCIBR MCF_REG08(0x000A3A)
+#define MCF_GPIO_PCLRR_PSC3PSC2 MCF_REG08(0x000A3C)
+#define MCF_GPIO_PCLRR_PSC1PSC0 MCF_REG08(0x000A3D)
+#define MCF_GPIO_PCLRR_DSPI MCF_REG08(0x000A3E)
+#define MCF_GPIO_PAR_FBCTL MCF_REG16(0x000A40)
+#define MCF_GPIO_PAR_FBCS MCF_REG08(0x000A42)
+#define MCF_GPIO_PAR_DMA MCF_REG08(0x000A43)
+#define MCF_GPIO_PAR_FECI2CIRQ MCF_REG16(0x000A44)
+#define MCF_GPIO_PAR_PCIBG MCF_REG16(0x000A48)
+#define MCF_GPIO_PAR_PCIBR MCF_REG16(0x000A4A)
+#define MCF_GPIO_PAR_PSC3 MCF_REG08(0x000A4C)
+#define MCF_GPIO_PAR_PSC2 MCF_REG08(0x000A4D)
+#define MCF_GPIO_PAR_PSC1 MCF_REG08(0x000A4E)
+#define MCF_GPIO_PAR_PSC0 MCF_REG08(0x000A4F)
+#define MCF_GPIO_PAR_DSPI MCF_REG16(0x000A50)
+#define MCF_GPIO_PAR_TIMER MCF_REG08(0x000A52)
+
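+/*
+ * Editorial note, not part of the original Freescale header: each GPIO port
+ * has one register of each type: PODR (output data), PDDR (data direction,
+ * 1 = output), PPDSDR (reads the pin state; writing 1 sets the output latch)
+ * and PCLRR (writing 0 clears the output latch), while the PAR registers
+ * select the pin-assignment (multiplex) function.  Illustrative sketch only,
+ * assuming the MCF_REG08() accessors used above yield writable lvalues:
+ *
+ *   MCF_GPIO_PDDR_DSPI  |= MCF_GPIO_PDDR_DSPI_PDDRDSPI3;      (pin as output)
+ *   MCF_GPIO_PPDSDR_DSPI = MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI3;  (drive it high)
+ */
+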
+/* Bit definitions and macros for MCF_GPIO_PODR_FBCTL */
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL0 (0x01)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL1 (0x02)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL2 (0x04)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL3 (0x08)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL4 (0x10)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL5 (0x20)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL6 (0x40)
+#define MCF_GPIO_PODR_FBCTL_PODRFBCTL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FBCS */
+#define MCF_GPIO_PODR_FBCS_PODRFBCS1 (0x02)
+#define MCF_GPIO_PODR_FBCS_PODRFBCS2 (0x04)
+#define MCF_GPIO_PODR_FBCS_PODRFBCS3 (0x08)
+#define MCF_GPIO_PODR_FBCS_PODRFBCS4 (0x10)
+#define MCF_GPIO_PODR_FBCS_PODRFBCS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_DMA */
+#define MCF_GPIO_PODR_DMA_PODRDMA0 (0x01)
+#define MCF_GPIO_PODR_DMA_PODRDMA1 (0x02)
+#define MCF_GPIO_PODR_DMA_PODRDMA2 (0x04)
+#define MCF_GPIO_PODR_DMA_PODRDMA3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FEC0H */
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H0 (0x01)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H1 (0x02)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H2 (0x04)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H3 (0x08)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H4 (0x10)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H5 (0x20)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H6 (0x40)
+#define MCF_GPIO_PODR_FEC0H_PODRFEC0H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FEC0L */
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L0 (0x01)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L1 (0x02)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L2 (0x04)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L3 (0x08)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L4 (0x10)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L5 (0x20)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L6 (0x40)
+#define MCF_GPIO_PODR_FEC0L_PODRFEC0L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FEC1H */
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H0 (0x01)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H1 (0x02)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H2 (0x04)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H3 (0x08)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H4 (0x10)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H5 (0x20)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H6 (0x40)
+#define MCF_GPIO_PODR_FEC1H_PODRFEC1H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FEC1L */
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L0 (0x01)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L1 (0x02)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L2 (0x04)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L3 (0x08)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L4 (0x10)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L5 (0x20)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L6 (0x40)
+#define MCF_GPIO_PODR_FEC1L_PODRFEC1L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECI2C */
+#define MCF_GPIO_PODR_FECI2C_PODRFECI2C0 (0x01)
+#define MCF_GPIO_PODR_FECI2C_PODRFECI2C1 (0x02)
+#define MCF_GPIO_PODR_FECI2C_PODRFECI2C2 (0x04)
+#define MCF_GPIO_PODR_FECI2C_PODRFECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PCIBG */
+#define MCF_GPIO_PODR_PCIBG_PODRPCIBG0 (0x01)
+#define MCF_GPIO_PODR_PCIBG_PODRPCIBG1 (0x02)
+#define MCF_GPIO_PODR_PCIBG_PODRPCIBG2 (0x04)
+#define MCF_GPIO_PODR_PCIBG_PODRPCIBG3 (0x08)
+#define MCF_GPIO_PODR_PCIBG_PODRPCIBG4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PCIBR */
+#define MCF_GPIO_PODR_PCIBR_PODRPCIBR0 (0x01)
+#define MCF_GPIO_PODR_PCIBR_PODRPCIBR1 (0x02)
+#define MCF_GPIO_PODR_PCIBR_PODRPCIBR2 (0x04)
+#define MCF_GPIO_PODR_PCIBR_PODRPCIBR3 (0x08)
+#define MCF_GPIO_PODR_PCIBR_PODRPCIBR4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PSC3PSC2 */
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC20 (0x01)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC21 (0x02)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC22 (0x04)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC23 (0x08)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC24 (0x10)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC25 (0x20)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC26 (0x40)
+#define MCF_GPIO_PODR_PSC3PSC2_PODRPSC3PSC27 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PSC1PSC0 */
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC00 (0x01)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC01 (0x02)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC02 (0x04)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC03 (0x08)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC04 (0x10)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC05 (0x20)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC06 (0x40)
+#define MCF_GPIO_PODR_PSC1PSC0_PODRPSC1PSC07 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_DSPI */
+#define MCF_GPIO_PODR_DSPI_PODRDSPI0 (0x01)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI1 (0x02)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI2 (0x04)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI3 (0x08)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI4 (0x10)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI5 (0x20)
+#define MCF_GPIO_PODR_DSPI_PODRDSPI6 (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FBCTL */
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL0 (0x01)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL1 (0x02)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL2 (0x04)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL3 (0x08)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL4 (0x10)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL5 (0x20)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL6 (0x40)
+#define MCF_GPIO_PDDR_FBCTL_PDDRFBCTL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FBCS */
+#define MCF_GPIO_PDDR_FBCS_PDDRFBCS1 (0x02)
+#define MCF_GPIO_PDDR_FBCS_PDDRFBCS2 (0x04)
+#define MCF_GPIO_PDDR_FBCS_PDDRFBCS3 (0x08)
+#define MCF_GPIO_PDDR_FBCS_PDDRFBCS4 (0x10)
+#define MCF_GPIO_PDDR_FBCS_PDDRFBCS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_DMA */
+#define MCF_GPIO_PDDR_DMA_PDDRDMA0 (0x01)
+#define MCF_GPIO_PDDR_DMA_PDDRDMA1 (0x02)
+#define MCF_GPIO_PDDR_DMA_PDDRDMA2 (0x04)
+#define MCF_GPIO_PDDR_DMA_PDDRDMA3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FEC0H */
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H0 (0x01)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H1 (0x02)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H2 (0x04)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H3 (0x08)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H4 (0x10)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H5 (0x20)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H6 (0x40)
+#define MCF_GPIO_PDDR_FEC0H_PDDRFEC0H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FEC0L */
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L0 (0x01)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L1 (0x02)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L2 (0x04)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L3 (0x08)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L4 (0x10)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L5 (0x20)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L6 (0x40)
+#define MCF_GPIO_PDDR_FEC0L_PDDRFEC0L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FEC1H */
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H0 (0x01)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H1 (0x02)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H2 (0x04)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H3 (0x08)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H4 (0x10)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H5 (0x20)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H6 (0x40)
+#define MCF_GPIO_PDDR_FEC1H_PDDRFEC1H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FEC1L */
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L0 (0x01)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L1 (0x02)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L2 (0x04)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L3 (0x08)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L4 (0x10)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L5 (0x20)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L6 (0x40)
+#define MCF_GPIO_PDDR_FEC1L_PDDRFEC1L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECI2C */
+#define MCF_GPIO_PDDR_FECI2C_PDDRFECI2C0 (0x01)
+#define MCF_GPIO_PDDR_FECI2C_PDDRFECI2C1 (0x02)
+#define MCF_GPIO_PDDR_FECI2C_PDDRFECI2C2 (0x04)
+#define MCF_GPIO_PDDR_FECI2C_PDDRFECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PCIBG */
+#define MCF_GPIO_PDDR_PCIBG_PDDRPCIBG0 (0x01)
+#define MCF_GPIO_PDDR_PCIBG_PDDRPCIBG1 (0x02)
+#define MCF_GPIO_PDDR_PCIBG_PDDRPCIBG2 (0x04)
+#define MCF_GPIO_PDDR_PCIBG_PDDRPCIBG3 (0x08)
+#define MCF_GPIO_PDDR_PCIBG_PDDRPCIBG4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PCIBR */
+#define MCF_GPIO_PDDR_PCIBR_PDDRPCIBR0 (0x01)
+#define MCF_GPIO_PDDR_PCIBR_PDDRPCIBR1 (0x02)
+#define MCF_GPIO_PDDR_PCIBR_PDDRPCIBR2 (0x04)
+#define MCF_GPIO_PDDR_PCIBR_PDDRPCIBR3 (0x08)
+#define MCF_GPIO_PDDR_PCIBR_PDDRPCIBR4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PSC3PSC2 */
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC20 (0x01)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC21 (0x02)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC22 (0x04)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC23 (0x08)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC24 (0x10)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC25 (0x20)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC26 (0x40)
+#define MCF_GPIO_PDDR_PSC3PSC2_PDDRPSC3PSC27 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PSC1PSC0 */
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC00 (0x01)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC01 (0x02)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC02 (0x04)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC03 (0x08)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC04 (0x10)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC05 (0x20)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC06 (0x40)
+#define MCF_GPIO_PDDR_PSC1PSC0_PDDRPSC1PSC07 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_DSPI */
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI0 (0x01)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI1 (0x02)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI2 (0x04)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI3 (0x08)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI4 (0x10)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI5 (0x20)
+#define MCF_GPIO_PDDR_DSPI_PDDRDSPI6 (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FBCTL */
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL0 (0x01)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL1 (0x02)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL2 (0x04)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL3 (0x08)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL4 (0x10)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL5 (0x20)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL6 (0x40)
+#define MCF_GPIO_PPDSDR_FBCTL_PPDSDRFBCTL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FBCS */
+#define MCF_GPIO_PPDSDR_FBCS_PPDSDRFBCS1 (0x02)
+#define MCF_GPIO_PPDSDR_FBCS_PPDSDRFBCS2 (0x04)
+#define MCF_GPIO_PPDSDR_FBCS_PPDSDRFBCS3 (0x08)
+#define MCF_GPIO_PPDSDR_FBCS_PPDSDRFBCS4 (0x10)
+#define MCF_GPIO_PPDSDR_FBCS_PPDSDRFBCS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_DMA */
+#define MCF_GPIO_PPDSDR_DMA_PPDSDRDMA0 (0x01)
+#define MCF_GPIO_PPDSDR_DMA_PPDSDRDMA1 (0x02)
+#define MCF_GPIO_PPDSDR_DMA_PPDSDRDMA2 (0x04)
+#define MCF_GPIO_PPDSDR_DMA_PPDSDRDMA3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FEC0H */
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H0 (0x01)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H1 (0x02)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H2 (0x04)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H3 (0x08)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H4 (0x10)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H5 (0x20)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H6 (0x40)
+#define MCF_GPIO_PPDSDR_FEC0H_PPDSDRFEC0H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FEC0L */
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L0 (0x01)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L1 (0x02)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L2 (0x04)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L3 (0x08)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L4 (0x10)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L5 (0x20)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L6 (0x40)
+#define MCF_GPIO_PPDSDR_FEC0L_PPDSDRFEC0L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FEC1H */
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H0 (0x01)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H1 (0x02)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H2 (0x04)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H3 (0x08)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H4 (0x10)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H5 (0x20)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H6 (0x40)
+#define MCF_GPIO_PPDSDR_FEC1H_PPDSDRFEC1H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FEC1L */
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L0 (0x01)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L1 (0x02)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L2 (0x04)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L3 (0x08)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L4 (0x10)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L5 (0x20)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L6 (0x40)
+#define MCF_GPIO_PPDSDR_FEC1L_PPDSDRFEC1L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECI2C */
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDRFECI2C0 (0x01)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDRFECI2C1 (0x02)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDRFECI2C2 (0x04)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDRFECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PCIBG */
+#define MCF_GPIO_PPDSDR_PCIBG_PPDSDRPCIBG0 (0x01)
+#define MCF_GPIO_PPDSDR_PCIBG_PPDSDRPCIBG1 (0x02)
+#define MCF_GPIO_PPDSDR_PCIBG_PPDSDRPCIBG2 (0x04)
+#define MCF_GPIO_PPDSDR_PCIBG_PPDSDRPCIBG3 (0x08)
+#define MCF_GPIO_PPDSDR_PCIBG_PPDSDRPCIBG4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PCIBR */
+#define MCF_GPIO_PPDSDR_PCIBR_PPDSDRPCIBR0 (0x01)
+#define MCF_GPIO_PPDSDR_PCIBR_PPDSDRPCIBR1 (0x02)
+#define MCF_GPIO_PPDSDR_PCIBR_PPDSDRPCIBR2 (0x04)
+#define MCF_GPIO_PPDSDR_PCIBR_PPDSDRPCIBR3 (0x08)
+#define MCF_GPIO_PPDSDR_PCIBR_PPDSDRPCIBR4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PSC3PSC2 */
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC20 (0x01)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC21 (0x02)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC22 (0x04)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC23 (0x08)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PDDRPSC3PSC24 (0x10)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PDDRPSC3PSC25 (0x20)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC26 (0x40)
+#define MCF_GPIO_PPDSDR_PSC3PSC2_PPDSDRPSC3PSC27 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PSC1PSC0 */
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC00 (0x01)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PDDRPSC1PSC01 (0x02)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC02 (0x04)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PDDRPSC1PSC03 (0x08)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC04 (0x10)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC05 (0x20)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC06 (0x40)
+#define MCF_GPIO_PPDSDR_PSC1PSC0_PPDSDRPSC1PSC07 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_DSPI */
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI0 (0x01)
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI1 (0x02)
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI2 (0x04)
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI3 (0x08)
+#define MCF_GPIO_PPDSDR_DSPI_PDDRDSPI4 (0x10)
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI5 (0x20)
+#define MCF_GPIO_PPDSDR_DSPI_PPDSDRDSPI6 (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FBCTL */
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL0 (0x01)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL1 (0x02)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL2 (0x04)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL3 (0x08)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL4 (0x10)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL5 (0x20)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL6 (0x40)
+#define MCF_GPIO_PCLRR_FBCTL_PCLRRFBCTL7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FBCS */
+#define MCF_GPIO_PCLRR_FBCS_PCLRRFBCS1 (0x02)
+#define MCF_GPIO_PCLRR_FBCS_PCLRRFBCS2 (0x04)
+#define MCF_GPIO_PCLRR_FBCS_PCLRRFBCS3 (0x08)
+#define MCF_GPIO_PCLRR_FBCS_PCLRRFBCS4 (0x10)
+#define MCF_GPIO_PCLRR_FBCS_PCLRRFBCS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_DMA */
+#define MCF_GPIO_PCLRR_DMA_PCLRRDMA0 (0x01)
+#define MCF_GPIO_PCLRR_DMA_PCLRRDMA1 (0x02)
+#define MCF_GPIO_PCLRR_DMA_PCLRRDMA2 (0x04)
+#define MCF_GPIO_PCLRR_DMA_PCLRRDMA3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FEC0H */
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H0 (0x01)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H1 (0x02)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H2 (0x04)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H3 (0x08)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H4 (0x10)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H5 (0x20)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H6 (0x40)
+#define MCF_GPIO_PCLRR_FEC0H_PCLRRFEC0H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FEC0L */
+#define MCF_GPIO_PCLRR_FEC0L_PCLRRFEC0L0 (0x01)
+#define MCF_GPIO_PCLRR_FEC0L_PODRFEC0L1 (0x02)
+#define MCF_GPIO_PCLRR_FEC0L_PCLRRFEC0L2 (0x04)
+#define MCF_GPIO_PCLRR_FEC0L_PCLRRFEC0L3 (0x08)
+#define MCF_GPIO_PCLRR_FEC0L_PODRFEC0L4 (0x10)
+#define MCF_GPIO_PCLRR_FEC0L_PODRFEC0L5 (0x20)
+#define MCF_GPIO_PCLRR_FEC0L_PODRFEC0L6 (0x40)
+#define MCF_GPIO_PCLRR_FEC0L_PCLRRFEC0L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FEC1H */
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H0 (0x01)
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H1 (0x02)
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H2 (0x04)
+#define MCF_GPIO_PCLRR_FEC1H_PODRFEC1H3 (0x08)
+#define MCF_GPIO_PCLRR_FEC1H_PODRFEC1H4 (0x10)
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H5 (0x20)
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H6 (0x40)
+#define MCF_GPIO_PCLRR_FEC1H_PCLRRFEC1H7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FEC1L */
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L0 (0x01)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L1 (0x02)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L2 (0x04)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L3 (0x08)
+#define MCF_GPIO_PCLRR_FEC1L_PODRFEC1L4 (0x10)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L5 (0x20)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L6 (0x40)
+#define MCF_GPIO_PCLRR_FEC1L_PCLRRFEC1L7 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECI2C */
+#define MCF_GPIO_PCLRR_FECI2C_PCLRRFECI2C0 (0x01)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRRFECI2C1 (0x02)
+#define MCF_GPIO_PCLRR_FECI2C_PODRFECI2C2 (0x04)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRRFECI2C3 (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PCIBG */
+#define MCF_GPIO_PCLRR_PCIBG_PODRPCIBG0 (0x01)
+#define MCF_GPIO_PCLRR_PCIBG_PODRPCIBG1 (0x02)
+#define MCF_GPIO_PCLRR_PCIBG_PODRPCIBG2 (0x04)
+#define MCF_GPIO_PCLRR_PCIBG_PCLRRPCIBG3 (0x08)
+#define MCF_GPIO_PCLRR_PCIBG_PCLRRPCIBG4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PCIBR */
+#define MCF_GPIO_PCLRR_PCIBR_PCLRRPCIBR0 (0x01)
+#define MCF_GPIO_PCLRR_PCIBR_PCLRRPCIBR1 (0x02)
+#define MCF_GPIO_PCLRR_PCIBR_PCLRRPCIBR2 (0x04)
+#define MCF_GPIO_PCLRR_PCIBR_PODRPCIBR3 (0x08)
+#define MCF_GPIO_PCLRR_PCIBR_PODRPCIBR4 (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PSC3PSC2 */
+#define MCF_GPIO_PCLRR_PSC3PSC2_PODRPSC3PSC20 (0x01)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PODRPSC3PSC21 (0x02)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PCLRRPSC3PSC22 (0x04)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PCLRRPSC3PSC23 (0x08)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PCLRRPSC3PSC24 (0x10)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PODRPSC3PSC25 (0x20)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PODRPSC3PSC26 (0x40)
+#define MCF_GPIO_PCLRR_PSC3PSC2_PCLRRPSC3PSC27 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PSC1PSC0 */
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC00 (0x01)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC01 (0x02)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC02 (0x04)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC03 (0x08)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC04 (0x10)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC05 (0x20)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PODRPSC1PSC06 (0x40)
+#define MCF_GPIO_PCLRR_PSC1PSC0_PCLRRPSC1PSC07 (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_DSPI */
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI0 (0x01)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI1 (0x02)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI2 (0x04)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI3 (0x08)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI4 (0x10)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI5 (0x20)
+#define MCF_GPIO_PCLRR_DSPI_PCLRRDSPI6 (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FBCTL */
+#define MCF_GPIO_PAR_FBCTL_PAR_TS(x) (((x)&0x0003)<<0)
+#define MCF_GPIO_PAR_FBCTL_PAR_TA (0x0004)
+#define MCF_GPIO_PAR_FBCTL_PAR_RWB (0x0010)
+#define MCF_GPIO_PAR_FBCTL_PAR_OE (0x0040)
+#define MCF_GPIO_PAR_FBCTL_PAR_BWE0 (0x0100)
+#define MCF_GPIO_PAR_FBCTL_PAR_BWE1 (0x0400)
+#define MCF_GPIO_PAR_FBCTL_PAR_BWE2 (0x1000)
+#define MCF_GPIO_PAR_FBCTL_PAR_BWE3 (0x4000)
+#define MCF_GPIO_PAR_FBCTL_PAR_TS_GPIO (0)
+#define MCF_GPIO_PAR_FBCTL_PAR_TS_TBST (2)
+#define MCF_GPIO_PAR_FBCTL_PAR_TS_TS (3)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FBCS */
+#define MCF_GPIO_PAR_FBCS_PAR_CS1 (0x02)
+#define MCF_GPIO_PAR_FBCS_PAR_CS2 (0x04)
+#define MCF_GPIO_PAR_FBCS_PAR_CS3 (0x08)
+#define MCF_GPIO_PAR_FBCS_PAR_CS4 (0x10)
+#define MCF_GPIO_PAR_FBCS_PAR_CS5 (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_DMA */
+#define MCF_GPIO_PAR_DMA_PAR_DREQ0(x) (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_DMA_PAR_DREQ1(x) (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_DMA_PAR_DACK0(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_DMA_PAR_DACK1(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_DMA_PAR_DACKx_GPIO (0)
+#define MCF_GPIO_PAR_DMA_PAR_DACKx_TOUT (2)
+#define MCF_GPIO_PAR_DMA_PAR_DACKx_DACK (3)
+#define MCF_GPIO_PAR_DMA_PAR_DREQx_GPIO (0)
+#define MCF_GPIO_PAR_DMA_PAR_DREQx_TIN (2)
+#define MCF_GPIO_PAR_DMA_PAR_DREQx_DREQ (3)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FECI2CIRQ */
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_IRQ5 (0x0001)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_IRQ6 (0x0002)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_SCL (0x0004)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_SDA (0x0008)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDC(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDIO(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MII (0x0400)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E17 (0x0800)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E0MDC (0x1000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E0MDIO (0x2000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E0MII (0x4000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E07 (0x8000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDIO_CANRX (0x0000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDIO_SDA (0x0200)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDIO_EMDIO (0x0300)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDC_CANTX (0x0000)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDC_SCL (0x0080)
+#define MCF_GPIO_PAR_FECI2CIRQ_PAR_E1MDC_EMDC (0x00C0)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PCIBG */
+#define MCF_GPIO_PAR_PCIBG_PAR_PCIBG0(x) (((x)&0x0003)<<0)
+#define MCF_GPIO_PAR_PCIBG_PAR_PCIBG1(x) (((x)&0x0003)<<2)
+#define MCF_GPIO_PAR_PCIBG_PAR_PCIBG2(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_PCIBG_PAR_PCIBG3(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_PCIBG_PAR_PCIBG4(x) (((x)&0x0003)<<8)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PCIBR */
+#define MCF_GPIO_PAR_PCIBR_PAR_PCIBG0(x) (((x)&0x0003)<<0)
+#define MCF_GPIO_PAR_PCIBR_PAR_PCIBG1(x) (((x)&0x0003)<<2)
+#define MCF_GPIO_PAR_PCIBR_PAR_PCIBG2(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_PCIBR_PAR_PCIBG3(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_PCIBR_PAR_PCIBR4(x) (((x)&0x0003)<<8)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PSC3 */
+#define MCF_GPIO_PAR_PSC3_PAR_TXD3 (0x04)
+#define MCF_GPIO_PAR_PSC3_PAR_RXD3 (0x08)
+#define MCF_GPIO_PAR_PSC3_PAR_RTS3(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_PSC3_PAR_CTS3(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_PSC3_PAR_CTS3_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC3_PAR_CTS3_BCLK (0x80)
+#define MCF_GPIO_PAR_PSC3_PAR_CTS3_CTS (0xC0)
+#define MCF_GPIO_PAR_PSC3_PAR_RTS3_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC3_PAR_RTS3_FSYNC (0x20)
+#define MCF_GPIO_PAR_PSC3_PAR_RTS3_RTS (0x30)
+#define MCF_GPIO_PAR_PSC3_PAR_CTS2_CANRX (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PSC2 */
+#define MCF_GPIO_PAR_PSC2_PAR_TXD2 (0x04)
+#define MCF_GPIO_PAR_PSC2_PAR_RXD2 (0x08)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_PSC2_PAR_CTS2(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_PSC2_PAR_CTS2_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC2_PAR_CTS2_BCLK (0x80)
+#define MCF_GPIO_PAR_PSC2_PAR_CTS2_CTS (0xC0)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2_CANTX (0x10)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2_FSYNC (0x20)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2_RTS (0x30)
+#define MCF_GPIO_PAR_PSC2_PAR_RTS2_CANRX (0x40)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PSC1 */
+#define MCF_GPIO_PAR_PSC1_PAR_TXD1 (0x04)
+#define MCF_GPIO_PAR_PSC1_PAR_RXD1 (0x08)
+#define MCF_GPIO_PAR_PSC1_PAR_RTS1(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_PSC1_PAR_CTS1(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_PSC1_PAR_CTS1_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC1_PAR_CTS1_BCLK (0x80)
+#define MCF_GPIO_PAR_PSC1_PAR_CTS1_CTS (0xC0)
+#define MCF_GPIO_PAR_PSC1_PAR_RTS1_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC1_PAR_RTS1_FSYNC (0x20)
+#define MCF_GPIO_PAR_PSC1_PAR_RTS1_RTS (0x30)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PSC0 */
+#define MCF_GPIO_PAR_PSC0_PAR_TXD0 (0x04)
+#define MCF_GPIO_PAR_PSC0_PAR_RXD0 (0x08)
+#define MCF_GPIO_PAR_PSC0_PAR_RTS0(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_PSC0_PAR_CTS0(x) (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_PSC0_PAR_CTS0_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC0_PAR_CTS0_BCLK (0x80)
+#define MCF_GPIO_PAR_PSC0_PAR_CTS0_CTS (0xC0)
+#define MCF_GPIO_PAR_PSC0_PAR_RTS0_GPIO (0x00)
+#define MCF_GPIO_PAR_PSC0_PAR_RTS0_FSYNC (0x20)
+#define MCF_GPIO_PAR_PSC0_PAR_RTS0_RTS (0x30)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_DSPI */
+#define MCF_GPIO_PAR_DSPI_PAR_SOUT(x) (((x)&0x0003)<<0)
+#define MCF_GPIO_PAR_DSPI_PAR_SIN(x) (((x)&0x0003)<<2)
+#define MCF_GPIO_PAR_DSPI_PAR_SCK(x) (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_DSPI_PAR_CS0(x) (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_DSPI_PAR_CS2(x) (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_DSPI_PAR_CS3(x) (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_DSPI_PAR_CS5 (0x1000)
+#define MCF_GPIO_PAR_DSPI_PAR_CS3_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_CS3_CANTX (0x0400)
+#define MCF_GPIO_PAR_DSPI_PAR_CS3_TOUT (0x0800)
+#define MCF_GPIO_PAR_DSPI_PAR_CS3_DSPICS (0x0C00)
+#define MCF_GPIO_PAR_DSPI_PAR_CS2_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_CS2_CANTX (0x0100)
+#define MCF_GPIO_PAR_DSPI_PAR_CS2_TOUT (0x0200)
+#define MCF_GPIO_PAR_DSPI_PAR_CS2_DSPICS (0x0300)
+#define MCF_GPIO_PAR_DSPI_PAR_CS0_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_CS0_FSYNC (0x0040)
+#define MCF_GPIO_PAR_DSPI_PAR_CS0_RTS (0x0080)
+#define MCF_GPIO_PAR_DSPI_PAR_CS0_DSPICS (0x00C0)
+#define MCF_GPIO_PAR_DSPI_PAR_SCK_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_SCK_BCLK (0x0010)
+#define MCF_GPIO_PAR_DSPI_PAR_SCK_CTS (0x0020)
+#define MCF_GPIO_PAR_DSPI_PAR_SCK_SCK (0x0030)
+#define MCF_GPIO_PAR_DSPI_PAR_SIN_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_SIN_RXD (0x0008)
+#define MCF_GPIO_PAR_DSPI_PAR_SIN_SIN (0x000C)
+#define MCF_GPIO_PAR_DSPI_PAR_SOUT_GPIO (0x0000)
+#define MCF_GPIO_PAR_DSPI_PAR_SOUT_TXD (0x0002)
+#define MCF_GPIO_PAR_DSPI_PAR_SOUT_SOUT (0x0003)
+
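+/*
+ * Illustrative sketch (editorial addition): to hand the DSPI pins to the DSPI
+ * controller rather than GPIO, the pin-assignment register is typically set
+ * to the peripheral function codes defined above, e.g.
+ *
+ *   MCF_GPIO_PAR_DSPI = MCF_GPIO_PAR_DSPI_PAR_SOUT_SOUT |
+ *                       MCF_GPIO_PAR_DSPI_PAR_SIN_SIN   |
+ *                       MCF_GPIO_PAR_DSPI_PAR_SCK_SCK   |
+ *                       MCF_GPIO_PAR_DSPI_PAR_CS0_DSPICS;
+ */
+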
+/* Bit definitions and macros for MCF_GPIO_PAR_TIMER */
+#define MCF_GPIO_PAR_TIMER_PAR_TOUT2 (0x01)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2(x) (((x)&0x03)<<1)
+#define MCF_GPIO_PAR_TIMER_PAR_TOUT3 (0x08)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3(x) (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_CANRX (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_IRQ (0x20)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TIN (0x30)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_CANRX (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_IRQ (0x04)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TIN (0x06)
+
+/********************************************************************/
+
+#endif /* _M5485GPIO_H_ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * File: mcf548x_gpt.h
+ * Purpose: Register and bit definitions for the MCF548X
+ *
+ * Notes:
+ *
+ */
+
+#ifndef __MCF548X_GPT_H__
+#define __MCF548X_GPT_H__
+
+/*********************************************************************
+*
+* General Purpose Timers (GPT)
+*
+*********************************************************************/
+
+/* Register read/write macros */
+#define MCF_GPT_GMS0 MCF_REG32(0x000800)
+#define MCF_GPT_GCIR0 MCF_REG32(0x000804)
+#define MCF_GPT_GPWM0 MCF_REG32(0x000808)
+#define MCF_GPT_GSR0 MCF_REG32(0x00080C)
+#define MCF_GPT_GMS1 MCF_REG32(0x000810)
+#define MCF_GPT_GCIR1 MCF_REG32(0x000814)
+#define MCF_GPT_GPWM1 MCF_REG32(0x000818)
+#define MCF_GPT_GSR1 MCF_REG32(0x00081C)
+#define MCF_GPT_GMS2 MCF_REG32(0x000820)
+#define MCF_GPT_GCIR2 MCF_REG32(0x000824)
+#define MCF_GPT_GPWM2 MCF_REG32(0x000828)
+#define MCF_GPT_GSR2 MCF_REG32(0x00082C)
+#define MCF_GPT_GMS3 MCF_REG32(0x000830)
+#define MCF_GPT_GCIR3 MCF_REG32(0x000834)
+#define MCF_GPT_GPWM3 MCF_REG32(0x000838)
+#define MCF_GPT_GSR3 MCF_REG32(0x00083C)
+#define MCF_GPT_GMS(x) MCF_REG32(0x000800+((x)*0x010))
+#define MCF_GPT_GCIR(x) MCF_REG32(0x000804+((x)*0x010))
+#define MCF_GPT_GPWM(x) MCF_REG32(0x000808+((x)*0x010))
+#define MCF_GPT_GSR(x) MCF_REG32(0x00080C+((x)*0x010))
+
+/* Bit definitions and macros for MCF_GPT_GMS */
+#define MCF_GPT_GMS_TMS(x) (((x)&0x00000007)<<0)
+#define MCF_GPT_GMS_GPIO(x) (((x)&0x00000003)<<4)
+#define MCF_GPT_GMS_IEN (0x00000100)
+#define MCF_GPT_GMS_OD (0x00000200)
+#define MCF_GPT_GMS_SC (0x00000400)
+#define MCF_GPT_GMS_CE (0x00001000)
+#define MCF_GPT_GMS_WDEN (0x00008000)
+#define MCF_GPT_GMS_ICT(x) (((x)&0x00000003)<<16)
+#define MCF_GPT_GMS_OCT(x) (((x)&0x00000003)<<20)
+#define MCF_GPT_GMS_OCPW(x) (((x)&0x000000FF)<<24)
+#define MCF_GPT_GMS_OCT_FRCLOW (0x00000000)
+#define MCF_GPT_GMS_OCT_PULSEHI (0x00100000)
+#define MCF_GPT_GMS_OCT_PULSELO (0x00200000)
+#define MCF_GPT_GMS_OCT_TOGGLE (0x00300000)
+#define MCF_GPT_GMS_ICT_ANY (0x00000000)
+#define MCF_GPT_GMS_ICT_RISE (0x00010000)
+#define MCF_GPT_GMS_ICT_FALL (0x00020000)
+#define MCF_GPT_GMS_ICT_PULSE (0x00030000)
+#define MCF_GPT_GMS_GPIO_INPUT (0x00000000)
+#define MCF_GPT_GMS_GPIO_OUTLO (0x00000020)
+#define MCF_GPT_GMS_GPIO_OUTHI (0x00000030)
+#define MCF_GPT_GMS_TMS_DISABLE (0x00000000)
+#define MCF_GPT_GMS_TMS_INCAPT (0x00000001)
+#define MCF_GPT_GMS_TMS_OUTCAPT (0x00000002)
+#define MCF_GPT_GMS_TMS_PWM (0x00000003)
+#define MCF_GPT_GMS_TMS_GPIO (0x00000004)
+
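+/*
+ * Editorial note: a timer mode word is composed by OR-ing one TMS mode with
+ * the desired option bits, e.g. (MCF_GPT_GMS_TMS_PWM | MCF_GPT_GMS_IEN) for
+ * PWM mode with interrupts enabled.  Sketch of intent only; see the MCF548x
+ * reference manual for the exact field semantics.
+ */
+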
+/* Bit definitions and macros for MCF_GPT_GCIR */
+#define MCF_GPT_GCIR_CNT(x) (((x)&0x0000FFFF)<<0)
+#define MCF_GPT_GCIR_PRE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_GPT_GPWM */
+#define MCF_GPT_GPWM_LOAD (0x00000001)
+#define MCF_GPT_GPWM_PWMOP (0x00000100)
+#define MCF_GPT_GPWM_WIDTH(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_GPT_GSR */
+#define MCF_GPT_GSR_CAPT (0x00000001)
+#define MCF_GPT_GSR_COMP (0x00000002)
+#define MCF_GPT_GSR_PWMP (0x00000004)
+#define MCF_GPT_GSR_TEXP (0x00000008)
+#define MCF_GPT_GSR_PIN (0x00000100)
+#define MCF_GPT_GSR_OVF(x) (((x)&0x00000007)<<12)
+#define MCF_GPT_GSR_CAPTURE(x) (((x)&0x0000FFFF)<<16)
+
+#define MCF_GPT_MAX_TIMEOUT 30
+/********************************************************************/
+
+#endif /* __MCF548X_GPT_H__ */
--- /dev/null
+/*
+ * m5485pci.h -- ColdFire 547x/548x PCI controller support.
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef __MCF548X_PCI_H__
+#define __MCF548X_PCI_H__
+
+
+/* PCI Type 0 Configuration Registers */
+#define MCF_PCIIDR MCF_REG32(0x000B00)
+/* PCI Device ID/Vendor ID */
+#define MCF_PCISCR MCF_REG32(0x000B04)
+/* PCI Status/Command */
+#define MCF_PCICCRIR MCF_REG32(0x000B08)
+/* PCI Class Code / Revision ID */
+#define MCF_PCICR1 MCF_REG32(0x000B0C)
+/* PCI Configuration 1 Register */
+#define MCF_PCIBAR0 MCF_REG32(0x000B10)
+/* PCI Base Address Register 0 */
+#define MCF_PCIBAR1 MCF_REG32(0x000B14)
+/* PCI Base Address Register 1 */
+#define MCF_PCICCPR MCF_REG32(0x000B28)
+/* PCI Cardbus CIS Pointer */
+#define MCF_PCISID MCF_REG32(0x000B2C)
+/* Subsystem ID/Subsystem Vendor ID*/
+#define MCF_PCIERBAR MCF_REG32(0x000B30)
+/* PCI Expansion ROM */
+#define MCF_PCICPR MCF_REG32(0x000B30)
+/* PCI Capabilities Pointer */
+#define MCF_PCICR2 MCF_REG32(0x000B3C)
+/* PCI Configuration Register 2 */
+
+/* General Control/Status Registers */
+#define MCF_PCIGSCR MCF_REG32(0x000B60)
+/* Global Status/Control Register */
+#define MCF_PCITBATR0 MCF_REG32(0x000B64)
+/* Target Base Address Translation 0*/
+#define MCF_PCITBATR1 MCF_REG32(0x000B68)
+/* Target Base Address Translation 1*/
+#define MCF_PCITCR MCF_REG32(0x000B6C)
+/* Target Control Register */
+#define MCF_PCIIW0BTAR MCF_REG32(0x000B70)
+/* Initiator Window 0 Base Address */
+#define MCF_PCIIW1BTAR MCF_REG32(0x000B74)
+/* Initiator Window 1 Base Address */
+#define MCF_PCIIW2BTAR MCF_REG32(0x000B78)
+/* Initiator Window 2 Base Address */
+#define MCF_PCIIWCR MCF_REG32(0x000B80)
+/* Initiator Window Configuration */
+#define MCF_PCIICR MCF_REG32(0x000B84)
+/* Initiator Control Register */
+#define MCF_PCIISR MCF_REG32(0x000B88)
+/* Initiator Status Register */
+#define MCF_PCICAR MCF_REG32(0x000BF8)
+/* Configuration Address Register */
+
+/* CommBus FIFO Transmit Interface Registers */
+#define MCF_PCITPSR MCF_REG32(0x008400)
+/* Tx Packet Size Register */
+#define MCF_PCITSAR MCF_REG32(0x008404)
+/* Tx Start Address Register */
+#define MCF_PCITTCR MCF_REG32(0x008408)
+/* Tx Transaction Control Register */
+#define MCF_PCITER MCF_REG32(0x00840C)
+/* Tx Enables Register */
+#define MCF_PCITNAR MCF_REG32(0x008410)
+/* Tx Next Address Register */
+#define MCF_PCITLWR MCF_REG32(0x008414)
+/* Tx Last Word Register */
+#define MCF_PCITDCR MCF_REG32(0x008418)
+/* Tx Done Counts Register */
+#define MCF_PCITSR MCF_REG32(0x00841C)
+/* Tx Status Register */
+#define MCF_PCITFDR MCF_REG32(0x008440)
+/* Tx FIFO Data Register */
+#define MCF_PCITFSR MCF_REG32(0x008444)
+/* Tx FIFO Status Register */
+#define MCF_PCITFCR MCF_REG32(0x008448)
+/* Tx FIFO Control Register */
+#define MCF_PCITFAR MCF_REG32(0x00844C)
+/* Tx FIFO Alarm Register */
+#define MCF_PCITFRPR MCF_REG32(0x008450)
+/* Tx FIFO Read Pointer Register */
+#define MCF_PCITFWPR MCF_REG32(0x008454)
+/* Tx FIFO Write Pointer Register */
+
+/* CommBus FIFO Receive Interface Registers */
+#define MCF_PCIRPSR MCF_REG32(0x008480)
+/* Rx Packet Size Register */
+#define MCF_PCIRSAR MCF_REG32(0x008484)
+/* Rx Start Address Register */
+#define MCF_PCIRTCR MCF_REG32(0x008488)
+/* Rx Transaction Control Register */
+#define MCF_PCIRER MCF_REG32(0x00848C)
+/* Rx Enables Register */
+#define MCF_PCIRNAR MCF_REG32(0x008490)
+/* Rx Next Address Register */
+#define MCF_PCIRDCR MCF_REG32(0x008498)
+/* Rx Done Counts Register */
+#define MCF_PCIRSR MCF_REG32(0x00849C)
+/* Rx Status Register */
+#define MCF_PCIRFDR MCF_REG32(0x0084C0)
+/* Rx FIFO Data Register */
+#define MCF_PCIRFSR MCF_REG32(0x0084C4)
+/* Rx FIFO Status Register */
+#define MCF_PCIRFCR MCF_REG32(0x0084C8)
+/* Rx FIFO Control Register */
+#define MCF_PCIRFAR MCF_REG32(0x0084CC)
+/* Rx FIFO Alarm Register */
+#define MCF_PCIRFRPR MCF_REG32(0x0084D0)
+/* Rx FIFO Read Pointer Register */
+#define MCF_PCIRFWPR MCF_REG32(0x0084D4)
+/* Rx FIFO Write Pointer Register */
+
+/* PCI Arbiter Registers */
+#define MCF_PCIARB_PACR MCF_REG32(0x000C00)
+#define MCF_PCIARB_PASR MCF_REG32(0x000C04)
+
+
+/* Bit definitions and macros for MCF_PCIIDR */
+#define MCF_PCIIDR_VENDORID(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PCIIDR_DEVICEID(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PCISCR */
+#define MCF_PCISCR_M (0x00000002)
+#define MCF_PCISCR_B (0x00000004)
+#define MCF_PCISCR_SP (0x00000008)
+#define MCF_PCISCR_MW (0x00000010)
+#define MCF_PCISCR_PER (0x00000040)
+#define MCF_PCISCR_S (0x00000100)
+#define MCF_PCISCR_F (0x00000200)
+#define MCF_PCISCR_C (0x00100000)
+#define MCF_PCISCR_66M (0x00200000)
+#define MCF_PCISCR_R (0x00400000)
+#define MCF_PCISCR_FC (0x00800000)
+#define MCF_PCISCR_DP (0x01000000)
+#define MCF_PCISCR_DT(x) (((x)&0x00000003)<<25)
+#define MCF_PCISCR_TS (0x08000000)
+#define MCF_PCISCR_TR (0x10000000)
+#define MCF_PCISCR_MA (0x20000000)
+#define MCF_PCISCR_SE (0x40000000)
+#define MCF_PCISCR_PE (0x80000000)
+
+/* Bit definitions and macros for MCF_PCICCRIR */
+#define MCF_PCICCRIR_REVID(x) (((x)&0x000000FF)<<0)
+#define MCF_PCICCRIR_CLASSCODE(x) (((x)&0x00FFFFFF)<<8)
+
+/* Bit definitions and macros for MCF_PCICR1 */
+#define MCF_PCICR1_CACHELINESIZE(x) (((x)&0x0000000F)<<0)
+#define MCF_PCICR1_LATTIMER(x) (((x)&0x000000FF)<<8)
+#define MCF_PCICR1_HEADERTYPE(x) (((x)&0x000000FF)<<16)
+#define MCF_PCICR1_BIST(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PCIBAR# */
+#define MCF_PCIBAR0_ADDR(x) (((x)&0x00003FFF)<<18)
+#define MCF_PCIBAR1_ADDR(x) (((x)&0x00000003)<<30)
+
+/* Bit definitions and macros for MCF_PCICR2 */
+#define MCF_PCICR2_INTLINE(x) (((x)&0x000000FF)<<0)
+#define MCF_PCICR2_INTPIN(x) (((x)&0x000000FF)<<8)
+#define MCF_PCICR2_MINGNT(x) (((x)&0x000000FF)<<16)
+#define MCF_PCICR2_MAXLAT(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PCIGSCR */
+#define MCF_PCIGSCR_PR (0x00000001)
+#define MCF_PCIGSCR_SEE (0x00001000)
+#define MCF_PCIGSCR_PEE (0x00002000)
+#define MCF_PCIGSCR_SE (0x10000000)
+#define MCF_PCIGSCR_PE (0x20000000)
+
+/* Bit definitions and macros for MCF_PCITBATR0 */
+#define MCF_PCITBATR0_EN (0x00000001)
+#define MCF_PCITBATR0_BAT0(x) (((x)&0x00003FFF)<<18)
+
+/* Bit definitions and macros for MCF_PCITBATR1 */
+#define MCF_PCITBATR1_EN (0x00000001)
+#define MCF_PCITBATR1_BAT1(x) (((x)&0x00000003)<<30)
+
+/* Bit definitions and macros for MCF_PCITCR */
+#define MCF_PCITCR_P (0x00010000)
+#define MCF_PCITCR_LD (0x01000000)
+
+/* Bit definitions and macros for MCF_PCIIW0BTAR */
+#define MCF_PCIIW0BTAR_WTA0(x) (((x)&0x000000FF)<<8)
+#define MCF_PCIIW0BTAR_WAM0(x) (((x)&0x000000FF)<<16)
+#define MCF_PCIIW0BTAR_WBA0(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PCIIW1BTAR */
+#define MCF_PCIIW1BTAR_WTA1(x) (((x)&0x000000FF)<<8)
+#define MCF_PCIIW1BTAR_WAM1(x) (((x)&0x000000FF)<<16)
+#define MCF_PCIIW1BTAR_WBA1(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PCIIW2BTAR */
+#define MCF_PCIIW2BTAR_WTA2(x) (((x)&0x000000FF)<<8)
+#define MCF_PCIIW2BTAR_WAM2(x) (((x)&0x000000FF)<<16)
+#define MCF_PCIIW2BTAR_WBA2(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PCIIWCR */
+#define MCF_PCIIWCR_WINCTRL2(x) (((x)&0x0000000F)<<8)
+#define MCF_PCIIWCR_WINCTRL1(x) (((x)&0x0000000F)<<16)
+#define MCF_PCIIWCR_WINCTRL0(x) (((x)&0x0000000F)<<24)
+#define MCF_PCIIWCR_WINCTRL0_MEMREAD (0x01000000)
+#define MCF_PCIIWCR_WINCTRL0_MEMRDLINE (0x03000000)
+#define MCF_PCIIWCR_WINCTRL0_MEMRDMUL (0x05000000)
+#define MCF_PCIIWCR_WINCTRL0_IO (0x09000000)
+#define MCF_PCIIWCR_WINCTRL0_E (0x01000000)
+#define MCF_PCIIWCR_WINCTRL1_MEMREAD (0x00010000)
+#define MCF_PCIIWCR_WINCTRL1_MEMRDLINE (0x00030000)
+#define MCF_PCIIWCR_WINCTRL1_MEMRDMUL (0x00050000)
+#define MCF_PCIIWCR_WINCTRL1_IO (0x00090000)
+#define MCF_PCIIWCR_WINCTRL1_E (0x00010000)
+#define MCF_PCIIWCR_WINCTRL2_MEMREAD (0x00000100)
+#define MCF_PCIIWCR_WINCTRL2_MEMRDLINE (0x00000300)
+#define MCF_PCIIWCR_WINCTRL2_MEMRDMUL (0x00000500)
+#define MCF_PCIIWCR_WINCTRL2_IO (0x00000900)
+#define MCF_PCIIWCR_WINCTRL2_E (0x00000100)
+
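+/*
+ * Editorial note: each initiator window is described by a base/mask/
+ * translation triplet in MCF_PCIIWnBTAR (WBAn/WAMn/WTAn above) and is
+ * enabled and typed through the corresponding WINCTRLn field of MCF_PCIIWCR
+ * (memory read, read-line, read-multiple or I/O, plus the enable bit).
+ * Sketch of intent only, not from the original Freescale header.
+ */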
+
+/* Bit definitions and macros for MCF_PCIICR */
+#define MCF_PCIICR_MAXRETRY(x) (((x)&0x000000FF)<<0)
+#define MCF_PCIICR_TAE (0x01000000)
+#define MCF_PCIICR_IAE (0x02000000)
+#define MCF_PCIICR_REE (0x04000000)
+
+/* Bit definitions and macros for MCF_PCIISR */
+#define MCF_PCIISR_TA (0x01000000)
+#define MCF_PCIISR_IA (0x02000000)
+#define MCF_PCIISR_RE (0x04000000)
+
+/* Bit definitions and macros for MCF_PCICAR */
+#define MCF_PCICAR_DWORD(x) (((x)&0x0000003F)<<2)
+#define MCF_PCICAR_FUNCNUM(x) (((x)&0x00000007)<<8)
+#define MCF_PCICAR_DEVNUM(x) (((x)&0x0000001F)<<11)
+#define MCF_PCICAR_BUSNUM(x) (((x)&0x000000FF)<<16)
+#define MCF_PCICAR_E (0x80000000)
+
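+/*
+ * Illustrative sketch (editorial addition): a configuration access is
+ * normally set up by writing the enable bit plus the geographic address to
+ * the CAR, e.g.
+ *
+ *   MCF_PCICAR = MCF_PCICAR_E | MCF_PCICAR_BUSNUM(bus) |
+ *                MCF_PCICAR_DEVNUM(dev) | MCF_PCICAR_FUNCNUM(fn) |
+ *                MCF_PCICAR_DWORD(reg >> 2);
+ *
+ * 'bus', 'dev', 'fn' and 'reg' are placeholders for the caller's values.
+ */
+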
+/* Bit definitions and macros for MCF_PCITPSR */
+#define MCF_PCITPSR_PKTSIZE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PCITTCR */
+#define MCF_PCITTCR_DI (0x00000001)
+#define MCF_PCITTCR_W (0x00000010)
+#define MCF_PCITTCR_MAXBEATS(x) (((x)&0x00000007)<<8)
+#define MCF_PCITTCR_MAXRETRY(x) (((x)&0x000000FF)<<16)
+#define MCF_PCITTCR_PCICMD(x) (((x)&0x0000000F)<<24)
+
+/* Bit definitions and macros for MCF_PCITER */
+#define MCF_PCITER_NE (0x00010000)
+#define MCF_PCITER_IAE (0x00020000)
+#define MCF_PCITER_TAE (0x00040000)
+#define MCF_PCITER_RE (0x00080000)
+#define MCF_PCITER_SE (0x00100000)
+#define MCF_PCITER_FEE (0x00200000)
+#define MCF_PCITER_ME (0x01000000)
+#define MCF_PCITER_BE (0x08000000)
+#define MCF_PCITER_CM (0x10000000)
+#define MCF_PCITER_RF (0x40000000)
+#define MCF_PCITER_RC (0x80000000)
+
+/* Bit definitions and macros for MCF_PCITDCR */
+#define MCF_PCITDCR_PKTSDONE(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PCITDCR_BYTESDONE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PCITSR */
+#define MCF_PCITSR_IA (0x00010000)
+#define MCF_PCITSR_TA (0x00020000)
+#define MCF_PCITSR_RE (0x00040000)
+#define MCF_PCITSR_SE (0x00080000)
+#define MCF_PCITSR_FE (0x00100000)
+#define MCF_PCITSR_BE1 (0x00200000)
+#define MCF_PCITSR_BE2 (0x00400000)
+#define MCF_PCITSR_BE3 (0x00800000)
+#define MCF_PCITSR_NT (0x01000000)
+
+/* Bit definitions and macros for MCF_PCITFSR */
+#define MCF_PCITFSR_EMT (0x00010000)
+#define MCF_PCITFSR_ALARM (0x00020000)
+#define MCF_PCITFSR_FU (0x00040000)
+#define MCF_PCITFSR_FR (0x00080000)
+#define MCF_PCITFSR_OF (0x00100000)
+#define MCF_PCITFSR_UF (0x00200000)
+#define MCF_PCITFSR_RXW (0x00400000)
+
+/* Bit definitions and macros for MCF_PCITFCR */
+#define MCF_PCITFCR_OF_MSK (0x00080000)
+#define MCF_PCITFCR_UF_MSK (0x00100000)
+#define MCF_PCITFCR_RXW_MSK (0x00200000)
+#define MCF_PCITFCR_FAE_MSK (0x00400000)
+#define MCF_PCITFCR_IP_MSK (0x00800000)
+#define MCF_PCITFCR_GR(x) (((x)&0x00000007)<<24)
+
+/* Bit definitions and macros for MCF_PCITFAR */
+#define MCF_PCITFAR_ALARM(x) (((x)&0x0000007F)<<0)
+
+/* Bit definitions and macros for MCF_PCITFRPR */
+#define MCF_PCITFRPR_READ(x) (((x)&0x00000FFF)<<0)
+
+/* Bit definitions and macros for MCF_PCITFWPR */
+#define MCF_PCITFWPR_WRITE(x) (((x)&0x00000FFF)<<0)
+
+/* Bit definitions and macros for MCF_PCIRPSR */
+#define MCF_PCIRPSR_PKTSIZE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PCIRTCR */
+#define MCF_PCIRTCR_DI (0x00000001)
+#define MCF_PCIRTCR_W (0x00000010)
+#define MCF_PCIRTCR_MAXBEATS(x) (((x)&0x00000007)<<8)
+#define MCF_PCIRTCR_FB (0x00001000)
+#define MCF_PCIRTCR_MAXRETRY(x) (((x)&0x000000FF)<<16)
+#define MCF_PCIRTCR_PCICMD(x) (((x)&0x0000000F)<<24)
+
+/* Bit definitions and macros for MCF_PCIRER */
+#define MCF_PCIRER_NE (0x00010000)
+#define MCF_PCIRER_IAE (0x00020000)
+#define MCF_PCIRER_TAE (0x00040000)
+#define MCF_PCIRER_RE (0x00080000)
+#define MCF_PCIRER_SE (0x00100000)
+#define MCF_PCIRER_FEE (0x00200000)
+#define MCF_PCIRER_ME (0x01000000)
+#define MCF_PCIRER_BE (0x08000000)
+#define MCF_PCIRER_CM (0x10000000)
+#define MCF_PCIRER_FE (0x20000000)
+#define MCF_PCIRER_RF (0x40000000)
+#define MCF_PCIRER_RC (0x80000000)
+
+/* Bit definitions and macros for MCF_PCIRDCR */
+#define MCF_PCIRDCR_PKTSDONE(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PCIRDCR_BYTESDONE(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PCIRSR */
+#define MCF_PCIRSR_IA (0x00010000)
+#define MCF_PCIRSR_TA (0x00020000)
+#define MCF_PCIRSR_RE (0x00040000)
+#define MCF_PCIRSR_SE (0x00080000)
+#define MCF_PCIRSR_FE (0x00100000)
+#define MCF_PCIRSR_BE1 (0x00200000)
+#define MCF_PCIRSR_BE2 (0x00400000)
+#define MCF_PCIRSR_BE3 (0x00800000)
+#define MCF_PCIRSR_NT (0x01000000)
+
+/* Bit definitions and macros for MCF_PCIRFSR */
+#define MCF_PCIRFSR_EMT (0x00010000)
+#define MCF_PCIRFSR_ALARM (0x00020000)
+#define MCF_PCIRFSR_FU (0x00040000)
+#define MCF_PCIRFSR_FR (0x00080000)
+#define MCF_PCIRFSR_OF (0x00100000)
+#define MCF_PCIRFSR_UF (0x00200000)
+#define MCF_PCIRFSR_RXW (0x00400000)
+
+/* Bit definitions and macros for MCF_PCIRFCR */
+#define MCF_PCIRFCR_OF_MSK (0x00080000)
+#define MCF_PCIRFCR_UF_MSK (0x00100000)
+#define MCF_PCIRFCR_RXW_MSK (0x00200000)
+#define MCF_PCIRFCR_FAE_MSK (0x00400000)
+#define MCF_PCIRFCR_IP_MSK (0x00800000)
+#define MCF_PCIRFCR_GR(x) (((x)&0x00000007)<<24)
+
+/* Bit definitions and macros for MCF_PCIRFAR */
+#define MCF_PCIRFAR_ALARM(x) (((x)&0x0000007F)<<0)
+
+/* Bit definitions and macros for MCF_PCIRFRPR */
+#define MCF_PCIRFRPR_READ(x) (((x)&0x00000FFF)<<0)
+
+/* Bit definitions and macros for MCF_PCIRFWPR */
+#define MCF_PCIRFWPR_WRITE(x) (((x)&0x00000FFF)<<0)
+
+
+/* Bit definitions and macros for MCF_PCIARB_PACR */
+#define MCF_PCIARB_PACR_INTMPRI (0x00000001)
+#define MCF_PCIARB_PACR_EXTMPRI(x) (((x)&0x0000001F)<<1)
+#define MCF_PCIARB_PACR_INTMINTEN (0x00010000)
+#define MCF_PCIARB_PACR_EXTMINTEN(x) (((x)&0x0000001F)<<17)
+#define MCF_PCIARB_PACR_PKMD (0x40000000)
+#define MCF_PCIARB_PACR_DS (0x80000000)
+
+/* Bit definitions and macros for MCF_PCIARB_PASR */
+#define MCF_PCIARB_PASR_ITLMBK (0x00010000)
+#define MCF_PCIARB_PASR_EXTMBK(x) (((x)&0x0000001F)<<17)
+
+#endif /* __MCF548X_PCI_H__ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * File: mcf548x_psc.h
+ * Purpose: Register and bit definitions for the MCF548X
+ *
+ * Notes:
+ *
+ */
+
+#ifndef __MCF548X_PSC_H__
+#define __MCF548X_PSC_H__
+
+/*********************************************************************
+*
+* Programmable Serial Controller (PSC)
+*
+*********************************************************************/
+
+/* Register read/write macros */
+#define MCF_PSC_MR0 MCF_REG08(0x008600)
+#define MCF_PSC_SR0 MCF_REG16(0x008604)
+#define MCF_PSC_CSR0 MCF_REG08(0x008604)
+#define MCF_PSC_CR0 MCF_REG08(0x008608)
+#define MCF_PSC_RB0 MCF_REG32(0x00860C)
+#define MCF_PSC_TB0 MCF_REG32(0x00860C)
+#define MCF_PSC_TB_8BIT0 MCF_REG32(0x00860C)
+#define MCF_PSC_TB_16BIT0 MCF_REG32(0x00860C)
+#define MCF_PSC_TB_AC970 MCF_REG32(0x00860C)
+#define MCF_PSC_IPCR0 MCF_REG08(0x008610)
+#define MCF_PSC_ACR0 MCF_REG08(0x008610)
+#define MCF_PSC_ISR0 MCF_REG16(0x008614)
+#define MCF_PSC_IMR0 MCF_REG16(0x008614)
+#define MCF_PSC_CTUR0 MCF_REG08(0x008618)
+#define MCF_PSC_CTLR0 MCF_REG08(0x00861C)
+#define MCF_PSC_IP0 MCF_REG08(0x008634)
+#define MCF_PSC_OPSET0 MCF_REG08(0x008638)
+#define MCF_PSC_OPRESET0 MCF_REG08(0x00863C)
+#define MCF_PSC_SICR0 MCF_REG08(0x008640)
+#define MCF_PSC_IRCR10 MCF_REG08(0x008644)
+#define MCF_PSC_IRCR20 MCF_REG08(0x008648)
+#define MCF_PSC_IRSDR0 MCF_REG08(0x00864C)
+#define MCF_PSC_IRMDR0 MCF_REG08(0x008650)
+#define MCF_PSC_IRFDR0 MCF_REG08(0x008654)
+#define MCF_PSC_RFCNT0 MCF_REG16(0x008658)
+#define MCF_PSC_TFCNT0 MCF_REG16(0x00865C)
+#define MCF_PSC_RFSR0 MCF_REG16(0x008664)
+#define MCF_PSC_TFSR0 MCF_REG16(0x008684)
+#define MCF_PSC_RFCR0 MCF_REG32(0x008668)
+#define MCF_PSC_TFCR0 MCF_REG32(0x008688)
+#define MCF_PSC_RFAR0 MCF_REG16(0x00866E)
+#define MCF_PSC_TFAR0 MCF_REG16(0x00868E)
+#define MCF_PSC_RFRP0 MCF_REG16(0x008672)
+#define MCF_PSC_TFRP0 MCF_REG16(0x008692)
+#define MCF_PSC_RFWP0 MCF_REG16(0x008676)
+#define MCF_PSC_TFWP0 MCF_REG16(0x008696)
+#define MCF_PSC_RLRFP0 MCF_REG16(0x00867A)
+#define MCF_PSC_TLRFP0 MCF_REG16(0x00869A)
+#define MCF_PSC_RLWFP0 MCF_REG16(0x00867E)
+#define MCF_PSC_TLWFP0 MCF_REG16(0x00869E)
+#define MCF_PSC_MR1 MCF_REG08(0x008700)
+#define MCF_PSC_SR1 MCF_REG16(0x008704)
+#define MCF_PSC_CSR1 MCF_REG08(0x008704)
+#define MCF_PSC_CR1 MCF_REG08(0x008708)
+#define MCF_PSC_RB1 MCF_REG32(0x00870C)
+#define MCF_PSC_TB1 MCF_REG32(0x00870C)
+#define MCF_PSC_TB_8BIT1 MCF_REG32(0x00870C)
+#define MCF_PSC_TB_16BIT1 MCF_REG32(0x00870C)
+#define MCF_PSC_TB_AC971 MCF_REG32(0x00870C)
+#define MCF_PSC_IPCR1 MCF_REG08(0x008710)
+#define MCF_PSC_ACR1 MCF_REG08(0x008710)
+#define MCF_PSC_ISR1 MCF_REG16(0x008714)
+#define MCF_PSC_IMR1 MCF_REG16(0x008714)
+#define MCF_PSC_CTUR1 MCF_REG08(0x008718)
+#define MCF_PSC_CTLR1 MCF_REG08(0x00871C)
+#define MCF_PSC_IP1 MCF_REG08(0x008734)
+#define MCF_PSC_OPSET1 MCF_REG08(0x008738)
+#define MCF_PSC_OPRESET1 MCF_REG08(0x00873C)
+#define MCF_PSC_SICR1 MCF_REG08(0x008740)
+#define MCF_PSC_IRCR11 MCF_REG08(0x008744)
+#define MCF_PSC_IRCR21 MCF_REG08(0x008748)
+#define MCF_PSC_IRSDR1 MCF_REG08(0x00874C)
+#define MCF_PSC_IRMDR1 MCF_REG08(0x008750)
+#define MCF_PSC_IRFDR1 MCF_REG08(0x008754)
+#define MCF_PSC_RFCNT1 MCF_REG16(0x008758)
+#define MCF_PSC_TFCNT1 MCF_REG16(0x00875C)
+#define MCF_PSC_RFSR1 MCF_REG16(0x008764)
+#define MCF_PSC_TFSR1 MCF_REG16(0x008784)
+#define MCF_PSC_RFCR1 MCF_REG32(0x008768)
+#define MCF_PSC_TFCR1 MCF_REG32(0x008788)
+#define MCF_PSC_RFAR1 MCF_REG16(0x00876E)
+#define MCF_PSC_TFAR1 MCF_REG16(0x00878E)
+#define MCF_PSC_RFRP1 MCF_REG16(0x008772)
+#define MCF_PSC_TFRP1 MCF_REG16(0x008792)
+#define MCF_PSC_RFWP1 MCF_REG16(0x008776)
+#define MCF_PSC_TFWP1 MCF_REG16(0x008796)
+#define MCF_PSC_RLRFP1 MCF_REG16(0x00877A)
+#define MCF_PSC_TLRFP1 MCF_REG16(0x00879A)
+#define MCF_PSC_RLWFP1 MCF_REG16(0x00877E)
+#define MCF_PSC_TLWFP1 MCF_REG16(0x00879E)
+#define MCF_PSC_MR2 MCF_REG08(0x008800)
+#define MCF_PSC_SR2 MCF_REG16(0x008804)
+#define MCF_PSC_CSR2 MCF_REG08(0x008804)
+#define MCF_PSC_CR2 MCF_REG08(0x008808)
+#define MCF_PSC_RB2 MCF_REG32(0x00880C)
+#define MCF_PSC_TB2 MCF_REG32(0x00880C)
+#define MCF_PSC_TB_8BIT2 MCF_REG32(0x00880C)
+#define MCF_PSC_TB_16BIT2 MCF_REG32(0x00880C)
+#define MCF_PSC_TB_AC972 MCF_REG32(0x00880C)
+#define MCF_PSC_IPCR2 MCF_REG08(0x008810)
+#define MCF_PSC_ACR2 MCF_REG08(0x008810)
+#define MCF_PSC_ISR2 MCF_REG16(0x008814)
+#define MCF_PSC_IMR2 MCF_REG16(0x008814)
+#define MCF_PSC_CTUR2 MCF_REG08(0x008818)
+#define MCF_PSC_CTLR2 MCF_REG08(0x00881C)
+#define MCF_PSC_IP2 MCF_REG08(0x008834)
+#define MCF_PSC_OPSET2 MCF_REG08(0x008838)
+#define MCF_PSC_OPRESET2 MCF_REG08(0x00883C)
+#define MCF_PSC_SICR2 MCF_REG08(0x008840)
+#define MCF_PSC_IRCR12 MCF_REG08(0x008844)
+#define MCF_PSC_IRCR22 MCF_REG08(0x008848)
+#define MCF_PSC_IRSDR2 MCF_REG08(0x00884C)
+#define MCF_PSC_IRMDR2 MCF_REG08(0x008850)
+#define MCF_PSC_IRFDR2 MCF_REG08(0x008854)
+#define MCF_PSC_RFCNT2 MCF_REG16(0x008858)
+#define MCF_PSC_TFCNT2 MCF_REG16(0x00885C)
+#define MCF_PSC_RFSR2 MCF_REG16(0x008864)
+#define MCF_PSC_TFSR2 MCF_REG16(0x008884)
+#define MCF_PSC_RFCR2 MCF_REG32(0x008868)
+#define MCF_PSC_TFCR2 MCF_REG32(0x008888)
+#define MCF_PSC_RFAR2 MCF_REG16(0x00886E)
+#define MCF_PSC_TFAR2 MCF_REG16(0x00888E)
+#define MCF_PSC_RFRP2 MCF_REG16(0x008872)
+#define MCF_PSC_TFRP2 MCF_REG16(0x008892)
+#define MCF_PSC_RFWP2 MCF_REG16(0x008876)
+#define MCF_PSC_TFWP2 MCF_REG16(0x008896)
+#define MCF_PSC_RLRFP2 MCF_REG16(0x00887A)
+#define MCF_PSC_TLRFP2 MCF_REG16(0x00889A)
+#define MCF_PSC_RLWFP2 MCF_REG16(0x00887E)
+#define MCF_PSC_TLWFP2 MCF_REG16(0x00889E)
+#define MCF_PSC_MR3 MCF_REG08(0x008900)
+#define MCF_PSC_SR3 MCF_REG16(0x008904)
+#define MCF_PSC_CSR3 MCF_REG08(0x008904)
+#define MCF_PSC_CR3 MCF_REG08(0x008908)
+#define MCF_PSC_RB3 MCF_REG32(0x00890C)
+#define MCF_PSC_TB3 MCF_REG32(0x00890C)
+#define MCF_PSC_TB_8BIT3 MCF_REG32(0x00890C)
+#define MCF_PSC_TB_16BIT3 MCF_REG32(0x00890C)
+#define MCF_PSC_TB_AC973 MCF_REG32(0x00890C)
+#define MCF_PSC_IPCR3 MCF_REG08(0x008910)
+#define MCF_PSC_ACR3 MCF_REG08(0x008910)
+#define MCF_PSC_ISR3 MCF_REG16(0x008914)
+#define MCF_PSC_IMR3 MCF_REG16(0x008914)
+#define MCF_PSC_CTUR3 MCF_REG08(0x008918)
+#define MCF_PSC_CTLR3 MCF_REG08(0x00891C)
+#define MCF_PSC_IP3 MCF_REG08(0x008934)
+#define MCF_PSC_OPSET3 MCF_REG08(0x008938)
+#define MCF_PSC_OPRESET3 MCF_REG08(0x00893C)
+#define MCF_PSC_SICR3 MCF_REG08(0x008940)
+#define MCF_PSC_IRCR13 MCF_REG08(0x008944)
+#define MCF_PSC_IRCR23 MCF_REG08(0x008948)
+#define MCF_PSC_IRSDR3 MCF_REG08(0x00894C)
+#define MCF_PSC_IRMDR3 MCF_REG08(0x008950)
+#define MCF_PSC_IRFDR3 MCF_REG08(0x008954)
+#define MCF_PSC_RFCNT3 MCF_REG16(0x008958)
+#define MCF_PSC_TFCNT3 MCF_REG16(0x00895C)
+#define MCF_PSC_RFSR3 MCF_REG16(0x008964)
+#define MCF_PSC_TFSR3 MCF_REG16(0x008984)
+#define MCF_PSC_RFCR3 MCF_REG32(0x008968)
+#define MCF_PSC_TFCR3 MCF_REG32(0x008988)
+#define MCF_PSC_RFAR3 MCF_REG16(0x00896E)
+#define MCF_PSC_TFAR3 MCF_REG16(0x00898E)
+#define MCF_PSC_RFRP3 MCF_REG16(0x008972)
+#define MCF_PSC_TFRP3 MCF_REG16(0x008992)
+#define MCF_PSC_RFWP3 MCF_REG16(0x008976)
+#define MCF_PSC_TFWP3 MCF_REG16(0x008996)
+#define MCF_PSC_RLRFP3 MCF_REG16(0x00897A)
+#define MCF_PSC_TLRFP3 MCF_REG16(0x00899A)
+#define MCF_PSC_RLWFP3 MCF_REG16(0x00897E)
+#define MCF_PSC_TLWFP3 MCF_REG16(0x00899E)
+#define MCF_PSC_MR(x) MCF_REG08(0x008600+((x)*0x100))
+#define MCF_PSC_SR(x) MCF_REG16(0x008604+((x)*0x100))
+#define MCF_PSC_CSR(x) MCF_REG08(0x008604+((x)*0x100))
+#define MCF_PSC_CR(x) MCF_REG08(0x008608+((x)*0x100))
+#define MCF_PSC_RB(x) MCF_REG32(0x00860C+((x)*0x100))
+#define MCF_PSC_TB(x) MCF_REG32(0x00860C+((x)*0x100))
+#define MCF_PSC_TB_8BIT(x) MCF_REG32(0x00860C+((x)*0x100))
+#define MCF_PSC_TB_16BIT(x) MCF_REG32(0x00860C+((x)*0x100))
+#define MCF_PSC_TB_AC97(x) MCF_REG32(0x00860C+((x)*0x100))
+#define MCF_PSC_IPCR(x) MCF_REG08(0x008610+((x)*0x100))
+#define MCF_PSC_ACR(x) MCF_REG08(0x008610+((x)*0x100))
+#define MCF_PSC_ISR(x) MCF_REG16(0x008614+((x)*0x100))
+#define MCF_PSC_IMR(x) MCF_REG16(0x008614+((x)*0x100))
+#define MCF_PSC_CTUR(x) MCF_REG08(0x008618+((x)*0x100))
+#define MCF_PSC_CTLR(x) MCF_REG08(0x00861C+((x)*0x100))
+#define MCF_PSC_IP(x) MCF_REG08(0x008634+((x)*0x100))
+#define MCF_PSC_OPSET(x) MCF_REG08(0x008638+((x)*0x100))
+#define MCF_PSC_OPRESET(x) MCF_REG08(0x00863C+((x)*0x100))
+#define MCF_PSC_SICR(x) MCF_REG08(0x008640+((x)*0x100))
+#define MCF_PSC_IRCR1(x) MCF_REG08(0x008644+((x)*0x100))
+#define MCF_PSC_IRCR2(x) MCF_REG08(0x008648+((x)*0x100))
+#define MCF_PSC_IRSDR(x) MCF_REG08(0x00864C+((x)*0x100))
+#define MCF_PSC_IRMDR(x) MCF_REG08(0x008650+((x)*0x100))
+#define MCF_PSC_IRFDR(x) MCF_REG08(0x008654+((x)*0x100))
+#define MCF_PSC_RFCNT(x) MCF_REG16(0x008658+((x)*0x100))
+#define MCF_PSC_TFCNT(x) MCF_REG16(0x00865C+((x)*0x100))
+#define MCF_PSC_RFSR(x) MCF_REG16(0x008664+((x)*0x100))
+#define MCF_PSC_TFSR(x) MCF_REG16(0x008684+((x)*0x100))
+#define MCF_PSC_RFCR(x) MCF_REG32(0x008668+((x)*0x100))
+#define MCF_PSC_TFCR(x) MCF_REG32(0x008688+((x)*0x100))
+#define MCF_PSC_RFAR(x) MCF_REG16((0x00866E)+((x)*0x100))
+#define MCF_PSC_TFAR(x) MCF_REG16((0x00868E)+((x)*0x100))
+#define MCF_PSC_RFRP(x) MCF_REG16(0x008672+((x)*0x100))
+#define MCF_PSC_TFRP(x) MCF_REG16(0x008692+((x)*0x100))
+#define MCF_PSC_RFWP(x) MCF_REG16(0x008676+((x)*0x100))
+#define MCF_PSC_TFWP(x) MCF_REG16(0x008696+((x)*0x100))
+#define MCF_PSC_RLRFP(x) MCF_REG16(0x00867A+((x)*0x100))
+#define MCF_PSC_TLRFP(x) MCF_REG16(0x00869A+((x)*0x100))
+#define MCF_PSC_RLWFP(x) MCF_REG16(0x00867E+((x)*0x100))
+#define MCF_PSC_TLWFP(x) MCF_REG16(0x00869E+((x)*0x100))
+
+/* Bit definitions and macros for MCF_PSC_MR */
+#define MCF_PSC_MR_BC(x) (((x)&0x03)<<0)
+#define MCF_PSC_MR_PT (0x04)
+#define MCF_PSC_MR_PM(x) (((x)&0x03)<<3)
+#define MCF_PSC_MR_ERR (0x20)
+#define MCF_PSC_MR_RXIRQ (0x40)
+#define MCF_PSC_MR_RXRTS (0x80)
+#define MCF_PSC_MR_SB(x) (((x)&0x0F)<<0)
+#define MCF_PSC_MR_TXCTS (0x10)
+#define MCF_PSC_MR_TXRTS (0x20)
+#define MCF_PSC_MR_CM(x) (((x)&0x03)<<6)
+#define MCF_PSC_MR_PM_MULTI_ADDR (0x1C)
+#define MCF_PSC_MR_PM_MULTI_DATA (0x18)
+#define MCF_PSC_MR_PM_NONE (0x10)
+#define MCF_PSC_MR_PM_FORCE_HI (0x0C)
+#define MCF_PSC_MR_PM_FORCE_LO (0x08)
+#define MCF_PSC_MR_PM_ODD (0x04)
+#define MCF_PSC_MR_PM_EVEN (0x00)
+#define MCF_PSC_MR_BC_5 (0x00)
+#define MCF_PSC_MR_BC_6 (0x01)
+#define MCF_PSC_MR_BC_7 (0x02)
+#define MCF_PSC_MR_BC_8 (0x03)
+#define MCF_PSC_MR_CM_NORMAL (0x00)
+#define MCF_PSC_MR_CM_ECHO (0x40)
+#define MCF_PSC_MR_CM_LOCAL_LOOP (0x80)
+#define MCF_PSC_MR_CM_REMOTE_LOOP (0xC0)
+#define MCF_PSC_MR_SB_STOP_BITS_1 (0x07)
+#define MCF_PSC_MR_SB_STOP_BITS_15 (0x08)
+#define MCF_PSC_MR_SB_STOP_BITS_2 (0x0F)
+
+/* Bit definitions and macros for MCF_PSC_SR */
+#define MCF_PSC_SR_ERR (0x0040)
+#define MCF_PSC_SR_CDE_DEOF (0x0080)
+#define MCF_PSC_SR_RXRDY (0x0100)
+#define MCF_PSC_SR_FU (0x0200)
+#define MCF_PSC_SR_TXRDY (0x0400)
+#define MCF_PSC_SR_TXEMP_URERR (0x0800)
+#define MCF_PSC_SR_OE (0x1000)
+#define MCF_PSC_SR_PE_CRCERR (0x2000)
+#define MCF_PSC_SR_FE_PHYERR (0x4000)
+#define MCF_PSC_SR_RB_NEOF (0x8000)
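+
+/*
+ * Editor's sketch (not part of the original header): because the
+ * parameterized macros above add 0x100 per PSC channel, a driver can poll
+ * any channel generically.  This assumes MCF_PSC_SR() expands to a volatile
+ * MMIO access, as the other MCF_REGxx users in this patch set do:
+ *
+ *	static inline int psc_tx_ready(int chan)
+ *	{
+ *		return (MCF_PSC_SR(chan) & MCF_PSC_SR_TXRDY) != 0;
+ *	}
+ */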
+
+/* Bit definitions and macros for MCF_PSC_CSR */
+#define MCF_PSC_CSR_TCSEL(x) (((x)&0x0F)<<0)
+#define MCF_PSC_CSR_RCSEL(x) (((x)&0x0F)<<4)
+#define MCF_PSC_CSR_RCSEL_SYS_CLK (0xD0)
+#define MCF_PSC_CSR_RCSEL_CTM16 (0xE0)
+#define MCF_PSC_CSR_RCSEL_CTM (0xF0)
+#define MCF_PSC_CSR_TCSEL_SYS_CLK (0x0D)
+#define MCF_PSC_CSR_TCSEL_CTM16 (0x0E)
+#define MCF_PSC_CSR_TCSEL_CTM (0x0F)
+
+/* Bit definitions and macros for MCF_PSC_CR */
+#define MCF_PSC_CR_RXC(x) (((x)&0x03)<<0)
+#define MCF_PSC_CR_TXC(x) (((x)&0x03)<<2)
+#define MCF_PSC_CR_MISC(x) (((x)&0x07)<<4)
+#define MCF_PSC_CR_NONE (0x00)
+#define MCF_PSC_CR_STOP_BREAK (0x70)
+#define MCF_PSC_CR_START_BREAK (0x60)
+#define MCF_PSC_CR_BKCHGINT (0x50)
+#define MCF_PSC_CR_RESET_ERROR (0x40)
+#define MCF_PSC_CR_RESET_TX (0x30)
+#define MCF_PSC_CR_RESET_RX (0x20)
+#define MCF_PSC_CR_RESET_MR (0x10)
+#define MCF_PSC_CR_TX_DISABLED (0x08)
+#define MCF_PSC_CR_TX_ENABLED (0x04)
+#define MCF_PSC_CR_RX_DISABLED (0x02)
+#define MCF_PSC_CR_RX_ENABLED (0x01)
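+
+/*
+ * Editor's sketch of how the mode and command macros combine for a plain
+ * 8N1 UART setup.  Assumption: as on other Freescale PSC/UART blocks, MR1
+ * and MR2 are written back-to-back through the same MR address after a
+ * RESET_MR command; this is illustrative only, not taken from this file:
+ *
+ *	MCF_PSC_CR(chan) = MCF_PSC_CR_RESET_RX;
+ *	MCF_PSC_CR(chan) = MCF_PSC_CR_RESET_TX;
+ *	MCF_PSC_CR(chan) = MCF_PSC_CR_RESET_MR;
+ *	MCF_PSC_MR(chan) = MCF_PSC_MR_PM_NONE | MCF_PSC_MR_BC_8;          (MR1)
+ *	MCF_PSC_MR(chan) = MCF_PSC_MR_CM_NORMAL | MCF_PSC_MR_SB_STOP_BITS_1;  (MR2)
+ *	MCF_PSC_CR(chan) = MCF_PSC_CR_RX_ENABLED | MCF_PSC_CR_TX_ENABLED;
+ */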
+
+/* Bit definitions and macros for MCF_PSC_TB_8BIT */
+#define MCF_PSC_TB_8BIT_TB3(x) (((x)&0x000000FF)<<0)
+#define MCF_PSC_TB_8BIT_TB2(x) (((x)&0x000000FF)<<8)
+#define MCF_PSC_TB_8BIT_TB1(x) (((x)&0x000000FF)<<16)
+#define MCF_PSC_TB_8BIT_TB0(x) (((x)&0x000000FF)<<24)
+
+/* Bit definitions and macros for MCF_PSC_TB_16BIT */
+#define MCF_PSC_TB_16BIT_TB1(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PSC_TB_16BIT_TB0(x) (((x)&0x0000FFFF)<<16)
+
+/* Bit definitions and macros for MCF_PSC_TB_AC97 */
+#define MCF_PSC_TB_AC97_SOF (0x00000800)
+#define MCF_PSC_TB_AC97_TB(x) (((x)&0x000FFFFF)<<12)
+
+/* Bit definitions and macros for MCF_PSC_IPCR */
+#define MCF_PSC_IPCR_RESERVED (0x0C)
+#define MCF_PSC_IPCR_CTS (0x0D)
+#define MCF_PSC_IPCR_D_CTS (0x1C)
+#define MCF_PSC_IPCR_SYNC (0x8C)
+
+/* Bit definitions and macros for MCF_PSC_ACR */
+#define MCF_PSC_ACR_IEC0 (0x01)
+#define MCF_PSC_ACR_CTMS(x) (((x)&0x07)<<4)
+#define MCF_PSC_ACR_BRG (0x80)
+
+/* Bit definitions and macros for MCF_PSC_ISR */
+#define MCF_PSC_ISR_ERR (0x0040)
+#define MCF_PSC_ISR_DEOF (0x0080)
+#define MCF_PSC_ISR_TXRDY (0x0100)
+#define MCF_PSC_ISR_RXRDY_FU (0x0200)
+#define MCF_PSC_ISR_DB (0x0400)
+#define MCF_PSC_ISR_IPC (0x8000)
+
+/* Bit definitions and macros for MCF_PSC_IMR */
+#define MCF_PSC_IMR_ERR (0x0040)
+#define MCF_PSC_IMR_DEOF (0x0080)
+#define MCF_PSC_IMR_TXRDY (0x0100)
+#define MCF_PSC_IMR_RXRDY_FU (0x0200)
+#define MCF_PSC_IMR_DB (0x0400)
+#define MCF_PSC_IMR_IPC (0x8000)
+
+/* Bit definitions and macros for MCF_PSC_IP */
+#define MCF_PSC_IP_CTS (0x01)
+#define MCF_PSC_IP_TGL (0x40)
+#define MCF_PSC_IP_LWPR_B (0x80)
+
+/* Bit definitions and macros for MCF_PSC_OPSET */
+#define MCF_PSC_OPSET_RTS (0x01)
+
+/* Bit definitions and macros for MCF_PSC_OPRESET */
+#define MCF_PSC_OPRESET_RTS (0x01)
+
+/* Bit definitions and macros for MCF_PSC_SICR */
+#define MCF_PSC_SICR_SIM(x) (((x)&0x07)<<0)
+#define MCF_PSC_SICR_SHDIR (0x10)
+#define MCF_PSC_SICR_DTS (0x20)
+#define MCF_PSC_SICR_AWR (0x40)
+#define MCF_PSC_SICR_ACRB (0x80)
+#define MCF_PSC_SICR_SIM_UART (0x00)
+#define MCF_PSC_SICR_SIM_MODEM8 (0x01)
+#define MCF_PSC_SICR_SIM_MODEM16 (0x02)
+#define MCF_PSC_SICR_SIM_AC97 (0x03)
+#define MCF_PSC_SICR_SIM_SIR (0x04)
+#define MCF_PSC_SICR_SIM_MIR (0x05)
+#define MCF_PSC_SICR_SIM_FIR (0x06)
+
+/* Bit definitions and macros for MCF_PSC_IRCR1 */
+#define MCF_PSC_IRCR1_SPUL (0x01)
+#define MCF_PSC_IRCR1_SIPEN (0x02)
+#define MCF_PSC_IRCR1_FD (0x04)
+
+/* Bit definitions and macros for MCF_PSC_IRCR2 */
+#define MCF_PSC_IRCR2_NXTEOF (0x01)
+#define MCF_PSC_IRCR2_ABORT (0x02)
+#define MCF_PSC_IRCR2_SIPREQ (0x04)
+
+/* Bit definitions and macros for MCF_PSC_IRMDR */
+#define MCF_PSC_IRMDR_M_FDIV(x) (((x)&0x7F)<<0)
+#define MCF_PSC_IRMDR_FREQ (0x80)
+
+/* Bit definitions and macros for MCF_PSC_IRFDR */
+#define MCF_PSC_IRFDR_F_FDIV(x) (((x)&0x0F)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RFCNT */
+#define MCF_PSC_RFCNT_CNT(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TFCNT */
+#define MCF_PSC_TFCNT_CNT(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RFSR */
+#define MCF_PSC_RFSR_EMT (0x0001)
+#define MCF_PSC_RFSR_ALARM (0x0002)
+#define MCF_PSC_RFSR_FU (0x0004)
+#define MCF_PSC_RFSR_FRMRY (0x0008)
+#define MCF_PSC_RFSR_OF (0x0010)
+#define MCF_PSC_RFSR_UF (0x0020)
+#define MCF_PSC_RFSR_RXW (0x0040)
+#define MCF_PSC_RFSR_FAE (0x0080)
+#define MCF_PSC_RFSR_FRM(x) (((x)&0x000F)<<8)
+#define MCF_PSC_RFSR_TAG (0x1000)
+#define MCF_PSC_RFSR_TXW (0x4000)
+#define MCF_PSC_RFSR_IP (0x8000)
+#define MCF_PSC_RFSR_FRM_BYTE0 (0x0800)
+#define MCF_PSC_RFSR_FRM_BYTE1 (0x0400)
+#define MCF_PSC_RFSR_FRM_BYTE2 (0x0200)
+#define MCF_PSC_RFSR_FRM_BYTE3 (0x0100)
+
+/* Bit definitions and macros for MCF_PSC_TFSR */
+#define MCF_PSC_TFSR_EMT (0x0001)
+#define MCF_PSC_TFSR_ALARM (0x0002)
+#define MCF_PSC_TFSR_FU (0x0004)
+#define MCF_PSC_TFSR_FRMRY (0x0008)
+#define MCF_PSC_TFSR_OF (0x0010)
+#define MCF_PSC_TFSR_UF (0x0020)
+#define MCF_PSC_TFSR_RXW (0x0040)
+#define MCF_PSC_TFSR_FAE (0x0080)
+#define MCF_PSC_TFSR_FRM(x) (((x)&0x000F)<<8)
+#define MCF_PSC_TFSR_TAG (0x1000)
+#define MCF_PSC_TFSR_TXW (0x4000)
+#define MCF_PSC_TFSR_IP (0x8000)
+#define MCF_PSC_TFSR_FRM_BYTE0 (0x0800)
+#define MCF_PSC_TFSR_FRM_BYTE1 (0x0400)
+#define MCF_PSC_TFSR_FRM_BYTE2 (0x0200)
+#define MCF_PSC_TFSR_FRM_BYTE3 (0x0100)
+
+/* Bit definitions and macros for MCF_PSC_RFCR */
+#define MCF_PSC_RFCR_CNTR(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PSC_RFCR_TXW_MSK (0x00040000)
+#define MCF_PSC_RFCR_OF_MSK (0x00080000)
+#define MCF_PSC_RFCR_UF_MSK (0x00100000)
+#define MCF_PSC_RFCR_RXW_MSK (0x00200000)
+#define MCF_PSC_RFCR_FAE_MSK (0x00400000)
+#define MCF_PSC_RFCR_IP_MSK (0x00800000)
+#define MCF_PSC_RFCR_GR(x) (((x)&0x00000007)<<24)
+#define MCF_PSC_RFCR_FRMEN (0x08000000)
+#define MCF_PSC_RFCR_TIMER (0x10000000)
+#define MCF_PSC_RFCR_WRITETAG (0x20000000)
+#define MCF_PSC_RFCR_SHADOW (0x80000000)
+
+/* Bit definitions and macros for MCF_PSC_TFCR */
+#define MCF_PSC_TFCR_CNTR(x) (((x)&0x0000FFFF)<<0)
+#define MCF_PSC_TFCR_TXW_MSK (0x00040000)
+#define MCF_PSC_TFCR_OF_MSK (0x00080000)
+#define MCF_PSC_TFCR_UF_MSK (0x00100000)
+#define MCF_PSC_TFCR_RXW_MSK (0x00200000)
+#define MCF_PSC_TFCR_FAE_MSK (0x00400000)
+#define MCF_PSC_TFCR_IP_MSK (0x00800000)
+#define MCF_PSC_TFCR_GR(x) (((x)&0x00000007)<<24)
+#define MCF_PSC_TFCR_FRMEN (0x08000000)
+#define MCF_PSC_TFCR_TIMER (0x10000000)
+#define MCF_PSC_TFCR_WRITETAG (0x20000000)
+#define MCF_PSC_TFCR_SHADOW (0x80000000)
+
+/* Bit definitions and macros for MCF_PSC_RFAR */
+#define MCF_PSC_RFAR_ALARM(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TFAR */
+#define MCF_PSC_TFAR_ALARM(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RFRP */
+#define MCF_PSC_RFRP_READ(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TFRP */
+#define MCF_PSC_TFRP_READ(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RFWP */
+#define MCF_PSC_RFWP_WRITE(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TFWP */
+#define MCF_PSC_TFWP_WRITE(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RLRFP */
+#define MCF_PSC_RLRFP_LFP(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TLRFP */
+#define MCF_PSC_TLRFP_LFP(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_RLWFP */
+#define MCF_PSC_RLWFP_LFP(x) (((x)&0x01FF)<<0)
+
+/* Bit definitions and macros for MCF_PSC_TLWFP */
+#define MCF_PSC_TLWFP_LFP(x) (((x)&0x01FF)<<0)
+
+/********************************************************************/
+
+#endif /* __MCF548X_PSC_H__ */
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+#ifndef M5485SEC_H
+#define M5485SEC_H
+
+#define SEC_EUACR_U MCF_REG32(0x21000)
+#define SEC_EUACR_L MCF_REG32(0x21004)
+#define SEC_EUASR_U MCF_REG32(0x21028)
+#define SEC_EUASR_L MCF_REG32(0x2102C)
+#define SEC_SMCR MCF_REG32(0x21030)
+#define SEC_SISR_U MCF_REG32(0x21010)
+#define SEC_SISR_L MCF_REG32(0x21014)
+#define SEC_SICR_U MCF_REG32(0x21018)
+#define SEC_SICR_L MCF_REG32(0x2101C)
+#define SEC_SIMR_U MCF_REG32(0x21008)
+#define SEC_SIMR_L MCF_REG32(0x2100C)
+#define SEC_SID MCF_REG32(0x21020)
+
+#define SEC_SMCR_RESET 0x01000000
+#define SEC_SIMR_MASK_U 0x00000000
+#define SEC_SIMR_MASK_L 0x03333340
+
+#define SEC_CC0_FR MCF_REG32(0x2204C)
+#define SEC_CC0_CR MCF_REG32(0x2200C)
+#define SEC_CC0_CDPR MCF_REG32(0x22044)
+#define SEC_CC0_PSR_U MCF_REG32(0x22010)
+#define SEC_CC0_PSR_L MCF_REG32(0x22014)
+#define SEC_CC1_FR MCF_REG32(0x2304C)
+#define SEC_CC1_CR MCF_REG32(0x2300C)
+#define SEC_CC1_CDPR MCF_REG32(0x23044)
+#define SEC_CC1_PSR_U MCF_REG32(0x23010)
+#define SEC_CC1_PSR_L MCF_REG32(0x23014)
+
+#define SEC_CC_CR_RESET 0x00000001
+#define SEC_CC_CR_CONFIGURATION 0x0000001E
+#define SEC_CC_PSR_U_ERR_CH0 0x20000000
+#define SEC_CC_PSR_U_ERR_CH1 0x80000000
+#define SEC_CC_PSR_U_DN_CH0 0x10000000
+#define SEC_CC_PSR_U_DN_CH1 0x40000000
+
+#define SEC_DEU_DRCR MCF_REG32(0x2A018)
+#define SEC_DEU_DSR MCF_REG32(0x2A028)
+#define SEC_DEU_DISR MCF_REG32(0x2A030)
+#define SEC_DEU_DIMR MCF_REG32(0x2A038)
+
+#define SEC_DEU_DRCR_RESET 0x01000000
+#define SEC_DEU_DSR_RD 0x01000000
+#define SEC_DEU_DIMR_MASK 0xF63F0000
+
+#define SEC_AFEU_AFRCR MCF_REG32(0x28018)
+#define SEC_AFEU_AFSR MCF_REG32(0x28028)
+#define SEC_AFEU_AFISR MCF_REG32(0x28030)
+#define SEC_AFEU_AFIMR MCF_REG32(0x28038)
+
+#define SEC_AFEU_AFRCR_RESET 0x01000000
+#define SEC_AFEU_AFSR_RD 0x01000000
+#define SEC_AFEU_AFIMR_MASK 0xF61F0000
+
+
+#define SEC_MDEU_MDRCR MCF_REG32(0x2C018)
+#define SEC_MDEU_MDSR MCF_REG32(0x2C028)
+#define SEC_MDEU_MDISR MCF_REG32(0x2C030)
+#define SEC_MDEU_MDIMR MCF_REG32(0x2C038)
+
+#define SEC_MDEU_MDRCR_RESET 0x01000000
+#define SEC_MDEU_MDSR_RD 0x01000000
+#define SEC_MDEU_MDIMR_MASK 0xC41F0000
+
+
+#define SEC_RNG_RNGRCR MCF_REG32(0x2E018)
+#define SEC_RNG_RNGSR MCF_REG32(0x2E028)
+#define SEC_RNG_RNGISR MCF_REG32(0x2E030)
+#define SEC_RNG_RNGIMR MCF_REG32(0x2E038)
+
+#define SEC_RNG_RNGRCR_RESET 0x01000000
+#define SEC_RNG_RNGSR_RD 0x01000000
+#define SEC_RNG_RNGIMR_MASK 0xC2100000
+
+#define SEC_AESU_AESRCR MCF_REG32(0x32018)
+#define SEC_AESU_AESSR MCF_REG32(0x32028)
+#define SEC_AESU_AESISR MCF_REG32(0x32030)
+#define SEC_AESU_AESIMR MCF_REG32(0x32038)
+
+#define SEC_AESU_AESRCR_RESET 0x01000000
+#define SEC_AESU_AESSR_RD 0x01000000
+#define SEC_AESU_AESIMR_MASK 0xF61F0000
+
+
+#define SEC_DESC_NUM 20
+#define SEC_CHANNEL_NUMBER 2
+#define SEC_MAX_BUF_SIZE (32*1024)
+#define SEC_INIT_TIMEOUT (1*HZ)
+#define SEC_INTERRUPT 37
+
+/* Header descriptor values*/
+#define SEC_ALG_ENCR_DES_ECB_SINGLE 0x20100010
+#define SEC_ALG_DECR_DES_ECB_SINGLE 0x20000010
+#define SEC_ALG_ENCR_DES_ECB_TRIPLE 0x20300010
+#define SEC_ALG_DECR_DES_ECB_TRIPLE 0x20200010
+#define SEC_ALG_ENCR_DES_CBC_SINGLE 0x20500010
+#define SEC_ALG_DECR_DES_CBC_SINGLE 0x20400010
+#define SEC_ALG_ENCR_DES_CBC_TRIPLE 0x20700010
+#define SEC_ALG_DECR_DES_CBC_TRIPLE 0x20600010
+
+#define SEC_ALG_MDEU_SHA256 0x30500010
+#define SEC_ALG_MDEU_MD5 0x30600010
+#define SEC_ALG_MDEU_SHA 0x30400010
+#define SEC_ALG_MDEU_SHA256_HMAC 0x31D00010
+#define SEC_ALG_MDEU_MD5_HMAC 0x31E00010
+#define SEC_ALG_MDEU_SHA_HMAC 0x31C00010
+
+#define SEC_ALG_RNG 0x40000010
+
+
+#define SEC_ALG_AFEU_KEY 0x10200050
+#define SEC_ALG_AFEU_CONTEXT 0x10700050
+
+#define SEC_ALG_ENCR_AESU_CBC 0x60300010
+#define SEC_ALG_DECR_AESU_CBC 0x60200010
+#define SEC_ALG_ENCR_AESU_ECB 0x60100010
+#define SEC_ALG_DECR_AESU_ECB 0x60000010
+#define SEC_ALG_AESU_CTR 0x60600010
+
+
+
+#define SEC_DESCHEAD_ERROR 0xFE000000
+#define SEC_DESCHEAD_COMPLETED 0xFF000000
+
+#define SEC_DEVICE_NAME "cfsec"
+
+/*!!! This number must be changed*/
+#define SEC_MAJOR 130
+
+#define SEC_DEV_BUF 1024
+#define SEC_DEV_KEY_LEN 64
+#define SEC_DEV_VECTOR_LEN 259
+
+#define SEC_AES_BLCK_LEN 16
+#define SEC_DES_BLCK_LEN 8
+
+
+/* Descriptor structure of SEC*/
+struct sec_descriptor {
+ volatile unsigned long secdesc_header;
+ unsigned long secdesc_len1;
+ void *secdesc_ptr1;
+ unsigned long secdesc_iv_in_len;
+ void *secdesc_iv_in_ptr;
+ unsigned long secdesc_key_len;
+ void *secdesc_key_ptr;
+ unsigned long secdesc_data_in_len;
+ void *secdesc_data_in_ptr;
+ unsigned long secdesc_data_out_len;
+ void *secdesc_data_out_ptr;
+ unsigned long secdesc_iv_out_len;
+ void *secdesc_iv_out_ptr;
+ unsigned long secdesc_len7;
+ void *secdesc_ptr7;
+ void *secdesc_ptrnext;
+};
+
+struct sec_device_data {
+ unsigned char secdev_inbuf[SEC_DEV_BUF];
+ unsigned char secdev_outbuf[SEC_DEV_BUF];
+ unsigned char secdev_key[SEC_DEV_KEY_LEN];
+ unsigned char secdev_iv[SEC_DEV_VECTOR_LEN];
+ unsigned char secdev_ov[SEC_DEV_VECTOR_LEN];
+ struct sec_descriptor *secdev_desc;
+};
+
+struct sec_descriptor *sec_desc_alloc(void);
+inline void sec_desc_free(struct sec_descriptor *desc);
+int sec_execute(int channel, struct sec_descriptor *desc, int timeout);
+int sec_nonblock_execute(struct sec_descriptor *desc);
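+
+/*
+ * Editor's sketch (not from the original header) of how a caller might use
+ * the descriptor API declared above for a single-DES CBC encryption.  The
+ * field usage is inferred from the structure layout and is an assumption,
+ * not a reference:
+ *
+ *	struct sec_descriptor *d = sec_desc_alloc();
+ *	if (d) {
+ *		d->secdesc_header = SEC_ALG_ENCR_DES_CBC_SINGLE;
+ *		d->secdesc_key_len = SEC_DES_BLCK_LEN;
+ *		d->secdesc_key_ptr = key;
+ *		d->secdesc_iv_in_len = SEC_DES_BLCK_LEN;
+ *		d->secdesc_iv_in_ptr = iv;
+ *		d->secdesc_data_in_len = len;
+ *		d->secdesc_data_in_ptr = src;
+ *		d->secdesc_data_out_len = len;
+ *		d->secdesc_data_out_ptr = dst;
+ *		if (sec_execute(0, d, SEC_INIT_TIMEOUT) < 0)
+ *			printk(KERN_ERR "SEC request failed\n");
+ *		sec_desc_free(d);
+ *	}
+ */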
+#endif
--- /dev/null
+/*
+ * m5485sim.h -- ColdFire 547x/548x System Integration Unit support.
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+#ifndef m5485sim_h
+#define m5485sim_h
+/*
+ * System Integration Unit Registers
+ */
+#define MCF_SDRAMDS MCF_REG32(0x000004)
+/* SDRAM Drive Strength */
+#define MCF_SBCR MCF_REG32(0x000010)
+/* System Breakpoint Control */
+#define MCF_CSnCFG(x) MCF_REG32(0x000020+(x*4))
+/* SDRAM Chip Select X */
+#define MCF_SECSACR MCF_REG32(0x000038)
+/* Sequential Access Control */
+#define MCF_RSR MCF_REG32(0x000044)
+/* Reset Status */
+#define MCF_JTAGID MCF_REG32(0x000050)
+/* JTAG Device Identification */
+#define MCF_XARB_PRIEN MCF_REG32(0x000264)
+/* Arbiter master pri enable */
+#define MCF_XARB_PRI MCF_REG32(0x000268)
+/* Arbiter master pri levels */
+
+/*
+ * FlexBus Chip Selects Registers
+ */
+#define MCF_CSARn(x) MCF_REG32(0x000500+(x*0xC))
+#define MCF_CSMRn(x) MCF_REG32(0x000504+(x*0xC))
+#define MCF_CSCRn(x) MCF_REG32(0x000508+(x*0xC))
+
+/*
+ * Interrupt Controller Registers
+ */
+#define MCF_IPRH MCF_REG32(0x000700)
+#define MCF_IPRL MCF_REG32(0x000704)
+#define MCF_IMRH MCF_REG32(0x000708)
+#define MCF_IMRL MCF_REG32(0x00070C)
+#define MCF_INTFRCH MCF_REG32(0x000710)
+#define MCF_INTFRCL MCF_REG32(0x000714)
+#define MCF_IRLR MCF_REG08(0x000718)
+#define MCF_IACKLPR MCF_REG08(0x000719)
+#define MCF_SWIACK MCF_REG08(0x0007E0)
+#define MCF_LnIACK(x) MCF_REG08(0x0007E4+((x)*0x004))
+#define MCF_ICR(x) MCF_REG08(0x000740+((x)*0x001))
+
+/*
+ * Slice Timers Registers
+ */
+#define MCF_SLTCNT(x) MCF_REG32(0x000900+((x)*0x010))
+#define MCF_SCR(x) MCF_REG32(0x000904+((x)*0x010))
+#define MCF_SCNT(x) MCF_REG32(0x000908+((x)*0x010))
+#define MCF_SSR(x) MCF_REG32(0x00090C+((x)*0x010))
+
+/*
+ * Interrupt sources
+ */
+#define ISC_EPORT_Fn(x) (x)
+/* EPORT Interrupts */
+#define ISC_USB_EPn(x) (15+(x))
+/* USB Endpoint */
+#define ISC_USB_ISR (22)
+/* USB General source */
+#define ISC_USB_AISR (22)
+/* USB core source */
+#define ISC_DSPI_OVRFW (25)
+/* DSPI overflow */
+#define ISC_DSPI_RFOF (26)
+#define ISC_DSPI_RFDF (27)
+#define ISC_DSPI_TFUF (28)
+#define ISC_DSPI_TCF (29)
+#define ISC_DSPI_TFFF (30)
+#define ISC_DSPI_EOQF (31)
+#define ISC_PSCn(x) (35-(x))
+#define ISC_COMM_TIM (36)
+#define ISC_SEC (37)
+#define ISC_FEC1 (38)
+#define ISC_FEC0 (39)
+#define ISC_I2C (40)
+#define ISC_PCI_ARB (41)
+#define ISC_PCI_CB (42)
+#define ISC_PCI_XLB (43)
+#define ISC_DMA (48)
+#define ISC_CANn_ERR(x) (49+(6*(x)))
+#define ISC_CANn_BUSOFF(x) (50+(6*(x)))
+#define ISC_CANn_MBOR(x) (51+(6*(x)))
+#define ISC_CAN0_WAKEIN (52)
+#define ISC_SLTn(x) (54-(x))
+#define ISC_GPTn(x) (62-(x))
+
+/*
+ * Interrupt level and priorities
+ */
+#define ILP_TOP (MCF_ICR_IL(5) | MCF_ICR_IP(3))
+#define ILP_SLT0 (MCF_ICR_IL(5) | MCF_ICR_IP(2))
+#define ILP_SLT1 (MCF_ICR_IL(5) | MCF_ICR_IP(1))
+#define ILP_DMA (MCF_ICR_IL(5) | MCF_ICR_IP(0))
+#define ILP_SEC (MCF_ICR_IL(4) | MCF_ICR_IP(7))
+#define ILP_FEC0 (MCF_ICR_IL(4) | MCF_ICR_IP(6))
+#define ILP_FEC1 (MCF_ICR_IL(4) | MCF_ICR_IP(5))
+#define ILP_PCI_XLB (MCF_ICR_IL(4) | MCF_ICR_IP(4))
+#define ILP_PCI_ARB (MCF_ICR_IL(4) | MCF_ICR_IP(3))
+#define ILP_PCI_CB (MCF_ICR_IL(4) | MCF_ICR_IP(2))
+#define ILP_I2C (MCF_ICR_IL(4) | MCF_ICR_IP(1))
+
+#define ILP_USB_EPn(x) (MCF_ICR_IL(3) | MCF_ICR_IP(7-(x)))
+#define ILP_USB_EP0 (MCF_ICR_IL(3) | MCF_ICR_IP(7))
+#define ILP_USB_EP1 (MCF_ICR_IL(3) | MCF_ICR_IP(6))
+#define ILP_USB_EP2 (MCF_ICR_IL(3) | MCF_ICR_IP(5))
+#define ILP_USB_EP3 (MCF_ICR_IL(3) | MCF_ICR_IP(4))
+#define ILP_USB_EP4 (MCF_ICR_IL(3) | MCF_ICR_IP(3))
+#define ILP_USB_EP5 (MCF_ICR_IL(3) | MCF_ICR_IP(2))
+#define ILP_USB_EP6 (MCF_ICR_IL(3) | MCF_ICR_IP(1))
+#define ILP_USB_ISR (MCF_ICR_IL(3) | MCF_ICR_IP(0))
+
+#define ILP_USB_AISR (MCF_ICR_IL(2) | MCF_ICR_IP(7))
+#define ILP_DSPI_OVRFW (MCF_ICR_IL(2) | MCF_ICR_IP(6))
+#define ILP_DSPI_RFOF (MCF_ICR_IL(2) | MCF_ICR_IP(5))
+#define ILP_DSPI_RFDF (MCF_ICR_IL(2) | MCF_ICR_IP(4))
+#define ILP_DSPI_TFUF (MCF_ICR_IL(2) | MCF_ICR_IP(3))
+#define ILP_DSPI_TCF (MCF_ICR_IL(2) | MCF_ICR_IP(2))
+#define ILP_DSPI_TFFF (MCF_ICR_IL(2) | MCF_ICR_IP(1))
+#define ILP_DSPI_EOQF (MCF_ICR_IL(2) | MCF_ICR_IP(0))
+
+#define ILP_COMM_TIM (MCF_ICR_IL(1) | MCF_ICR_IP(7))
+#define ILP_PSCn(x) (MCF_ICR_IL(1) | MCF_ICR_IP(3-((x)&3)))
+#define ILP_PSC0 (MCF_ICR_IL(1) | MCF_ICR_IP(3))
+#define ILP_PSC1 (MCF_ICR_IL(1) | MCF_ICR_IP(2))
+#define ILP_PSC2 (MCF_ICR_IL(1) | MCF_ICR_IP(1))
+#define ILP_PSC3 (MCF_ICR_IL(1) | MCF_ICR_IP(0))
+
+
+
+
+
+/********************************************************************/
+
+/*
+ * System Integration Unit Bitfields
+ */
+
+/* SBCR */
+#define MCF_SBCR_PIN2DSPI (0x08000000)
+#define MCF_SBCR_DMA2CPU (0x10000000)
+#define MCF_SBCR_CPU2DMA (0x20000000)
+#define MCF_SBCR_PIN2DMA (0x40000000)
+#define MCF_SBCR_PIN2CPU (0x80000000)
+
+/* SECSACR */
+#define MCF_SECSACR_SEQEN (0x00000001)
+
+/* RSR */
+#define MCF_RSR_RST (0x00000001)
+#define MCF_RSR_RSTWD (0x00000002)
+#define MCF_RSR_RSTJTG (0x00000008)
+
+/* JTAGID */
+#define MCF_JTAGID_REV (0xF0000000)
+#define MCF_JTAGID_PROCESSOR (0x0FFFFFFF)
+#define MCF_JTAGID_MCF5485 (0x0800C01D)
+#define MCF_JTAGID_MCF5484 (0x0800D01D)
+#define MCF_JTAGID_MCF5483 (0x0800E01D)
+#define MCF_JTAGID_MCF5482 (0x0800F01D)
+#define MCF_JTAGID_MCF5481 (0x0801001D)
+#define MCF_JTAGID_MCF5480 (0x0801101D)
+#define MCF_JTAGID_MCF5475 (0x0801201D)
+#define MCF_JTAGID_MCF5474 (0x0801301D)
+#define MCF_JTAGID_MCF5473 (0x0801401D)
+#define MCF_JTAGID_MCF5472 (0x0801501D)
+#define MCF_JTAGID_MCF5471 (0x0801601D)
+#define MCF_JTAGID_MCF5470 (0x0801701D)
+
+
+/*
+ * Interrupt Controller Bitfields
+ */
+#define MCF_IRLR_IRQ(x) (((x)&0x7F)<<1)
+#define MCF_IACKLPR_PRI(x) (((x)&0x0F)<<0)
+#define MCF_IACKLPR_LEVEL(x) (((x)&0x07)<<4)
+#define MCF_ICR_IP(x) (((x)&0x07)<<0)
+#define MCF_ICR_IL(x) (((x)&0x07)<<3)
+
+/*
+ * Slice Timers Bitfields
+ */
+#define MCF_SCR_TEN (0x01000000)
+#define MCF_SCR_IEN (0x02000000)
+#define MCF_SCR_RUN (0x04000000)
+#define MCF_SSR_ST (0x01000000)
+#define MCF_SSR_BE (0x02000000)
+
+
+/*
+ * Some needed coldfire registers
+ */
+#define MCF_PAR_PCIBG MCF_REG16(0x000A48)
+#define MCF_PAR_PCIBR MCF_REG16(0x000A4A)
+#define MCF_PAR_PSCn(x) MCF_REG08(0x000A4F-((x)&0x3))
+#define MCF_PAR_FECI2CIRQ MCF_REG16(0x000A44)
+#define MCF_PAR_DSPI MCF_REG16(0x000A50)
+#define MCF_PAR_TIMER MCF_REG08(0X000A52)
+#define MCF_EPPAR MCF_REG16(0x000F00)
+#define MCF_EPDDR MCF_REG08(0x000F04)
+#define MCF_EPIER MCF_REG08(0x000F05)
+#define MCF_EPFR MCF_REG08(0x000F0C)
+
+/*
+ * Some GPIO bitfields
+ */
+#define MCF_PAR_SDA (0x0008)
+#define MCF_PAR_SCL (0x0004)
+#define MCF_PAR_PSC_TXD (0x04)
+#define MCF_PAR_PSC_RXD (0x08)
+#define MCF_PAR_PSC_RTS(x) (((x)&0x03)<<4)
+#define MCF_PAR_PSC_CTS(x) (((x)&0x03)<<6)
+#define MCF_PAR_PSC_CTS_GPIO (0x00)
+#define MCF_PAR_PSC_CTS_BCLK (0x80)
+#define MCF_PAR_PSC_CTS_CTS (0xC0)
+#define MCF_PAR_PSC_RTS_GPIO (0x00)
+#define MCF_PAR_PSC_RTS_FSYNC (0x20)
+#define MCF_PAR_PSC_RTS_RTS (0x30)
+#define MCF_PAR_PSC_CANRX (0x40)
+
+/*
+ * FlexCAN Module Configuration Register
+ */
+#define CANMCR_MDIS (0x80000000)
+#define CANMCR_FRZ (0x40000000)
+#define CANMCR_HALT (0x10000000)
+#define CANMCR_SOFTRST (0x02000000)
+#define CANMCR_NOTRDY (0x08000000)
+#define CANMCR_FRZACK (0x01000000)
+#define CANMCR_SUPV (0x00800000)
+#define CANMCR_MAXMB (0x0F)
+/*
+ * FlexCAN Control Register
+ */
+#define CANCTRL_PRESDIV(x) (((x)&0xFF)<<24)
+#define CANCTRL_RJW(x) (((x)&0x03)<<22)
+#define CANCTRL_PSEG1(x) (((x)&0x07)<<19)
+#define CANCTRL_PSEG2(x) (((x)&0x07)<<16)
+#define CANCTRL_BOFFMSK (0x00008000)
+#define CANCTRL_ERRMSK (0x00004000)
+#define CANCTRL_CLKSRC (0x00002000)
+#define CANCTRL_LPB (0x00001000)
+#define CANCTRL_SAMP(x) (((x)&0x01)<<7)
+#define CANCTRL_BOFFREC (0x00000040)
+#define CANCTRL_TSYNC (0x00000020)
+#define CANCTRL_LBUF (0x00000010)
+#define CANCTRL_LOM (0x00000008)
+#define CANCTRL_PROPSEG(x) ((x)&0x07)
+
+/*
+ * FlexCAN Error Counter Register
+ */
+#define ERRCNT_RXECTR(x) (((x)&0xFF)<<8)
+#define ERRCNT_TXECTR(x) ((x)&0xFF)
+
+/*
+ * FlexCAN Error and Status Register
+ */
+#define ERRSTAT_BITERR(x) (((x)&0x03)<<14)
+#define ERRSTAT_ACKERR (0x00002000)
+#define ERRSTAT_CRCERR (0x00001000)
+#define ERRSTAT_FRMERR (0x00000800)
+#define ERRSTAT_STFERR (0x00000400)
+#define ERRSTAT_TXWRN (0x00000200)
+#define ERRSTAT_RXWRN (0x00000100)
+#define ERRSTAT_IDLE (0x00000080)
+#define ERRSTAT_TXRX (0x00000040)
+#define ERRSTAT_FLTCONF(x) (((x)&0x03)<<4)
+#define ERRSTAT_BOFFINT (0x00000004)
+#define ERRSTAT_ERRINT (0x00000002)
+
+/*
+ * Interrupt Mask Register
+ */
+#define IMASK_BUF15M (0x8000)
+#define IMASK_BUF14M (0x4000)
+#define IMASK_BUF13M (0x2000)
+#define IMASK_BUF12M (0x1000)
+#define IMASK_BUF11M (0x0800)
+#define IMASK_BUF10M (0x0400)
+#define IMASK_BUF9M (0x0200)
+#define IMASK_BUF8M (0x0100)
+#define IMASK_BUF7M (0x0080)
+#define IMASK_BUF6M (0x0040)
+#define IMASK_BUF5M (0x0020)
+#define IMASK_BUF4M (0x0010)
+#define IMASK_BUF3M (0x0008)
+#define IMASK_BUF2M (0x0004)
+#define IMASK_BUF1M (0x0002)
+#define IMASK_BUF0M (0x0001)
+#define IMASK_BUFnM(x) (0x1<<(x))
+#define IMASK_BUFF_ENABLE_ALL (0xFFFF)
+#define IMASK_BUFF_DISABLE_ALL (0x0000)
+
+/*
+ * Interrupt Flag Register
+ */
+#define IFLAG_BUF15M (0x8000)
+#define IFLAG_BUF14M (0x4000)
+#define IFLAG_BUF13M (0x2000)
+#define IFLAG_BUF12M (0x1000)
+#define IFLAG_BUF11M (0x0800)
+#define IFLAG_BUF10M (0x0400)
+#define IFLAG_BUF9M (0x0200)
+#define IFLAG_BUF8M (0x0100)
+#define IFLAG_BUF7M (0x0080)
+#define IFLAG_BUF6M (0x0040)
+#define IFLAG_BUF5M (0x0020)
+#define IFLAG_BUF4M (0x0010)
+#define IFLAG_BUF3M (0x0008)
+#define IFLAG_BUF2M (0x0004)
+#define IFLAG_BUF1M (0x0002)
+#define IFLAG_BUF0M (0x0001)
+#define IFLAG_BUFF_SET_ALL (0xFFFF)
+#define IFLAG_BUFF_CLEAR_ALL (0x0000)
+#define IFLAG_BUFnM(x) (0x1<<(x))
+
+/*
+ * Message Buffers
+ */
+#define MB_CNT_CODE(x) (((x)&0x0F)<<24)
+#define MB_CNT_SRR (0x00400000)
+#define MB_CNT_IDE (0x00200000)
+#define MB_CNT_RTR (0x00100000)
+#define MB_CNT_TIMESTAMP(x) ((x)&0xFFFF)
+#define MB_ID_STD (0x07FF)
+#define MB_ID_EXT (0x1FFFFFFF)
+#define MB_CODE_MASK (0xF0FFFFFF)
+#define CAN_MB 16
+#define PDEV_MAX 2
+
+/*
+ * Some used coldfire values
+ */
+#define MCF_EPIER_EPIE(x) (0x01 << (x))
+#define MCF_EPPAR_EPPAx_FALLING (2)
+#define MCF_EPPAR_EPPA(n, x) (((x)&0x0003) << (2*n))
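+
+/*
+ * Editor's sketch: arming EPORT pin n for a falling-edge interrupt with the
+ * helpers above would typically be a read-modify-write of the pin
+ * assignment register followed by enabling the pin (illustrative only):
+ *
+ *	MCF_EPPAR = (MCF_EPPAR & ~MCF_EPPAR_EPPA(n, 3)) |
+ *		    MCF_EPPAR_EPPA(n, MCF_EPPAR_EPPAx_FALLING);
+ *	MCF_EPIER |= MCF_EPIER_EPIE(n);
+ */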
+
+
+#endif /* m5485sim_h */
--- /dev/null
+#ifndef SYS_SRAM_H
+#define SYS_SRAM_H
+
+
+#define SYS_SRAM_DMA_START (MCF_MBAR + 0x10000)
+#define SYS_SRAM_DMA_SIZE 8192
+#define SYS_SRAM_FEC_START (SYS_SRAM_DMA_START + SYS_SRAM_DMA_SIZE)
+#define SYS_SRAM_FEC_SIZE 2048
+#define SYS_SRAM_SEC_START (SYS_SRAM_FEC_START + SYS_SRAM_FEC_SIZE)
+#define SYS_SRAM_SEC_SIZE 1280
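+
+/*
+ * Resulting on-chip SRAM layout, derived from the values above
+ * (editor's note):
+ *
+ *	MCF_MBAR + 0x10000 .. MCF_MBAR + 0x12000   DMA area (8192 bytes)
+ *	MCF_MBAR + 0x12000 .. MCF_MBAR + 0x12800   FEC area (2048 bytes)
+ *	MCF_MBAR + 0x12800 .. MCF_MBAR + 0x12D00   SEC area (1280 bytes)
+ */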
+
+#endif /* SYS_SRAM_H */
--- /dev/null
+/*
+ * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All rights reserved.
+ * Author: Chenghu Wu <b16972@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+#ifndef __MCFFEC_H__
+#define __MCFFEC_H__
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <asm/pgtable.h>
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE 1518
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+ defined(CONFIG_M537x) || defined(CONFIG_M5301x) || \
+ defined(CONFIG_M5445X)
+#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#else
+#define OPT_FRAME_SIZE 0
+#endif
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero then assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define FEC_FLASHMAC 0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define FEC_FLASHMAC 0xf0006000
+#elif defined(CONFIG_CANCam)
+#define FEC_FLASHMAC 0xf0020000
+#elif defined(CONFIG_M5272C3)
+#define FEC_FLASHMAC (0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC 0xffc0406b
+#else
+#define FEC_FLASHMAC 0
+#endif
+
+#ifdef CONFIG_FEC_DMA_USE_SRAM
+#define TX_RING_SIZE 8 /* Must be power of two */
+#define TX_RING_MOD_MASK 7 /* for this to work */
+#else
+#define TX_RING_SIZE 16 /* Must be power of two */
+#define TX_RING_MOD_MASK 15 /* for this to work */
+#endif
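+
+/*
+ * Editor's note: TX_RING_SIZE being a power of two lets ring indices wrap
+ * with a mask instead of a modulo, e.g. (sketch only):
+ *
+ *	next_idx = (cur_idx + 1) & TX_RING_MOD_MASK;
+ */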
+
+typedef struct fec {
+ unsigned long fec_reserved0;
+ unsigned long fec_ievent; /* Interrupt event reg */
+ unsigned long fec_imask; /* Interrupt mask reg */
+ unsigned long fec_reserved1;
+ unsigned long fec_r_des_active; /* Receive descriptor reg */
+ unsigned long fec_x_des_active; /* Transmit descriptor reg */
+ unsigned long fec_reserved2[3];
+ unsigned long fec_ecntrl; /* Ethernet control reg */
+ unsigned long fec_reserved3[6];
+ unsigned long fec_mii_data; /* MII manage frame reg */
+ unsigned long fec_mii_speed; /* MII speed control reg */
+ unsigned long fec_reserved4[7];
+ unsigned long fec_mib_ctrlstat; /* MIB control/status reg */
+ unsigned long fec_reserved5[7];
+ unsigned long fec_r_cntrl; /* Receive control reg */
+ unsigned long fec_reserved6[15];
+ unsigned long fec_x_cntrl; /* Transmit Control reg */
+ unsigned long fec_reserved7[7];
+ unsigned long fec_addr_low; /* Low 32bits MAC address */
+ unsigned long fec_addr_high; /* High 16bits MAC address */
+ unsigned long fec_opd; /* Opcode + Pause duration */
+ unsigned long fec_reserved8[10];
+ unsigned long fec_hash_table_high; /* High 32bits hash table */
+ unsigned long fec_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_grp_hash_table_high;/* High 32bits hash table */
+ unsigned long fec_grp_hash_table_low; /* Low 32bits hash table */
+ unsigned long fec_reserved9[7];
+ unsigned long fec_x_wmrk; /* FIFO transmit water mark */
+ unsigned long fec_reserved10;
+ unsigned long fec_r_bound; /* FIFO receive bound reg */
+ unsigned long fec_r_fstart; /* FIFO receive start reg */
+ unsigned long fec_reserved11[11];
+ unsigned long fec_r_des_start; /* Receive descriptor ring */
+ unsigned long fec_x_des_start; /* Transmit descriptor ring */
+ unsigned long fec_r_buff_size; /* Maximum receive buff size */
+} fec_t;
+
+/*
+ * Define the buffer descriptor structure.
+ */
+typedef struct bufdesc {
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned short cbd_datlen; /* Data length */
+ unsigned long cbd_bufaddr; /* Buffer address */
+} cbd_t;
+
+/* Forward declarations of some structures to support different PHYs
+ */
+typedef struct {
+ uint mii_data;
+ void (*funct)(uint mii_reg, struct net_device *dev);
+} phy_cmd_t;
+
+typedef struct {
+ uint id;
+ char *name;
+
+ const phy_cmd_t *config;
+ const phy_cmd_t *startup;
+ const phy_cmd_t *ack_int;
+ const phy_cmd_t *shutdown;
+} phy_info_t;
+
+/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors. The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller. The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions. The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
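+/*
+ * Editor's sketch: when cur_tx == dirty_tx, the ring is full if the
+ * descriptor at dirty_tx is still marked ready, empty otherwise.  The
+ * BD_ENET_TX_READY flag name is assumed from the common FEC definitions
+ * and does not appear in this file:
+ *
+ *	int full = (fep->cur_tx == fep->dirty_tx) &&
+ *		   (fep->dirty_tx->cbd_sc & BD_ENET_TX_READY);
+ */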
+struct fec_enet_private {
+ /* Hardware registers of the FEC device */
+ volatile fec_t *hwp;
+
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+ ushort skb_cur;
+ ushort skb_dirty;
+
+ /* CPM dual port RAM relative addresses.
+ */
+ cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
+ cbd_t *tx_bd_base;
+ cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
+ cbd_t *dirty_tx; /* The ring entries to be free()ed. */
+ uint tx_full;
+ /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+ spinlock_t hw_lock;
+
+ /* hold while accessing the mii_list_t() elements */
+ spinlock_t mii_lock;
+ struct mii_bus *mdio_bus;
+ struct phy_device *phydev;
+
+ uint phy_id;
+ uint phy_id_done;
+ uint phy_status;
+ uint phy_speed;
+ phy_info_t const *phy;
+ struct work_struct phy_task;
+ volatile fec_t *phy_hwp;
+
+ uint sequence_done;
+ uint mii_phy_task_queued;
+
+ uint phy_addr;
+
+ int index;
+ int opened;
+ int link;
+ int old_link;
+ int full_duplex;
+ int duplex;
+ int speed;
+ int msg_enable;
+};
+
+struct fec_platform_private {
+ struct platform_device *pdev;
+
+ unsigned long quirks;
+ int num_slots; /* Slots on controller */
+ struct fec_enet_private *fep_host[0]; /* Pointers to hosts */
+};
+
+#endif
--- /dev/null
+/* ld script to make m68k Coldfire Linux kernel
+ *
+ * Derived from arch/m68k/kernel/vmlinux-std.lds
+ *
+ * Updated 11/26/2007 for new CodeSourcery toolset
+ * by Kurt Mahan <kmahan@freescale.com>
+ *
+ * Copyright Freescale Semiconductor, Inc. 2008-2009
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ */
+
+#define LOAD_OFFSET 0x00000000
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page_offset.h>
+
+#define START_OFFSET 0x00020000
+#define IMAGE_START PAGE_OFFSET_RAW + START_OFFSET
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_stext)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+ . = IMAGE_START;
+ .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
+ _text = .; /* Text and read-only data */
+ HEAD_TEXT
+ } :text = 0x4e75
+
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x4e75
+
+ _etext = .; /* End of text section */
+
+ . = ALIGN(16);
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ }
+
+ RODATA
+
+ . = ALIGN(8192);
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */
+ DATA_DATA
+ CONSTRUCTORS
+ } :data
+
+
+ . = ALIGN(16);
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET ) {
+ *(.data.cacheline_aligned)
+ } :data
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* Initrd */
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ __init_begin = .;
+ _sinittext = .;
+ INIT_TEXT
+ _einittext = .;
+ }
+
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
+ INIT_DATA
+ }
+
+ . = ALIGN(16);
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ }
+
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ }
+
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ }
+
+ SECURITY_INIT
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(8192);
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+ }
+#endif
+ NOTES
+ . = ALIGN(8192);
+ __init_end = .;
+
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
+ *(.data.init_task) /* The initial task and kernel stack */
+ }
+
+ _sbss = .;
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { /* BSS */
+ *(.bss)
+ }
+ _ebss = .;
+
+ _end = . ;
+
+ /DISCARD/ : {
+ EXIT_TEXT
+ EXIT_DATA
+ *(.exitcall.exit)
+ *(.discard)
+ }
+
+ STABS_DEBUG
+ .comment 0 : { *(.comment) }
+
+ /* Sections to be discarded */
+ /*DISCARDS*/
+
+}
--- /dev/null
+/*
+ * linux/arch/m68k/mm/cf-mmu.c
+ *
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the Coldfire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Copyright Freescale Semiconductor, Inc. 2008-2009
+ * Jason Jin Jason.Jin@freescale.com
+ * Shrek Wu B16972@freescale.com
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blkdev.h>
+#endif
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/cf_pgalloc.h>
+
+#include <asm/coldfire.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x) ((x >= VMALLOC_START) && (x < KMAP_END))
+
+#undef DEBUG
+
+#ifdef CONFIG_VDSO
+unsigned long next_mmu_context;
+#else
+mm_context_t next_mmu_context;
+#endif
+
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+void steal_context(void);
+#ifdef CONFIG_M5445X
+void m68k_setup_node(int);
+#endif
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+
+extern unsigned long empty_bad_page_table;
+extern unsigned long empty_bad_page;
+extern unsigned long num_pages;
+#ifdef CONFIG_M5445X
+extern unsigned long availmem;
+#endif
+extern char __init_begin, __init_end;
+
+/*
+ * Free memory used for system initialization.
+ */
+void free_initmem(void)
+{
+#if 0
+ unsigned long addr;
+ unsigned long start = (unsigned long)&__init_begin;
+ unsigned long end = (unsigned long)&__init_end;
+
+ printk(KERN_INFO "free_initmem: __init_begin = 0x%lx __init_end = 0x%lx\n", start, end);
+
+ addr = (unsigned long)&__init_begin;
+ for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+ /* not currently used */
+ virt_to_page(addr)->flags &= ~(1 << PG_reserved);
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+#endif
+}
+
+/*
+ * Initialize the paging system.
+ */
+void __init paging_init(void)
+{
+ pgd_t * pg_dir;
+ pte_t * pg_table;
+ int i;
+ unsigned long address;
+ unsigned long next_pgtable;
+ unsigned long zones_size[MAX_NR_ZONES];
+ unsigned long size;
+ enum zone_type zone;
+
+ /* allocate zero page */
+ empty_zero_page = (void *)alloc_bootmem_pages(PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+ /* zero kernel page directory */
+ pg_dir = swapper_pg_dir;
+ memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+ /*
+ * setup page tables for PHYSRAM
+ */
+
+ /* starting loc in page directory */
+ pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+ /* allocate page tables */
+ size = num_pages * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+ address = PAGE_OFFSET;
+ while (address < (unsigned long)high_memory) {
+ /* setup page table in page directory */
+ pg_table = (pte_t *)next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+ pgd_val(*pg_dir) = (unsigned long)pg_table;
+ pg_dir++;
+
+ /* create PTEs in page table */
+ for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (unsigned long)high_memory)
+ pte_val (pte) = 0;
+
+ set_pte(pg_table, pte);
+ address += PAGE_SIZE;
+ }
+ }
+
+ /*
+ * setup page tables for DMA area
+ */
+
+ /* starting loc in page directory */
+ pg_dir = swapper_pg_dir;
+ pg_dir += CONFIG_DMA_BASE >> PGDIR_SHIFT;
+
+ /* allocate page tables */
+ size = (CONFIG_DMA_SIZE >> PAGE_SHIFT) * sizeof(pte_t);
+ size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+ next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+ address = CONFIG_DMA_BASE;
+ while (address < (CONFIG_DMA_BASE + CONFIG_DMA_SIZE)) {
+ /* setup page table in page directory */
+ pg_table = (pte_t *)next_pgtable;
+ next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+ pgd_val(*pg_dir) = (unsigned long)pg_table;
+ pg_dir++;
+
+ /* create PTEs in page table */
+ for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
+ pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+ if (address >= (CONFIG_DMA_BASE + CONFIG_DMA_SIZE))
+ pte_val (pte) = 0;
+
+ set_pte(pg_table, pte);
+ address += PAGE_SIZE;
+ }
+ }
+
+ /*
+ * setup zones
+ */
+
+ current->mm = NULL;
+
+ /* clear zones */
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ zones_size[zone] = 0x0;
+
+ zones_size[ZONE_DMA] = CONFIG_DMA_SIZE >> PAGE_SHIFT;
+ zones_size[ZONE_NORMAL] = (((unsigned long)high_memory -
+ PAGE_OFFSET) >> PAGE_SHIFT) -
+ zones_size[ZONE_DMA];
+
+ free_area_init(zones_size);
+}
+/*
+ * Handle a missed TLB
+ */
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long mmuar;
+ int asid;
+ int flags;
+
+ local_save_flags(flags);
+ local_irq_disable();
+
+ mmuar = ( dtlb ) ? regs->mmuar
+ : regs->pc + (extension_word * sizeof(long));
+
+ mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+
+ if (!mm) {
+ local_irq_restore(flags);
+ return (-1);
+ }
+
+ pgd = pgd_offset(mm, mmuar);
+ if (pgd_none(*pgd)) {
+ local_irq_restore(flags);
+ return (-1);
+ }
+
+ pmd = pmd_offset(pgd, mmuar);
+ if (pmd_none(*pmd)) {
+ local_irq_restore(flags);
+ return (-1);
+ }
+
+ pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+ : pte_offset_map(pmd, mmuar);
+ if (pte_none(*pte) || !pte_present(*pte)) {
+ local_irq_restore(flags);
+ return (-1);
+ }
+
+ if (write) {
+ if (!pte_write(*pte)) {
+ local_irq_restore(flags);
+ return (-1);
+ }
+ set_pte(pte, pte_mkdirty(*pte));
+ }
+
+ set_pte(pte, pte_mkyoung(*pte));
+ asid = cpu_context(mm) & 0xff;
+ if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+ set_pte(pte, pte_wrprotect(*pte));
+
+ *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
+ | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK ) >> CF_PAGE_MMUTR_SHIFT)
+ | MMUTR_V;
+
+ *MMUDR = (pte_val(*pte) & PAGE_MASK)
+ | ((pte->pte) & CF_PAGE_MMUDR_MASK)
+ | MMUDR_SZ8K | MMUDR_X;
+
+ if ( dtlb )
+ *MMUOR = MMUOR_ACC | MMUOR_UAA;
+ else
+ *MMUOR = MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA;
+
+ asm("nop");
+
+#ifdef DEBUG
+ printk("cf_tlb_miss: va=%lx, pa=%lx\n", (mmuar & PAGE_MASK),
+ (pte_val(*pte) & PAGE_MASK));
+#endif
+ local_irq_restore(flags);
+ return (0);
+}
+
+
+/*
+ * Context Management
+ *
+ * Based on arch/ppc/mmu_context.c
+ */
+
+/*
+ * Initialize the context management system.
+ */
+void __init mmu_context_init(void)
+{
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes FIRST_CONTEXT < 32.
+ */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+ atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP. If they do then this will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ */
+void steal_context(void)
+{
+ struct mm_struct *mm;
+ /* free up context `next_mmu_context' */
+ /* if we shouldn't free context 0, don't... */
+ if (next_mmu_context < FIRST_CONTEXT)
+ next_mmu_context = FIRST_CONTEXT;
+ mm = context_mm[next_mmu_context];
+ flush_tlb_mm(mm);
+ destroy_context(mm);
+}
--- /dev/null
+/*
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Author: Kurt Mahan, kmahan@freescale.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+#include <asm/dma.h>
+#include <asm/MCD_dma.h>
+#include <asm/m5485sram.h>
+#include <asm/virtconvert.h>
+#include <asm/irq.h>
+
+#include "fec_m547x.h"
+
+#ifdef CONFIG_FEC_548x_ENABLE_FEC2
+#define FEC_MAX_PORTS 2
+#define FEC_2
+#else
+#define FEC_MAX_PORTS 1
+#undef FEC_2
+#endif
+
+#define VERSION "0.20"
+MODULE_DESCRIPTION("DMA Fast Ethernet Controller driver ver " VERSION);
+
+/* fec private */
+struct fec_priv {
+ struct net_device *netdev; /* owning net device */
+ void *fecpriv_txbuf[FEC_TX_BUF_NUMBER]; /* tx buffer ptrs */
+ MCD_bufDescFec *fecpriv_txdesc; /* tx descriptor ptrs */
+ volatile unsigned int fecpriv_current_tx; /* current tx desc index */
+ volatile unsigned int fecpriv_next_tx; /* next tx desc index */
+ unsigned int fecpriv_current_rx; /* current rx desc index */
+ MCD_bufDescFec *fecpriv_rxdesc; /* rx descriptor ptrs */
+ struct sk_buff *askb_rx[FEC_RX_BUF_NUMBER]; /* rx SKB ptrs */
+ unsigned int fecpriv_initiator_rx; /* rx dma initiator */
+ unsigned int fecpriv_initiator_tx; /* tx dma initiator */
+ int fecpriv_fec_rx_channel; /* rx dma channel */
+ int fecpriv_fec_tx_channel; /* tx dma channel */
+ int fecpriv_rx_requestor; /* rx dma requestor */
+ int fecpriv_tx_requestor; /* tx dma requestor */
+ void *fecpriv_interrupt_fec_rx_handler; /* dma rx handler */
+ void *fecpriv_interrupt_fec_tx_handler; /* dma tx handler */
+ unsigned char *fecpriv_mac_addr; /* private fec mac addr */
+ struct net_device_stats fecpriv_stat; /* stats ptr */
+ spinlock_t fecpriv_lock;
+ int fecpriv_rxflag;
+ struct tasklet_struct fecpriv_tasklet_reinit;
+ int index; /* fec hw number */
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int duplex;
+ int link;
+ int speed;
+};
+
+struct net_device *fec_dev[FEC_MAX_PORTS];
+
+/* FEC functions */
+static int __init fec_init(void);
+static struct net_device_stats *fec_get_stat(struct net_device *dev);
+static int fec_open(struct net_device *dev);
+static int fec_close(struct net_device *nd);
+static int fec_tx(struct sk_buff *skb, struct net_device *dev);
+static void fec_set_multicast_list(struct net_device *nd);
+static int fec_set_mac_address(struct net_device *dev, void *p);
+static void fec_tx_timeout(struct net_device *dev);
+static void fec_interrupt_fec_tx_handler(struct net_device *dev);
+static void fec_interrupt_fec_rx_handler(struct net_device *dev);
+static irqreturn_t fec_interrupt_handler(int irq, void *dev_id);
+static void fec_interrupt_fec_tx_handler_fec0(void);
+static void fec_interrupt_fec_rx_handler_fec0(void);
+static void fec_interrupt_fec_reinit(unsigned long data);
+
+/* default fec0 address */
+unsigned char fec_mac_addr_fec0[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x50 };
+
+#ifdef FEC_2
+/* default fec1 address */
+unsigned char fec_mac_addr_fec1[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x51 };
+#endif
+
+extern unsigned char uboot_enet0[];
+extern unsigned char uboot_enet1[];
+
+#ifndef MODULE
+int fec_str_to_mac(char *str_mac, unsigned char* addr);
+int __init fec_mac_setup0(char *s);
+#endif
+
+
+#ifdef FEC_2
+void fec_interrupt_fec_tx_handler_fec1(void);
+void fec_interrupt_fec_rx_handler_fec1(void);
+#endif
+
+#ifndef MODULE
+int __init fec_mac_setup1(char *s);
+#endif
+
+module_init(fec_init);
+/* module_exit(fec_cleanup); */
+
+__setup("mac0=", fec_mac_setup0);
+
+#ifdef FEC_2
+__setup("mac1=", fec_mac_setup1);
+#endif
+
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
+ (VAL & 0xffff))
+/* ----------------------------------------------------------- */
+static int coldfire_fec_mdio_read(struct mii_bus *bus,
+ int phy_id, int reg)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+#ifdef CONFIG_FEC_548x_SHARED_PHY
+ unsigned long base_addr = (unsigned long)FEC_BASE_ADDR_FEC0;
+#else
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+#endif
+ int tries = 100;
+
+ /* Clear the MII interrupt bit */
+ FEC_EIR(base_addr) = FEC_EIR_MII;
+
+ /* Write to the MII management frame register */
+ FEC_MMFR(base_addr) = mk_mii_read(reg) | (phy_id << 23);
+
+ /* Wait for the reading */
+ while (!(FEC_EIR(base_addr) & FEC_EIR_MII)) {
+ udelay(10);
+
+ if (!tries) {
+ printk(KERN_ERR "%s timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ tries--;
+ }
+
+ /* Clear the MII interrupt bit */
+ FEC_EIR(base_addr) = FEC_EIR_MII;
+ ret = FEC_MMFR(base_addr) & 0x0000FFFF;
+ return ret;
+}
+
+static int coldfire_fec_mdio_write(struct mii_bus *bus,
+ int phy_id, int reg, u16 data)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+#ifdef CONFIG_FEC_548x_SHARED_PHY
+ unsigned long base_addr = (unsigned long)FEC_BASE_ADDR_FEC0;
+#else
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+#endif
+ int tries = 100;
+
+ printk(KERN_ERR "%s base_addr %x, phy_id %x, reg %x, data %x\n",
+ __func__, base_addr, phy_id, reg, data);
+ /* Clear the MII interrupt bit */
+ FEC_EIR(base_addr) = FEC_EIR_MII;
+
+ /* Write to the MII management frame register */
+ FEC_MMFR(base_addr) = mk_mii_write(reg, data) | (phy_id << 23);
+
+ /* Wait for the writing */
+ while (!(FEC_EIR(base_addr) & FEC_EIR_MII)) {
+ udelay(10);
+ if (!tries) {
+ printk(KERN_ERR "%s timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ tries--;
+ }
+ /* Clear the MII interrupt bit */
+ FEC_EIR(base_addr) = FEC_EIR_MII;
+ ret = FEC_MMFR(base_addr) & 0x0000FFFF;
+
+ return ret;
+}
+
+static void fec_adjust_link(struct net_device *dev)
+{
+ struct fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != priv->duplex) {
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+
+ if (priv->link == PHY_DOWN) {
+ new_state = 1;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int coldfire_fec_init_phy(struct net_device *dev)
+{
+ struct fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int i;
+ int startnode;
+
+#ifdef CONFIG_FEC_548x_SHARED_PHY
+ if (priv->index == 0)
+ startnode = 0;
+ else if (priv->index == 1) {
+ struct fec_priv *priv0 = netdev_priv(fec_dev[0]);
+ startnode = priv0->phydev->addr + 1;
+ } else
+ startnode = 0;
+#else
+ startnode = 0;
+#endif
+#ifdef FEC_DEBUG
+ printk(KERN_ERR "%s priv->index %x, startnode %x\n",
+ __func__, priv->index, startnode);
+#endif
+ /* search for connect PHY device */
+ for (i = startnode; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *const tmp_phydev =
+ priv->mdio_bus->phy_map[i];
+
+ if (!tmp_phydev) {
+#ifdef FEC_DEBUG
+ printk(KERN_INFO "%s no PHY here at"
+ "mii_bus->phy_map[%d]\n",
+ __func__, i);
+#endif
+ continue; /* no PHY here... */
+ }
+ phydev = tmp_phydev;
+#ifdef FEC_DEBUG
+ printk(KERN_INFO "%s found PHY at "
+ "mii_bus->phy_map[%d]\n",
+ __func__, i);
+#endif
+ break; /* found it */
+ }
+
+ /* now we are supposed to have a proper phydev, to attach to... */
+ if (!phydev) {
+ printk(KERN_INFO "%s: didn't find any PHY device at all\n",
+ dev->name);
+ return -ENODEV;
+ }
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = 0;
+#ifdef FEC_DEBUG
+ printk(KERN_INFO "%s phydev_busid %s\n", __func__, dev_name(&phydev->dev));
+#endif
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &fec_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR " %s phy_connect failed\n", __func__);
+ return PTR_ERR(phydev);
+ }
+
+ printk(KERN_INFO "attached phy %i to driver %s\n",
+ phydev->addr, phydev->drv->name);
+ priv->phydev = phydev;
+ return 0;
+}
+
+static int fec_mdio_register(struct net_device *dev,
+ int slot)
+{
+ int err = 0;
+ struct fec_priv *fp = netdev_priv(dev);
+
+ fp->mdio_bus = mdiobus_alloc();
+ if (!fp->mdio_bus) {
+ printk(KERN_ERR "ethernet mdiobus_alloc fail\n");
+ return -ENOMEM;
+ }
+
+ if (slot == 0) {
+ fp->mdio_bus->name = "Coldfire FEC MII 0 Bus";
+ strcpy(fp->mdio_bus->id, "0");
+ } else if (slot == 1) {
+ fp->mdio_bus->name = "Coldfire FEC MII 1 Bus";
+ strcpy(fp->mdio_bus->id, "1");
+ } else {
+ printk(KERN_ERR "ColdFire cannot "
+ "support more than 2 MII buses\n");
+ }
+
+ fp->mdio_bus->read = &coldfire_fec_mdio_read;
+ fp->mdio_bus->write = &coldfire_fec_mdio_write;
+ fp->mdio_bus->priv = dev;
+ err = mdiobus_register(fp->mdio_bus);
+ if (err) {
+ mdiobus_free(fp->mdio_bus);
+ printk(KERN_ERR "%s: ethernet mdiobus_register fail %d\n",
+ dev->name, err);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "mdiobus_register %s ok\n",
+ fp->mdio_bus->name);
+ return err;
+}
+
+static const struct net_device_ops fec_netdev_ops = {
+ .ndo_open = fec_open,
+ .ndo_stop = fec_close,
+ .ndo_start_xmit = fec_tx,
+ .ndo_set_multicast_list = fec_set_multicast_list,
+ .ndo_tx_timeout = fec_tx_timeout,
+ .ndo_get_stats = fec_get_stat,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = fec_set_mac_address,
+};
+
+/*
+ * Initialize a FEC device
+ */
+int fec_enet_init(struct net_device *dev, int slot)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ int i;
+
+ fp->index = slot;
+ fp->netdev = dev;
+ fec_dev[slot] = dev;
+
+ if (slot == 0) {
+ /* disable fec0 */
+ FEC_ECR(FEC_BASE_ADDR_FEC0) = FEC_ECR_DISABLE;
+
+ /* setup the interrupt handler */
+ dev->irq = 64 + ISC_FEC0;
+
+ if (request_irq(dev->irq, fec_interrupt_handler,
+ IRQF_DISABLED, "ColdFire FEC 0", dev)) {
+ dev->irq = 0;
+ printk(KERN_ERR "Cannot allocate FEC0 IRQ\n");
+ } else {
+ /* interrupt priority and level */
+ MCF_ICR(ISC_FEC0) = ILP_FEC0;
+ }
+
+ /* fec base address */
+ dev->base_addr = FEC_BASE_ADDR_FEC0;
+
+ /* requestor numbers */
+ fp->fecpriv_rx_requestor = DMA_FEC0_RX;
+ fp->fecpriv_tx_requestor = DMA_FEC0_TX;
+
+ /* fec0 handlers */
+ fp->fecpriv_interrupt_fec_rx_handler =
+ fec_interrupt_fec_rx_handler_fec0;
+ fp->fecpriv_interrupt_fec_tx_handler =
+ fec_interrupt_fec_tx_handler_fec0;
+
+ /* tx descriptors */
+ fp->fecpriv_txdesc = (void *)FEC_TX_DESC_FEC0;
+
+ /* rx descriptors */
+ fp->fecpriv_rxdesc = (void *)FEC_RX_DESC_FEC0;
+
+ /* mac addr
+ if (uboot_enet0[0] || uboot_enet0[1] || uboot_enet0[2] ||
+ uboot_enet0[3] || uboot_enet0[4] || uboot_enet0[5]) {
+ use uboot enet 0 addr
+ memcpy(fec_mac_addr_fec0, uboot_enet0, 6);
+ }*/
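+ /*
+ * Read back the station address from the PALR/PAUR registers
+ * (set earlier, typically by the boot loader) and use it as the
+ * default MAC address for this interface.
+ */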
+ fec_mac_addr_fec0[0] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC0) >> 24) & 0xFF;
+ fec_mac_addr_fec0[1] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC0) >> 16) & 0xFF;
+ fec_mac_addr_fec0[2] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC0) >> 8) & 0xFF;
+ fec_mac_addr_fec0[3] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC0)) & 0xFF;
+ fec_mac_addr_fec0[4] =
+ (FEC_PAUR(FEC_BASE_ADDR_FEC0) >> 24) & 0xFF;
+ fec_mac_addr_fec0[5] =
+ (FEC_PAUR(FEC_BASE_ADDR_FEC0) >> 16) & 0xFF;
+
+ fp->fecpriv_mac_addr = fec_mac_addr_fec0;
+ } else {
+ /* disable fec1 */
+ FEC_ECR(FEC_BASE_ADDR_FEC1) = FEC_ECR_DISABLE;
+#ifdef FEC_2
+ /* setup the interrupt handler */
+ dev->irq = 64 + ISC_FEC1;
+
+ if (request_irq(dev->irq, fec_interrupt_handler,
+ IRQF_DISABLED, "ColdFire FEC 1", dev)) {
+ dev->irq = 0;
+ printk(KERN_ERR "Cannot allocate FEC1 IRQ\n");
+ } else {
+ /* interrupt priority and level */
+ MCF_ICR(ISC_FEC1) = ILP_FEC1;
+ }
+
+ /* fec base address */
+ dev->base_addr = FEC_BASE_ADDR_FEC1;
+
+ /* requestor numbers */
+ fp->fecpriv_rx_requestor = DMA_FEC1_RX;
+ fp->fecpriv_tx_requestor = DMA_FEC1_TX;
+
+ /* fec1 handlers */
+ fp->fecpriv_interrupt_fec_rx_handler =
+ fec_interrupt_fec_rx_handler_fec1;
+ fp->fecpriv_interrupt_fec_tx_handler =
+ fec_interrupt_fec_tx_handler_fec1;
+
+ /* tx descriptors */
+ fp->fecpriv_txdesc = (void *)FEC_TX_DESC_FEC1;
+
+ /* rx descriptors */
+ fp->fecpriv_rxdesc = (void *)FEC_RX_DESC_FEC1;
+
+ /* mac addr
+ if (uboot_enet1[0] || uboot_enet1[1] || uboot_enet1[2] ||
+ uboot_enet1[3] || uboot_enet1[4] || uboot_enet1[5]) {
+ use uboot enet 1 addr
+ memcpy(fec_mac_addr_fec1, uboot_enet1, 6);
+ }*/
+ fec_mac_addr_fec1[0] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC1) >> 24) & 0xFF;
+ fec_mac_addr_fec1[1] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC1) >> 16) & 0xFF;
+ fec_mac_addr_fec1[2] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC1) >> 8) & 0xFF;
+ fec_mac_addr_fec1[3] =
+ (FEC_PALR(FEC_BASE_ADDR_FEC1)) & 0xFF;
+ fec_mac_addr_fec1[4] =
+ (FEC_PAUR(FEC_BASE_ADDR_FEC1) >> 24) & 0xFF;
+ fec_mac_addr_fec1[5] =
+ (FEC_PAUR(FEC_BASE_ADDR_FEC1) >> 16) & 0xFF;
+
+ fp->fecpriv_mac_addr = fec_mac_addr_fec1;
+#endif
+ }
+
+ /* clear MIB */
+ memset((void *) (dev->base_addr + 0x200), 0, FEC_MIB_LEN);
+
+ /* clear the statistics structure */
+ memset((void *) &(fp->fecpriv_stat), 0,
+ sizeof(struct net_device_stats));
+
+ /* grab the FEC initiators */
+ dma_set_initiator(fp->fecpriv_tx_requestor);
+ fp->fecpriv_initiator_tx = dma_get_initiator(fp->fecpriv_tx_requestor);
+ dma_set_initiator(fp->fecpriv_rx_requestor);
+ fp->fecpriv_initiator_rx = dma_get_initiator(fp->fecpriv_rx_requestor);
+
+ /* reset the DMA channels */
+ fp->fecpriv_fec_rx_channel = -1;
+ fp->fecpriv_fec_tx_channel = -1;
+
+ for (i = 0; i < FEC_RX_BUF_NUMBER; i++)
+ fp->askb_rx[i] = NULL;
+
+ /* initialize the pointers to the socket buffers */
+ for (i = 0; i < FEC_TX_BUF_NUMBER; i++)
+ fp->fecpriv_txbuf[i] = NULL;
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &fec_netdev_ops;
+ dev->watchdog_timeo = FEC_TX_TIMEOUT * HZ;
+
+ memcpy(dev->dev_addr, fp->fecpriv_mac_addr, ETH_ALEN);
+
+ spin_lock_init(&fp->fecpriv_lock);
+
+ /* Initialize FEC/I2C/IRQ Pin Assignment Register*/
+ FEC_GPIO_PAR_FECI2CIRQ &= 0xF;
+ FEC_GPIO_PAR_FECI2CIRQ |= FEC_FECI2CIRQ;
+
+ return 0;
+}
+
+/*
+ * Module Initialization
+ */
+int __init fec_init(void)
+{
+ struct net_device *dev;
+ int i;
+ int err;
+ struct fec_priv *fep;
+ DECLARE_MAC_BUF(mac);
+
+ printk(KERN_INFO "FEC ENET (DMA) Version %s\n", VERSION);
+
+ for (i = 0; i < FEC_MAX_PORTS; i++) {
+ dev = alloc_etherdev(sizeof(struct fec_priv));
+ if (!dev)
+ return -ENOMEM;
+ err = fec_enet_init(dev, i);
+ if (err) {
+ free_netdev(dev);
+ continue;
+ }
+
+ fep = netdev_priv(dev);
+ FEC_MSCR(dev->base_addr) = FEC_MII_SPEED;
+#ifdef CONFIG_FEC_548x_SHARED_PHY
+ if (i == 0)
+ err = fec_mdio_register(dev, i);
+ else {
+ struct fec_priv *priv0 = netdev_priv(fec_dev[0]);
+ fep->mdio_bus = priv0->mdio_bus;
+ printk(KERN_INFO "FEC%d SHARED the %s ok\n",
+ i, fep->mdio_bus->name);
+ }
+#else
+ err = fec_mdio_register(dev, i);
+#endif
+ if (err) {
+ printk(KERN_ERR "%s: ethernet fec_mdio_register\n",
+ dev->name);
+ free_netdev(dev);
+ return -ENOMEM;
+ }
+
+ if (register_netdev(dev) != 0) {
+ free_netdev(dev);
+ return -EIO;
+ }
+
+ printk(KERN_INFO "%s: ethernet %s\n",
+ dev->name, print_mac(mac, dev->dev_addr));
+ }
+ return 0;
+}
+
+/*
+ * Stop a device
+ */
+void fec_stop(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+
+ dma_remove_initiator(fp->fecpriv_initiator_tx);
+ dma_remove_initiator(fp->fecpriv_initiator_rx);
+
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+}
+
+/************************************************************************
+* NAME: fec_open
+*
+* DESCRIPTION: This function performs the initialization
+* of the FEC and the corresponding KS8721 transceiver
+*
+* RETURNS: If no error occurs, this function returns zero.
+*************************************************************************/
+int fec_open(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+ int fduplex;
+ int i;
+ int channel;
+ int error_code = -EBUSY;
+
+ fp->link = 0;
+ fp->duplex = 0;
+ fp->speed = 0;
+ coldfire_fec_init_phy(dev);
+ phy_start(fp->phydev);
+
+ /* Receive the DMA channels */
+ channel = dma_set_channel_fec(fp->fecpriv_rx_requestor);
+
+ if (channel == -1) {
+ printk(KERN_ERR "Dma channel cannot be reserved\n");
+ goto ERRORS;
+ }
+
+ fp->fecpriv_fec_rx_channel = channel;
+
+ dma_connect(channel, (int) fp->fecpriv_interrupt_fec_rx_handler);
+
+ channel = dma_set_channel_fec(fp->fecpriv_tx_requestor);
+
+ if (channel == -1) {
+ printk(KERN_ERR "Dma channel cannot be reserved\n");
+ goto ERRORS;
+ }
+
+ fp->fecpriv_fec_tx_channel = channel;
+
+ dma_connect(channel, (int) fp->fecpriv_interrupt_fec_tx_handler);
+
+ /* init tasklet for controller reinitialization */
+ tasklet_init(&fp->fecpriv_tasklet_reinit,
+ fec_interrupt_fec_reinit, (unsigned long) dev);
+
+ /* Reset FIFOs */
+ FEC_FECFRST(base_addr) |= FEC_SW_RST | FEC_RST_CTL;
+ FEC_FECFRST(base_addr) &= ~FEC_SW_RST;
+
+ /* Reset and disable FEC */
+ FEC_ECR(base_addr) = FEC_ECR_RESET;
+
+ udelay(10);
+
+ /* Clear all events */
+ FEC_EIR(base_addr) = FEC_EIR_CLEAR;
+
+ /* Reset FIFO status */
+ FEC_FECTFSR(base_addr) = FEC_FECTFSR_MSK;
+ FEC_FECRFSR(base_addr) = FEC_FECRFSR_MSK;
+
+ /* Set the default address */
+ FEC_PALR(base_addr) = (fp->fecpriv_mac_addr[0] << 24) |
+ (fp->fecpriv_mac_addr[1] << 16) |
+ (fp->fecpriv_mac_addr[2] << 8) |
+ fp->fecpriv_mac_addr[3];
+ FEC_PAUR(base_addr) = (fp->fecpriv_mac_addr[4] << 24) |
+ (fp->fecpriv_mac_addr[5] << 16) | 0x8808;
+
+ /* Reset the group address descriptor */
+ FEC_GALR(base_addr) = 0x00000000;
+ FEC_GAUR(base_addr) = 0x00000000;
+
+ /* Reset the individual address descriptor */
+ FEC_IALR(base_addr) = 0x00000000;
+ FEC_IAUR(base_addr) = 0x00000000;
+
+ /* Set the receive control register */
+ FEC_RCR(base_addr) = FEC_RCR_MAX_FRM_SIZE | FEC_RCR_MII;
+
+ /* Set the receive FIFO control register */
+ /*FEC_FECRFCR(base_addr) =
+ * FEC_FECRFCR_FRM | FEC_FECRFCR_GR | FEC_FECRFCR_MSK;*/
+ FEC_FECRFCR(base_addr) = FEC_FECRFCR_FRM | FEC_FECRFCR_GR
+ | (FEC_FECRFCR_MSK
+ /* disable all but ...*/
+ & ~FEC_FECRFCR_FAE
+ /* enable frame accept error*/
+ & ~FEC_FECRFCR_RXW
+ /* enable receive wait condition*/
+ /*& ~FEC_FECRFCR_UF*/
+ /* enable FIFO underflow*/
+ );
+
+ /* Set the receive FIFO alarm register */
+ FEC_FECRFAR(base_addr) = FEC_FECRFAR_ALARM;
+
+ /* Set the transmit FIFO control register */
+ /*FEC_FECTFCR(base_addr) =
+ FEC_FECTFCR_FRM | FEC_FECTFCR_GR | FEC_FECTFCR_MSK;*/
+ FEC_FECTFCR(base_addr) = FEC_FECTFCR_FRM | FEC_FECTFCR_GR
+ | (FEC_FECTFCR_MSK
+ /* disable all but ... */
+ & ~FEC_FECTFCR_FAE
+ /* enable frame accept error */
+ /* & ~FEC_FECTFCR_TXW */
+ /*enable transmit wait condition*/
+ /*& ~FEC_FECTFCR_UF*/
+ /*enable FIFO underflow*/
+ & ~FEC_FECTFCR_OF);
+ /* enable FIFO overflow */
+
+ /* Set the transmit FIFO alarm register */
+ FEC_FECTFAR(base_addr) = FEC_FECTFAR_ALARM;
+
+ /* Set the Tx FIFO watermark */
+ FEC_FECTFWR(base_addr) = FEC_FECTFWR_XWMRK;
+
+ /* Enable the transmitter to append the CRC */
+ FEC_CTCWR(base_addr) = FEC_CTCWR_TFCW_CRC;
+
+ /* Enable the ethernet interrupts */
+ /*FEC_EIMR(base_addr) = FEC_EIMR_MASK;*/
+ FEC_EIMR(base_addr) = FEC_EIMR_DISABLE
+ | FEC_EIR_LC
+ | FEC_EIR_RL
+ | FEC_EIR_HBERR
+ | FEC_EIR_XFUN
+ | FEC_EIR_XFERR
+ | FEC_EIR_RFERR;
+
+#if 0
+ error_code = init_transceiver(base_addr, &fduplex);
+ if (error_code != 0) {
+ printk(KERN_ERR "Initialization of the "
+ "transceiver is failed\n");
+ goto ERRORS;
+ }
+#else
+ fduplex = 1;
+#endif
+ if (fduplex)
+ /* Enable the full duplex mode */
+ FEC_TCR(base_addr) = FEC_TCR_FDEN | FEC_TCR_HBC;
+ else
+ /* Disable reception of frames while transmitting */
+ FEC_RCR(base_addr) |= FEC_RCR_DRT;
+
+ /* Enable MIB */
+ FEC_MIBC(base_addr) = FEC_MIBC_ENABLE;
+
+ /* Enable FEC */
+ FEC_ECR(base_addr) |= FEC_ECR_ETHEREN;
+ FEC_MSCR(dev->base_addr) = FEC_MII_SPEED;
+ /* Initialize tx descriptors and start DMA for the transmission */
+ for (i = 0; i < FEC_TX_BUF_NUMBER; i++)
+ fp->fecpriv_txdesc[i].statCtrl = MCD_FEC_INTERRUPT;
+
+ fp->fecpriv_txdesc[i - 1].statCtrl |= MCD_FEC_WRAP;
+
+ fp->fecpriv_current_tx = fp->fecpriv_next_tx = 0;
+
+ MCD_startDma(fp->fecpriv_fec_tx_channel, (char *) fp->fecpriv_txdesc, 0,
+ (unsigned char *) &(FEC_FECTFDR(base_addr)), 0,
+ FEC_MAX_FRM_SIZE, 0, fp->fecpriv_initiator_tx,
+ FEC_TX_DMA_PRI, MCD_FECTX_DMA | MCD_INTERRUPT,
+ MCD_NO_CSUM | MCD_NO_BYTE_SWAP);
+
+ /* Initialize rx descriptors and start DMA for the reception */
+ for (i = 0; i < FEC_RX_BUF_NUMBER; i++) {
+ fp->askb_rx[i] = alloc_skb(FEC_MAXBUF_SIZE + 16, GFP_DMA);
+ if (!fp->askb_rx[i]) {
+ fp->fecpriv_rxdesc[i].dataPointer = 0;
+ fp->fecpriv_rxdesc[i].statCtrl = 0;
+ fp->fecpriv_rxdesc[i].length = 0;
+ } else {
+ skb_reserve(fp->askb_rx[i], 16);
+ fp->askb_rx[i]->dev = dev;
+ fp->fecpriv_rxdesc[i].dataPointer =
+ (unsigned int)virt_to_phys(fp->askb_rx[i]->tail);
+ fp->fecpriv_rxdesc[i].statCtrl =
+ MCD_FEC_BUF_READY | MCD_FEC_INTERRUPT;
+ fp->fecpriv_rxdesc[i].length = FEC_MAXBUF_SIZE;
+ }
+ }
+
+ fp->fecpriv_rxdesc[i - 1].statCtrl |= MCD_FEC_WRAP;
+ fp->fecpriv_current_rx = 0;
+
+ MCD_startDma(fp->fecpriv_fec_rx_channel, (char *) fp->fecpriv_rxdesc, 0,
+ (unsigned char *) &(FEC_FECRFDR(base_addr)), 0,
+ FEC_MAX_FRM_SIZE, 0, fp->fecpriv_initiator_rx,
+ FEC_RX_DMA_PRI, MCD_FECRX_DMA | MCD_INTERRUPT,
+ MCD_NO_CSUM | MCD_NO_BYTE_SWAP);
+
+ netif_start_queue(dev);
+ return 0;
+
+ERRORS:
+
+ /* Remove the channels and return with the error code */
+ if (fp->fecpriv_fec_rx_channel != -1) {
+ dma_disconnect(fp->fecpriv_fec_rx_channel);
+ dma_remove_channel_by_number(fp->fecpriv_fec_rx_channel);
+ fp->fecpriv_fec_rx_channel = -1;
+ }
+
+ if (fp->fecpriv_fec_tx_channel != -1) {
+ dma_disconnect(fp->fecpriv_fec_tx_channel);
+ dma_remove_channel_by_number(fp->fecpriv_fec_tx_channel);
+ fp->fecpriv_fec_tx_channel = -1;
+ }
+
+ return error_code;
+}
+
+/************************************************************************
+* NAME: fec_close
+*
+* DESCRIPTION: This function performs the graceful stop of the
+* transmission and disables FEC
+*
+* RETURNS: This function always returns zero.
+*************************************************************************/
+int fec_close(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+ unsigned long time;
+ int i;
+
+ netif_stop_queue(dev);
+ /* stop the PHY state machine before detaching from it */
+ phy_stop(fp->phydev);
+ phy_disconnect(fp->phydev);
+ /* Perform the graceful stop */
+ FEC_TCR(base_addr) |= FEC_TCR_GTS;
+
+ time = jiffies;
+
+ /* Wait for the graceful stop */
+ while (!(FEC_EIR(base_addr) & FEC_EIR_GRA) && jiffies - time <
+ (FEC_GR_TIMEOUT * HZ))
+ schedule();
+
+ /* Disable FEC */
+ FEC_ECR(base_addr) = FEC_ECR_DISABLE;
+
+ /* Reset the DMA channels */
+ spin_lock_irq(&fp->fecpriv_lock);
+ MCD_killDma(fp->fecpriv_fec_tx_channel);
+ spin_unlock_irq(&fp->fecpriv_lock);
+ dma_remove_channel_by_number(fp->fecpriv_fec_tx_channel);
+ dma_disconnect(fp->fecpriv_fec_tx_channel);
+ fp->fecpriv_fec_tx_channel = -1;
+
+ for (i = 0; i < FEC_TX_BUF_NUMBER; i++) {
+ if (fp->fecpriv_txbuf[i]) {
+ kfree(fp->fecpriv_txbuf[i]);
+ fp->fecpriv_txbuf[i] = NULL;
+ }
+ }
+
+ spin_lock_irq(&fp->fecpriv_lock);
+ MCD_killDma(fp->fecpriv_fec_rx_channel);
+ spin_unlock_irq(&fp->fecpriv_lock);
+
+ dma_remove_channel_by_number(fp->fecpriv_fec_rx_channel);
+ dma_disconnect(fp->fecpriv_fec_rx_channel);
+ fp->fecpriv_fec_rx_channel = -1;
+
+ for (i = 0; i < FEC_RX_BUF_NUMBER; i++) {
+ if (fp->askb_rx[i]) {
+ kfree_skb(fp->askb_rx[i]);
+ fp->askb_rx[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+/************************************************************************
+* NAME: fec_get_stat
+*
+* RETURNS: This function returns the statistical information.
+*************************************************************************/
+struct net_device_stats *fec_get_stat(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = dev->base_addr;
+
+ /* Receive the statistical information */
+ fp->fecpriv_stat.rx_packets = FECSTAT_RMON_R_PACKETS(base_addr);
+ fp->fecpriv_stat.tx_packets = FECSTAT_RMON_T_PACKETS(base_addr);
+ fp->fecpriv_stat.rx_bytes = FECSTAT_RMON_R_OCTETS(base_addr);
+ fp->fecpriv_stat.tx_bytes = FECSTAT_RMON_T_OCTETS(base_addr);
+
+ fp->fecpriv_stat.multicast = FECSTAT_RMON_R_MC_PKT(base_addr);
+ fp->fecpriv_stat.collisions = FECSTAT_RMON_T_COL(base_addr);
+
+ fp->fecpriv_stat.rx_length_errors =
+ FECSTAT_RMON_R_UNDERSIZE(base_addr) +
+ FECSTAT_RMON_R_OVERSIZE(base_addr) +
+ FECSTAT_RMON_R_FRAG(base_addr) +
+ FECSTAT_RMON_R_JAB(base_addr);
+ fp->fecpriv_stat.rx_crc_errors = FECSTAT_IEEE_R_CRC(base_addr);
+ fp->fecpriv_stat.rx_frame_errors = FECSTAT_IEEE_R_ALIGN(base_addr);
+ fp->fecpriv_stat.rx_over_errors = FECSTAT_IEEE_R_MACERR(base_addr);
+
+ fp->fecpriv_stat.tx_carrier_errors = FECSTAT_IEEE_T_CSERR(base_addr);
+ fp->fecpriv_stat.tx_fifo_errors = FECSTAT_IEEE_T_MACERR(base_addr);
+ fp->fecpriv_stat.tx_window_errors = FECSTAT_IEEE_T_LCOL(base_addr);
+
+ /* I hope that one frame doesn't have more than one error */
+ fp->fecpriv_stat.rx_errors = fp->fecpriv_stat.rx_length_errors +
+ fp->fecpriv_stat.rx_crc_errors +
+ fp->fecpriv_stat.rx_frame_errors +
+ fp->fecpriv_stat.rx_over_errors +
+ fp->fecpriv_stat.rx_dropped;
+ fp->fecpriv_stat.tx_errors = fp->fecpriv_stat.tx_carrier_errors +
+ fp->fecpriv_stat.tx_fifo_errors +
+ fp->fecpriv_stat.tx_window_errors +
+ fp->fecpriv_stat.tx_aborted_errors +
+ fp->fecpriv_stat.tx_heartbeat_errors +
+ fp->fecpriv_stat.tx_dropped;
+
+ return &fp->fecpriv_stat;
+}
+
+/************************************************************************
+* NAME: fec_set_multicast_list
+*
+* DESCRIPTION: This function sets the frame filtering parameters
+*************************************************************************/
+void fec_set_multicast_list(struct net_device *dev)
+{
+ struct dev_mc_list *dmi;
+ unsigned int crc, data;
+ int i, j, k;
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+
+ if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI) {
+ /* Allow all incoming frames */
+ FEC_GALR(base_addr) = 0xFFFFFFFF;
+ FEC_GAUR(base_addr) = 0xFFFFFFFF;
+ return;
+ }
+
+ /* Reset the group address register */
+ FEC_GALR(base_addr) = 0x00000000;
+ FEC_GAUR(base_addr) = 0x00000000;
+
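+ /*
+ * The FEC filters multicast traffic with a 64-bit hash table
+ * (GAUR:GALR): the top 6 bits of the Ethernet CRC-32 of each
+ * address select the bit to set below.
+ */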
+ /* Process all addresses */
+ for (i = 0, dmi = dev->mc_list; i < dev->mc_count;
+ i++, dmi = dmi->next) {
+ /* Processing must be only for the group addresses */
+ if (!(dmi->dmi_addr[0] & 1))
+ continue;
+
+ /* Calculate crc value for the current address */
+ crc = 0xFFFFFFFF;
+ for (j = 0; j < dmi->dmi_addrlen; j++) {
+ for (k = 0, data = dmi->dmi_addr[j];
+ k < 8; k++, data >>= 1) {
+ if ((crc ^ data) & 1)
+ crc = (crc >> 1) ^ FEC_CRCPOL;
+ else
+ crc >>= 1;
+ }
+ }
+
+ /* Add this value */
+ crc >>= 26;
+ crc &= 0x3F;
+ if (crc > 31)
+ FEC_GAUR(base_addr) |= 0x1 << (crc - 32);
+ else
+ FEC_GALR(base_addr) |= 0x1 << crc;
+ }
+}
+
+/************************************************************************
+* NAME: fec_set_mac_address
+*
+* DESCRIPTION: This function sets the MAC address
+*************************************************************************/
+int fec_set_mac_address(struct net_device *dev, void *p)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Copy a new address to the device structure */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ /* Copy a new address to the private structure */
+ memcpy(fp->fecpriv_mac_addr, addr->sa_data, 6);
+
+ /* Set the address to the registers */
+ FEC_PALR(base_addr) = (fp->fecpriv_mac_addr[0] << 24) |
+ (fp->fecpriv_mac_addr[1] << 16) |
+ (fp->fecpriv_mac_addr[2] << 8) |
+ fp->fecpriv_mac_addr[3];
+ FEC_PAUR(base_addr) = (fp->fecpriv_mac_addr[4] << 24) |
+ (fp->fecpriv_mac_addr[5] << 16) |
+ 0x8808;
+
+ return 0;
+}
+
+/************************************************************************
+* NAME: fec_tx
+*
+* DESCRIPTION: This function starts transmission of the frame using DMA
+*
+* RETURNS: This function always returns zero.
+*************************************************************************/
+int fec_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ void *data, *data_aligned;
+ int offset;
+
+ data = kmalloc(skb->len + 15, GFP_DMA | GFP_ATOMIC);
+
+ if (!data) {
+ fp->fecpriv_stat.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
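+ /*
+ * Copy the frame into a 16-byte aligned bounce buffer so the DMA
+ * always sees an aligned, physically contiguous block; the original
+ * skb is freed once the copy has been queued.
+ */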
+ offset = (((unsigned long)virt_to_phys(data) + 15) & 0xFFFFFFF0) -
+ (unsigned long)virt_to_phys(data);
+ data_aligned = (void *)((unsigned long)data + offset);
+ memcpy(data_aligned, skb->data, skb->len);
+
+ /* flush data cache before initializing
+ * the descriptor and starting DMA */
+
+ spin_lock_irq(&fp->fecpriv_lock);
+
+ /* Initialize the descriptor */
+ fp->fecpriv_txbuf[fp->fecpriv_next_tx] = data;
+ fp->fecpriv_txdesc[fp->fecpriv_next_tx].dataPointer
+ = (unsigned int) virt_to_phys(data_aligned);
+ fp->fecpriv_txdesc[fp->fecpriv_next_tx].length = skb->len;
+ fp->fecpriv_txdesc[fp->fecpriv_next_tx].statCtrl
+ |= (MCD_FEC_END_FRAME | MCD_FEC_BUF_READY);
+ fp->fecpriv_next_tx = (fp->fecpriv_next_tx + 1) & FEC_TX_INDEX_MASK;
+
+ if (fp->fecpriv_txbuf[fp->fecpriv_current_tx]
+ && fp->fecpriv_current_tx == fp->fecpriv_next_tx)
+ netif_stop_queue(dev);
+
+ spin_unlock_irq(&fp->fecpriv_lock);
+
+ /* Tell the DMA to continue the transmission */
+ MCD_continDma(fp->fecpriv_fec_tx_channel);
+
+ dev_kfree_skb(skb);
+
+ dev->trans_start = jiffies;
+
+ return 0;
+}
+
+/************************************************************************
+* NAME: fec_tx_timeout
+*
+* DESCRIPTION: If transmission stalls (for example, because completion
+* interrupts were lost and the DMA stopped), this function discards the
+* pending transmit buffers, resets the descriptors and FIFOs, and
+* restarts the transmit DMA
+*
+*************************************************************************/
+void fec_tx_timeout(struct net_device *dev)
+{
+ int i;
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+
+ spin_lock_irq(&fp->fecpriv_lock);
+ MCD_killDma(fp->fecpriv_fec_tx_channel);
+ for (i = 0; i < FEC_TX_BUF_NUMBER; i++) {
+ if (fp->fecpriv_txbuf[i]) {
+ kfree(fp->fecpriv_txbuf[i]);
+ fp->fecpriv_txbuf[i] = NULL;
+ }
+ fp->fecpriv_txdesc[i].statCtrl = MCD_FEC_INTERRUPT;
+ }
+ fp->fecpriv_txdesc[i - 1].statCtrl |= MCD_FEC_WRAP;
+
+ fp->fecpriv_current_tx = fp->fecpriv_next_tx = 0;
+
+ /* Reset FIFOs */
+ FEC_FECFRST(base_addr) |= FEC_SW_RST;
+ FEC_FECFRST(base_addr) &= ~FEC_SW_RST;
+
+ /* Reset and disable FEC */
+ /* FEC_ECR(base_addr) = FEC_ECR_RESET; */
+
+ /* Enable FEC */
+ FEC_ECR(base_addr) |= FEC_ECR_ETHEREN;
+
+ MCD_startDma(fp->fecpriv_fec_tx_channel, (char *) fp->fecpriv_txdesc, 0,
+ (unsigned char *) &(FEC_FECTFDR(base_addr)), 0,
+ FEC_MAX_FRM_SIZE, 0, fp->fecpriv_initiator_tx,
+ FEC_TX_DMA_PRI, MCD_FECTX_DMA | MCD_INTERRUPT,
+ MCD_NO_CSUM | MCD_NO_BYTE_SWAP);
+
+ spin_unlock_irq(&fp->fecpriv_lock);
+
+ netif_wake_queue(dev);
+
+}
+
+/************************************************************************
+* NAME: fec_interrupt_fec_tx_handler
+*
+* DESCRIPTION: This function is called when the data
+* transmission from the buffer to the FEC is completed.
+*
+*************************************************************************/
+void fec_interrupt_fec_tx_handler(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+
+ /* Release the socket buffer */
+ if (fp->fecpriv_txbuf[fp->fecpriv_current_tx]) {
+ kfree(fp->fecpriv_txbuf[fp->fecpriv_current_tx]);
+ fp->fecpriv_txbuf[fp->fecpriv_current_tx] = NULL;
+ }
+ fp->fecpriv_current_tx =
+ (fp->fecpriv_current_tx + 1) & FEC_TX_INDEX_MASK;
+
+ if (MCD_dmaStatus(fp->fecpriv_fec_tx_channel) == MCD_DONE) {
+ for (; fp->fecpriv_current_tx != fp->fecpriv_next_tx;
+ fp->fecpriv_current_tx =
+ (fp->fecpriv_current_tx + 1)
+ & FEC_TX_INDEX_MASK) {
+ if (fp->fecpriv_txbuf[fp->fecpriv_current_tx]) {
+ kfree(fp->fecpriv_txbuf[
+ fp->fecpriv_current_tx]);
+ fp->fecpriv_txbuf[fp->fecpriv_current_tx]
+ = NULL;
+ }
+ }
+ }
+
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+}
+
+/************************************************************************
+* NAME: fec_interrupt_fec_rx_handler
+*
+* DESCRIPTION: This function is called when the data
+* reception from the FEC to the reception buffer is completed.
+*
+*************************************************************************/
+void fec_interrupt_fec_rx_handler(struct net_device *dev)
+{
+ struct fec_priv *fp = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i;
+
+ fp->fecpriv_rxflag = 1;
+ /* Some buffers can be missed */
+ if (!(fp->fecpriv_rxdesc[fp->fecpriv_current_rx].statCtrl
+ & MCD_FEC_END_FRAME)) {
+ /* Find a valid index */
+ for (i = 0; ((i < FEC_RX_BUF_NUMBER) &&
+ !(fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].statCtrl
+ & MCD_FEC_END_FRAME)); i++,
+ (fp->fecpriv_current_rx =
+ (fp->fecpriv_current_rx + 1)
+ & FEC_RX_INDEX_MASK))
+ ;
+
+ if (i == FEC_RX_BUF_NUMBER) {
+ /* There are no data to process */
+ /* Tell the DMA to continue the reception */
+ MCD_continDma(fp->fecpriv_fec_rx_channel);
+
+ fp->fecpriv_rxflag = 0;
+
+ return;
+ }
+ }
+
+ for (; fp->fecpriv_rxdesc[fp->fecpriv_current_rx].statCtrl
+ & MCD_FEC_END_FRAME;
+ fp->fecpriv_current_rx = (fp->fecpriv_current_rx + 1)
+ & FEC_RX_INDEX_MASK) {
+ if ((fp->fecpriv_rxdesc[fp->fecpriv_current_rx].length
+ <= FEC_MAXBUF_SIZE) &&
+ (fp->fecpriv_rxdesc[fp->fecpriv_current_rx].length
+ > 4)) {
+ /* --tym-- */
+ skb = fp->askb_rx[fp->fecpriv_current_rx];
+ if (!skb)
+ fp->fecpriv_stat.rx_dropped++;
+ else {
+ /*
+ * flush data cache before initializing
+ * the descriptor and starting DMA
+ */
+ skb_put(skb,
+ (fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].length - 4));
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ }
+ fp->fecpriv_rxdesc[fp->fecpriv_current_rx].statCtrl &=
+ ~MCD_FEC_END_FRAME;
+ /* allocate new skbuff */
+ fp->askb_rx[fp->fecpriv_current_rx] =
+ alloc_skb(FEC_MAXBUF_SIZE + 16,
+ /*GFP_ATOMIC |*/ GFP_DMA);
+ if (!fp->askb_rx[fp->fecpriv_current_rx]) {
+ fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].dataPointer
+ = 0;
+ fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].length = 0;
+ fp->fecpriv_stat.rx_dropped++;
+ } else {
+ skb_reserve(
+ fp->askb_rx[fp->fecpriv_current_rx], 16);
+ fp->askb_rx[fp->fecpriv_current_rx]->dev = dev;
+
+ /*
+ * flush data cache before initializing
+ * the descriptor and starting DMA
+ */
+
+ fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].dataPointer =
+ (unsigned int) virt_to_phys(
+ fp->askb_rx[
+ fp->fecpriv_current_rx]->tail);
+ fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].length =
+ FEC_MAXBUF_SIZE;
+ fp->fecpriv_rxdesc[
+ fp->fecpriv_current_rx].statCtrl |=
+ MCD_FEC_BUF_READY;
+
+ /*
+ * flush data cache before initializing
+ * the descriptor and starting DMA
+ */
+ }
+ }
+
+ }
+
+ /* Tell the DMA to continue the reception */
+ MCD_continDma(fp->fecpriv_fec_rx_channel);
+
+ fp->fecpriv_rxflag = 0;
+}
+
+/************************************************************************
+* NAME: fec_interrupt_handler
+*
+* DESCRIPTION: This function is called when some special errors occur
+*
+*************************************************************************/
+irqreturn_t fec_interrupt_handler(int irq, void *dev_id)
+{
+
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+ unsigned long events;
+
+ /* Read and clear the events */
+ events = FEC_EIR(base_addr) & FEC_EIMR(base_addr);
+
+ if (events & FEC_EIR_HBERR) {
+ fp->fecpriv_stat.tx_heartbeat_errors++;
+ FEC_EIR(base_addr) = FEC_EIR_HBERR;
+ }
+
+ /* receive/transmit FIFO error */
+ if (((events & FEC_EIR_RFERR) != 0)
+ || ((events & FEC_EIR_XFERR) != 0)) {
+ /* kill DMA receive channel */
+ MCD_killDma(fp->fecpriv_fec_rx_channel);
+
+ /* kill running transmission by DMA */
+ MCD_killDma(fp->fecpriv_fec_tx_channel);
+
+ /* Reset FIFOs */
+ FEC_FECFRST(base_addr) |= FEC_SW_RST;
+ FEC_FECFRST(base_addr) &= ~FEC_SW_RST;
+
+ /* reset receive FIFO status register */
+ FEC_FECRFSR(base_addr) = FEC_FECRFSR_FAE |
+ FEC_FECRFSR_RXW |
+ FEC_FECRFSR_UF;
+
+ /* reset transmit FIFO status register */
+ FEC_FECTFSR(base_addr) = FEC_FECTFSR_FAE |
+ FEC_FECTFSR_TXW |
+ FEC_FECTFSR_UF |
+ FEC_FECTFSR_OF;
+
+ /* reset RFERR and XFERR event */
+ FEC_EIR(base_addr) = FEC_EIR_RFERR | FEC_EIR_XFERR;
+
+ /* stop queue */
+ netif_stop_queue(dev);
+
+ /* execute reinitialization as tasklet */
+ tasklet_schedule(&fp->fecpriv_tasklet_reinit);
+
+ fp->fecpriv_stat.rx_dropped++;
+ }
+
+ /* transmit FIFO underrun */
+ if ((events & FEC_EIR_XFUN) != 0) {
+ /* reset XFUN event */
+ FEC_EIR(base_addr) = FEC_EIR_XFUN;
+ fp->fecpriv_stat.tx_aborted_errors++;
+ }
+
+ /* late collision */
+ if ((events & FEC_EIR_LC) != 0) {
+ /* reset LC event */
+ FEC_EIR(base_addr) = FEC_EIR_LC;
+ fp->fecpriv_stat.tx_aborted_errors++;
+ }
+
+ /* collision retry limit */
+ if ((events & FEC_EIR_RL) != 0) {
+ /* reset RL event */
+ FEC_EIR(base_addr) = FEC_EIR_RL;
+ fp->fecpriv_stat.tx_aborted_errors++;
+ }
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+* NAME: fec_interrupt_fec_reinit
+*
+* DESCRIPTION: This function is called from the interrupt handler
+* when the controller must be reinitialized.
+*
+*************************************************************************/
+void fec_interrupt_fec_reinit(unsigned long data)
+{
+ int i;
+ struct net_device *dev = (struct net_device *)data;
+ struct fec_priv *fp = netdev_priv(dev);
+ unsigned long base_addr = (unsigned long) dev->base_addr;
+
+ /* Initialize reception descriptors and start DMA for the reception */
+ for (i = 0; i < FEC_RX_BUF_NUMBER; i++) {
+ if (!fp->askb_rx[i]) {
+ fp->askb_rx[i] = alloc_skb(FEC_MAXBUF_SIZE + 16,
+ GFP_ATOMIC | GFP_DMA);
+ if (!fp->askb_rx[i]) {
+ fp->fecpriv_rxdesc[i].dataPointer = 0;
+ fp->fecpriv_rxdesc[i].statCtrl = 0;
+ fp->fecpriv_rxdesc[i].length = 0;
+ continue;
+ }
+ fp->askb_rx[i]->dev = dev;
+ skb_reserve(fp->askb_rx[i], 16);
+ }
+ fp->fecpriv_rxdesc[i].dataPointer =
+ (unsigned int) virt_to_phys(fp->askb_rx[i]->tail);
+ fp->fecpriv_rxdesc[i].statCtrl =
+ MCD_FEC_BUF_READY | MCD_FEC_INTERRUPT;
+ fp->fecpriv_rxdesc[i].length = FEC_MAXBUF_SIZE;
+ }
+
+ fp->fecpriv_rxdesc[i - 1].statCtrl |= MCD_FEC_WRAP;
+ fp->fecpriv_current_rx = 0;
+
+ /* restart frame transmission */
+ for (i = 0; i < FEC_TX_BUF_NUMBER; i++) {
+ if (fp->fecpriv_txbuf[i]) {
+ kfree(fp->fecpriv_txbuf[i]);
+ fp->fecpriv_txbuf[i] = NULL;
+ fp->fecpriv_stat.tx_dropped++;
+ }
+ fp->fecpriv_txdesc[i].statCtrl = MCD_FEC_INTERRUPT;
+ }
+ fp->fecpriv_txdesc[i - 1].statCtrl |= MCD_FEC_WRAP;
+ fp->fecpriv_current_tx = fp->fecpriv_next_tx = 0;
+
+ /* flush entire data cache before restarting the DMA */
+
+ /* restart DMA from beginning */
+ MCD_startDma(fp->fecpriv_fec_rx_channel,
+ (char *) fp->fecpriv_rxdesc, 0,
+ (unsigned char *) &(FEC_FECRFDR(base_addr)), 0,
+ FEC_MAX_FRM_SIZE, 0, fp->fecpriv_initiator_rx,
+ FEC_RX_DMA_PRI, MCD_FECRX_DMA | MCD_INTERRUPT,
+ MCD_NO_CSUM | MCD_NO_BYTE_SWAP);
+
+ MCD_startDma(fp->fecpriv_fec_tx_channel, (char *) fp->fecpriv_txdesc, 0,
+ (unsigned char *) &(FEC_FECTFDR(base_addr)), 0,
+ FEC_MAX_FRM_SIZE, 0, fp->fecpriv_initiator_tx,
+ FEC_TX_DMA_PRI, MCD_FECTX_DMA | MCD_INTERRUPT,
+ MCD_NO_CSUM | MCD_NO_BYTE_SWAP);
+
+ /* Enable FEC */
+ FEC_ECR(base_addr) |= FEC_ECR_ETHEREN;
+
+ netif_wake_queue(dev);
+}
+
+/************************************************************************
+* NAME: fec_interrupt_fec_tx_handler_fec0
+*
+* DESCRIPTION: This is the DMA interrupt handler used for the FEC0
+* transmission.
+*
+*************************************************************************/
+void fec_interrupt_fec_tx_handler_fec0(void)
+{
+ fec_interrupt_fec_tx_handler(fec_dev[0]);
+}
+
+#ifdef FEC_2
+/************************************************************************
+* NAME: fec_interrupt_fec_tx_handler_fec1
+*
+* DESCRIPTION: This is the DMA interrupt handler used for the FEC1
+* transmission.
+*
+*************************************************************************/
+void fec_interrupt_fec_tx_handler_fec1(void)
+{
+ fec_interrupt_fec_tx_handler(fec_dev[1]);
+}
+#endif
+
+/************************************************************************
+* NAME: fec_interrupt_fec_rx_handler_fec0
+*
+* DESCRIPTION: This is the DMA interrupt handler used for the FEC0
+* reception.
+*
+*************************************************************************/
+void fec_interrupt_fec_rx_handler_fec0(void)
+{
+ fec_interrupt_fec_rx_handler(fec_dev[0]);
+}
+
+#ifdef FEC_2
+/************************************************************************
+* NAME: fec_interrupt_fec_rx_handler_fec1
+*
+* DESCRIPTION: This is the DMA interrupt handler used for the FEC1
+* reception.
+*
+*************************************************************************/
+void fec_interrupt_fec_rx_handler_fec1(void)
+{
+ fec_interrupt_fec_rx_handler(fec_dev[1]);
+}
+
+#endif
+
+#ifndef MODULE
+/************************************************************************
+* NAME: fec_mac_setup0
+*
+* DESCRIPTION: This function sets the MAC address of FEC0 from command line
+*
+*************************************************************************/
+int __init fec_mac_setup0(char *s)
+{
+ if (!s || !*s)
+ return 1;
+
+ if (fec_str_to_mac(s, fec_mac_addr_fec0))
+ printk(KERN_ERR "The MAC address of FEC0 "
+ "cannot be set from command line");
+ return 1;
+}
+
+#ifdef FEC_2
+
+/************************************************************************
+* NAME: fec_mac_setup1
+*
+* DESCRIPTION: This function sets the MAC address of FEC1 from command line
+*
+*************************************************************************/
+int __init fec_mac_setup1(char *s)
+{
+ if (!s || !*s)
+ return 1;
+
+ if (fec_str_to_mac(s, fec_mac_addr_fec1))
+ printk(KERN_ERR "The MAC address of FEC1 "
+ "cannot be set from command line\n");
+ return 1;
+}
+#endif
+
+/************************************************************************
+* NAME: fec_str_to_mac
+*
+* DESCRIPTION: This function interprets the character string into MAC addr
+*
+*************************************************************************/
+int fec_str_to_mac(char *str_mac, unsigned char* addr)
+{
+ unsigned long val;
+ char c;
+ unsigned long octet[6], *octetptr = octet;
+ int i;
+
+again:
+ val = 0;
+ while ((c = *str_mac) != '\0') {
+ if ((c >= '0') && (c <= '9')) {
+ val = (val * 16) + (c - '0');
+ str_mac++;
+ continue;
+ } else if (((c >= 'a') && (c <= 'f'))
+ || ((c >= 'A') && (c <= 'F'))) {
+ val = (val << 4) +
+ (c + 10 -
+ (((c >= 'a') && (c <= 'f')) ? 'a' : 'A'));
+ str_mac++;
+ continue;
+ }
+ break;
+ }
+ if (*str_mac == ':') {
+ *octetptr++ = val, str_mac++;
+ if (octetptr >= octet + 6)
+ return 1;
+ goto again;
+ }
+
+ /* Check for trailing characters */
+ if (*str_mac && !(*str_mac == ' '))
+ return 1;
+
+ *octetptr++ = val;
+
+ if ((octetptr - octet) == 6) {
+ for (i = 0; i <= 6; i++)
+ addr[i] = octet[i];
+ } else
+ return 1;
+
+ return 0;
+}
+#endif
--- /dev/null
+
+#define FEC_BASE_ADDR_FEC0 ((unsigned int)MCF_MBAR + 0x9000)
+#define FEC_BASE_ADDR_FEC1 ((unsigned int)MCF_MBAR + 0x9800)
+
+/*
+#define FEC_INTC_IMRH_INT_MASK38 (0x00000040)
+#define FEC_INTC_IMRH_INT_MASK39 (0x00000080)
+#define FEC_INTC_ICR_FEC0 (0x30)
+#define FEC_INTC_ICR_FEC1 (0x31)
+*/
+#define FEC_FECI2CIRQ (0xFFC0)
+#define FEC_GPIO_PAR_FECI2CIRQ \
+ (*(volatile unsigned short *)((unsigned int)MCF_MBAR + 0xA44))
+/*
+#define FEC_INTC_ICRn(x) \
+(*(volatile unsigned char *)(void*)
+((unsigned int) MCF_MBAR + 0x000740+((x)*0x001)))
+#define FEC_INTC_IMRH \
+ *(volatile unsigned int*)((unsigned int)MCF_MBAR + 0x000708)
+*/
+#define FEC_ECR_DISABLE (0x00000000)
+
+#define FEC_ECR(x) \
+ (*(volatile unsigned int *)(x + 0x024))
+#define FEC_EIR(x) \
+ (*(volatile unsigned int *)(x + 0x004))
+#define FEC_PALR(x) \
+ (*(volatile unsigned int *)(x + 0x0E4))
+#define FEC_PAUR(x) \
+ (*(volatile unsigned int *)(x + 0x0E8))
+#define FEC_IALR(x) \
+ (*(volatile unsigned int *)(x + 0x11C))
+#define FEC_IAUR(x) \
+ (*(volatile unsigned int *)(x + 0x118))
+#define FEC_GALR(x) \
+ (*(volatile unsigned int *)(x + 0x124))
+#define FEC_GAUR(x) \
+ (*(volatile unsigned int *)(x + 0x120))
+#define FEC_RCR(x) \
+ (*(volatile unsigned int *)(x + 0x084))
+#define FEC_FECRFCR(x) \
+ (*(volatile unsigned int *)(x + 0x18C))
+#define FEC_FECRFAR(x) \
+ (*(volatile unsigned int *)(x + 0x198))
+#define FEC_FECTFCR(x) \
+ (*(volatile unsigned int *)(x + 0x1AC))
+#define FEC_FECTFAR(x) \
+ (*(volatile unsigned int *)(x + 0x1B8))
+#define FEC_FECTFWR(x) \
+ (*(volatile unsigned int *)(x + 0x144))
+#define FEC_CTCWR(x) \
+ (*(volatile unsigned int *)(x + 0x1C8))
+#define FEC_EIMR(x) \
+ (*(volatile unsigned int *)(x + 0x008))
+#define FEC_TCR(x) \
+ (*(volatile unsigned int *)(x + 0x0C4))
+#define FEC_MIBC(x) \
+ (*(volatile unsigned int *)(x + 0x064))
+#define FEC_MSCR(x) \
+ (*(volatile unsigned int *)(x + 0x044))
+#define FEC_FECTFDR(x) \
+ (*(volatile unsigned int *)(x + 0x1A4))
+#define FEC_FECRFDR(x) \
+ (*(volatile unsigned int *)(x + 0x184))
+#define FEC_FECTFSR(x) \
+ (*(volatile unsigned int *)(x + 0x1A8))
+#define FEC_FECRFSR(x) \
+ (*(volatile unsigned int *)(x + 0x188))
+#define FECSTAT_RMON_R_PACKETS(x) \
+ (*(volatile unsigned int *)(x + 0x284))
+#define FECSTAT_RMON_T_PACKETS(x) \
+ (*(volatile unsigned int *)(x + 0x204))
+#define FECSTAT_RMON_R_OCTETS(x) \
+ (*(volatile unsigned int *)(x + 0x2C4))
+#define FECSTAT_RMON_T_OCTETS(x) \
+ (*(volatile unsigned int *)(x + 0x244))
+#define FECSTAT_RMON_R_UNDERSIZE(x) \
+ (*(volatile unsigned int *)(x + 0x294))
+#define FECSTAT_RMON_R_OVERSIZE(x) \
+ (*(volatile unsigned int *)(x + 0x298))
+#define FECSTAT_RMON_R_FRAG(x) \
+ (*(volatile unsigned int *)(x + 0x29C))
+#define FECSTAT_RMON_R_JAB(x) \
+ (*(volatile unsigned int *)(x + 0x2A0))
+#define FECSTAT_RMON_R_MC_PKT(x) \
+ (*(volatile unsigned int *)(x + 0x28C))
+#define FECSTAT_RMON_T_COL(x) \
+ (*(volatile unsigned int *)(x + 0x224))
+#define FECSTAT_IEEE_R_ALIGN(x) \
+ (*(volatile unsigned int *)(x + 0x2D4))
+#define FECSTAT_IEEE_R_CRC(x) \
+ (*(volatile unsigned int *)(x + 0x2D0))
+#define FECSTAT_IEEE_R_MACERR(x) \
+ (*(volatile unsigned int *)(x + 0x2D8))
+#define FECSTAT_IEEE_T_CSERR(x) \
+ (*(volatile unsigned int *)(x + 0x268))
+#define FECSTAT_IEEE_T_MACERR(x) \
+ (*(volatile unsigned int *)(x + 0x264))
+#define FECSTAT_IEEE_T_LCOL(x) \
+ (*(volatile unsigned int *)(x + 0x25C))
+#define FECSTAT_IEEE_R_OCTETS_OK(x) \
+ (*(volatile unsigned int *)(x + 0x2E0))
+#define FECSTAT_IEEE_T_OCTETS_OK(x) \
+ (*(volatile unsigned int *)(x + 0x274))
+#define FECSTAT_IEEE_R_DROP(x) \
+ (*(volatile unsigned int *)(x + 0x2C8))
+#define FECSTAT_IEEE_T_DROP(x) \
+ (*(volatile unsigned int *)(x + 0x248))
+#define FECSTAT_IEEE_R_FRAME_OK(x) \
+ (*(volatile unsigned int *)(x + 0x2CC))
+#define FECSTAT_IEEE_T_FRAME_OK(x) \
+ (*(volatile unsigned int *)(x + 0x24C))
+#define FEC_MMFR(x) \
+ (*(volatile unsigned int *)(x + 0x040))
+#define FEC_FECFRST(x) \
+ (*(volatile unsigned int *)(x + 0x1C4))
+
+#define FEC_MAX_FRM_SIZE (1518)
+#define FEC_MAXBUF_SIZE (1520)
+
+/* Register values */
+#define FEC_ECR_RESET (0x00000001)
+#define FEC_EIR_CLEAR (0xFFFFFFFF)
+#define FEC_EIR_RL (0x00100000)
+#define FEC_EIR_HBERR (0x80000000)
+#define FEC_EIR_BABR (0x40000000)
+/* babbling receive error */
+#define FEC_EIR_BABT (0x20000000)
+/* babbling transmit error */
+#define FEC_EIR_TXF (0x08000000)
+/* transmit frame interrupt */
+#define FEC_EIR_MII (0x00800000)
+/* MII interrupt */
+#define FEC_EIR_LC (0x00200000)
+/* late collision */
+#define FEC_EIR_XFUN (0x00080000)
+/* transmit FIFO underrun */
+#define FEC_EIR_XFERR (0x00040000)
+/* transmit FIFO error */
+#define FEC_EIR_RFERR (0x00020000)
+/* receive FIFO error */
+#define FEC_RCR_MAX_FRM_SIZE (FEC_MAX_FRM_SIZE << 16)
+#define FEC_RCR_MII (0x00000004)
+#define FEC_FECRFCR_FAE (0x00400000)
+/* frame accept error */
+#define FEC_FECRFCR_RXW (0x00200000)
+/* receive wait condition */
+#define FEC_FECRFCR_UF (0x00100000)
+/* receive FIFO underflow */
+#define FEC_FECRFCR_FRM (0x08000000)
+#define FEC_FECRFCR_GR (0x7 << 24)
+
+#define FEC_EIMR_DISABLE (0x00000000)
+
+#define FEC_FECRFAR_ALARM (0x300)
+#define FEC_FECTFCR_FRM (0x08000000)
+#define FEC_FECTFCR_GR (0x7 << 24)
+#define FEC_FECTFCR_FAE (0x00400000)
+/* frame accept error */
+#define FEC_FECTFCR_TXW (0x00040000)
+/* transmit wait condition */
+#define FEC_FECTFCR_UF (0x00100000)
+/* transmit FIFO underflow */
+#define FEC_FECTFCR_OF (0x00080000)
+/* transmit FIFO overflow */
+
+#define FEC_FECTFAR_ALARM (0x100)
+#define FEC_FECTFWR_XWMRK (0x00000000)
+
+#define FEC_FECTFSR_MSK (0xC0B00000)
+#define FEC_FECTFSR_TXW (0x40000000)
+/* transmit wait condition */
+#define FEC_FECTFSR_FAE (0x00800000)
+/* frame accept error */
+#define FEC_FECTFSR_UF (0x00200000)
+/* transmit FIFO underflow */
+#define FEC_FECTFSR_OF (0x00100000)
+/* transmit FIFO overflow */
+
+#define FEC_FECRFSR_MSK (0x80F00000)
+#define FEC_FECRFSR_FAE (0x00800000)
+/* frame accept error */
+#define FEC_FECRFSR_RXW (0x00400000)
+/* receive wait condition */
+#define FEC_FECRFSR_UF (0x00200000)
+/* receive FIFO underflow */
+
+#define FEC_CTCWR_TFCW_CRC (0x03000000)
+#define FEC_TCR_FDEN (0x00000004)
+#define FEC_TCR_HBC (0x00000002)
+#define FEC_RCR_DRT (0x00000002)
+#define FEC_EIMR_MASK (FEC_EIR_RL | FEC_EIR_HBERR)
+#define FEC_ECR_ETHEREN (0x00000002)
+#define FEC_FECTFCR_MSK (0x00FC0000)
+#define FEC_FECRFCR_MSK (0x00F80000)
+#define FEC_EIR_GRA (0x10000000)
+#define FEC_TCR_GTS (0x00000001)
+#define FEC_MIBC_ENABLE (0x00000000)
+#define FEC_MIB_LEN (228)
+#define FEC_PHY_ADDR (0x01)
+
+#define FEC_RX_DMA_PRI (6)
+#define FEC_TX_DMA_PRI (6)
+
+#define FEC_TX_BUF_NUMBER (8)
+#define FEC_RX_BUF_NUMBER (64)
+
+#define FEC_TX_INDEX_MASK (0x7)
+#define FEC_RX_INDEX_MASK (0x3f)
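+/* Ring sizes are powers of two, so the descriptor indices wrap with a mask */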
+
+#define FEC_RX_DESC_FEC0 SYS_SRAM_FEC_START
+#define FEC_TX_DESC_FEC0 \
+ (FEC_RX_DESC_FEC0 + FEC_RX_BUF_NUMBER * sizeof(MCD_bufDescFec))
+
+#define FEC_RX_DESC_FEC1 \
+ (SYS_SRAM_FEC_START + SYS_SRAM_FEC_SIZE/2)
+#define FEC_TX_DESC_FEC1 \
+ (FEC_RX_DESC_FEC1 + FEC_RX_BUF_NUMBER * sizeof(MCD_bufDescFec))
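+/*
+ * Both descriptor rings live in the on-chip SRAM area reserved for the
+ * FECs; FEC1's rings start halfway through that region.
+ */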
+
+#define FEC_EIR_MII (0x00800000)
+#define FEC_MMFR_READ (0x60020000)
+#define FEC_MMFR_WRITE (0x50020000)
+
+#define FEC_FLAGS_RX (0x00000001)
+
+#define FEC_CRCPOL (0xEDB88320)
+
+#define FEC_MII_TIMEOUT (2)
+#define FEC_GR_TIMEOUT (1)
+#define FEC_TX_TIMEOUT (1)
+#define FEC_RX_TIMEOUT (1)
+
+#define FEC_SW_RST 0x2000000
+#define FEC_RST_CTL 0x1000000
+
+int fec_read_mii(unsigned int base_addr, unsigned int pa, unsigned int ra,
+ unsigned int *data);
+int fec_write_mii(unsigned int base_addr, unsigned int pa, unsigned int ra,
+ unsigned int data);
+
+#define FEC_MII_SPEED \
+ ((MCF_CLK / 2) / ((2500000 / 2) * 2))
--- /dev/null
+/*
+ * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
+ * Chenghu Wu <b16972@freescale.com>
+ *
+ * Driver for Broadcom 522x PHYs
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+/* BCM5222 PHY identifier values */
+#define BCM5222_PHY_ID 0x00406320
+
+/* PHY Register */
+#define BCM5222_TIMEOUT 0x100
+
+/* MII Registers */
+#define BCM5222_CTRL 0x00
+#define BCM5222_STATUS 0x01
+#define BCM5222_ID_HIGH 0x02
+#define BCM5222_ID_LOW 0x03
+#define BCM5222_AN_ADV 0x04
+#define BCM5222_AN_LP 0x05
+#define BCM5222_AN_EXP 0x06
+#define BCM5222_AN_NEXTPG 0x07
+#define BCM5222_AN_LP_NPTX 0x08
+#define BCM5222_AUX_CS 0x18
+#define BCM5222_AUX_STATUS 0x19
+
+/* CONTROL Bits */
+#define BCM5222_CTRL_RESET 0x8000
+#define BCM5222_CTRL_LOOPBACK 0x4000
+#define BCM5222_CTRL_FORCE 0x2000
+#define BCM5222_CTRL_AUTOEN 0x1000
+#define BCM5222_CTRL_PWRDN 0x0800
+#define BCM5222_CTRL_ISOLATE 0x0400
+#define BCM5222_CTRL_RESTART 0x0200
+#define BCM5222_CTRL_DUPLEX 0x0100
+#define BCM5222_CTRL_COLLEN 0x0080
+
+/* STATUS Bits */
+#define BCM5222_STATUS_100T4 0x8000
+#define BCM5222_STATUS_100TXFDX 0x4000
+#define BCM5222_STATUS_100TX 0x2000
+#define BCM5222_STATUS_10FDX 0x1000
+#define BCM5222_STATUS_10 0x0800
+#define BCM5222_STATUS_MF_PREAMBLE 0x0040
+#define BCM5222_STATUS_AN_COMPLETE 0x0020
+#define BCM5222_STATUS_REMOTE_FAULT 0x0010
+#define BCM5222_STATUS_AN_CAPABLE 0x0008
+#define BCM5222_STATUS_LINK 0x0004
+#define BCM5222_STATUS_JABBER 0x0002
+#define BCM5222_STATUS_EXT_CAP 0x0001
+
+/* ID Values */
+#define BCM5222_ID_HIGH_VAL 0x0040
+#define BCM5222_ID_LOW_VAL 0x6320
+
+/* Advertise Bits */
+#define BCM5222_AN_ADV_NEXTPG 0x8000
+#define BCM5222_AN_ADV_REMOTE_FAULT 0x2000
+#define BCM5222_AN_ADV_PAUSE 0x0400
+#define BCM5222_AN_ADV_100T4 0x0200
+#define BCM5222_AN_ADV_100TXFDX 0x0100
+#define BCM5222_AN_ADV_100TX 0x0080
+#define BCM5222_AN_ADV_10FDX 0x0040
+#define BCM5222_AN_ADV_10 0x0020
+#define BCM5222_AN_ADV_8023 0x0001
+#define BCM5222_AN_ADV_ALL \
+ (BCM5222_AN_ADV_100TXFDX | \
+ BCM5222_AN_ADV_100TX | \
+ BCM5222_AN_ADV_10FDX | \
+ BCM5222_AN_ADV_10 | \
+ BCM5222_AN_ADV_8023)
+
+/* AUX CTRL/STATUS Bits */
+#define BCM5222_AUX_CS_JABBER_DIS 0x8000
+#define BCM5222_AUX_CS_FORCE_LINK 0x4000
+#define BCM5222_AUX_CS_10M_TX_PWR 0x0100
+#define BCM5222_AUX_CS_HSQ_LSQ_MASK 0x00c0
+#define BCM5222_AUX_CS_EDGE_RATE_MASK 0x0030
+#define BCM5222_AUX_CS_AN_IND 0x0008
+#define BCM5222_AUX_CS_SPEED_FORCE 0x0004
+#define BCM5222_AUX_CS_SPEED 0x0002
+#define BCM5222_AUX_CS_DUPLEX 0x0001
+
+/* AUX STATUS Bits */
+#define BCM5222_AUX_STATUS_AN_COMP 0x8000
+#define BCM5222_AUX_STATUS_AN_COMPACK 0x4000
+#define BCM5222_AUX_STATUS_AN_ACKDET 0x2000
+#define BCM5222_AUX_STATUS_AN_ABDET 0x1000
+#define BCM5222_AUX_STATUS_AN_PAUSE 0x0800
+#define BCM5222_AUX_STATUS_AN_HCDMASK 0x0700
+#define BCM5222_AUX_STATUS_AN_PDFAULT 0x0080
+#define BCM5222_AUX_STATUS_LP_RMTFAULT 0x0040
+#define BCM5222_AUX_STATUS_LP_PGRX 0x0020
+#define BCM5222_AUX_STATUS_LP_NEGABLE 0x0010
+#define BCM5222_AUX_STATUS_SPEED 0x0008
+#define BCM5222_AUX_STATUS_LINK 0x0004
+#define BCM5222_AUX_STATUS_AN_EN 0x0002
+#define BCM5222_AUX_STATUS_JABBER 0x0001
+
+static int bcm5222_config_intr(struct phy_device *phydev)
+{
+ int err = 0;
+ printk(KERN_INFO "%s PHY_INTERRUPT %x\n",
+ __func__, phydev->interrupts);
+
+ return err;
+}
+
+static int bcm5222_ack_interrupt(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int bcm5222_config_init(struct phy_device *phydev)
+{
+ return bcm5222_ack_interrupt(phydev);
+}
+
+static int bcm5222_config_init_old(struct phy_device *phydev)
+{
+ int timeout;
+ int flag = 1;
+ int ret = phy_read(phydev, BCM5222_AUX_STATUS);
+ if (ret < 0) {
+ printk(KERN_INFO "%s MII_BCM5222_ISR %x\n",
+ __func__, ret);
+ }
+ /*
+ * reset
+ */
+ phy_write(phydev, BCM5222_CTRL, BCM5222_CTRL_RESET);
+
+ /* check that it cleared */
+ ret = phy_read(phydev, BCM5222_CTRL);
+ printk(KERN_INFO "%s BCM5222_CTRL %x\n",
+ __func__, ret);
+ /*if reset bit is set, return */
+ if (ret & BCM5222_CTRL_RESET) {
+ printk(KERN_ERR "%s %x = BCM5222_CTRL_RESET(%x)\n",
+ __func__, ret, BCM5222_CTRL_RESET);
+ return -ETIME;
+ }
+
+ /*
+ * setup auto-negotiation
+ */
+
+ /* disable */
+ phy_write(phydev, BCM5222_CTRL, 0);
+ ret = phy_read(phydev, BCM5222_CTRL);
+ printk(KERN_INFO "%s BCM5222_CTRL %x\n",
+ __func__, ret);
+ /* set the auto-negotiation advertisement register */
+ phy_write(phydev, BCM5222_AN_ADV, BCM5222_AN_ADV_ALL);
+ ret = phy_read(phydev, BCM5222_AN_ADV);
+ printk(KERN_INFO "%s BCM5222_AN_ADV %x, BCM5222_AN_ADV_ALL %x\n",
+ __func__, ret, BCM5222_AN_ADV_ALL);
+ /* enable */
+ phy_write(phydev, BCM5222_CTRL, BCM5222_CTRL_AUTOEN);
+ ret = phy_read(phydev, BCM5222_CTRL);
+ printk(KERN_INFO "%s BCM5222_CTRL %x\n",
+ __func__, ret);
+ printk(KERN_INFO "** wait for complete\n");
+
+ /* read aux status reg */
+ ret = phy_read(phydev, BCM5222_AUX_STATUS);
+ /* Wait for the auto-negotiation completion */
+ timeout = BCM5222_TIMEOUT;
+ while (!(ret & BCM5222_AUX_STATUS_AN_COMP)) {
+ if (!timeout--) {
+ flag = 0;
+ printk(KERN_INFO "BCM5222: TIMEOUT\n");
+ break;
+ }
+
+ mdelay(10);
+ /* Read PHY status register */
+ ret = phy_read(phydev, BCM5222_AUX_STATUS);
+ }
+
+ ret = phy_read(phydev, BCM5222_AUX_STATUS);
+ ret = phy_read(phydev, BCM5222_AN_ADV);
+ return 0;
+}
+
+static int bcm5222_read_status(struct phy_device *phydev)
+{
+ int ret;
+ ret = phy_read(phydev, BCM5222_AUX_STATUS);
+ printk(KERN_INFO "%s ret %x\n", __func__, ret);
+
+ if (ret & BCM5222_AUX_STATUS_LINK)
+ phydev->link = 1;
+ else
+ phydev->link = 0;
+
+ if (ret & BCM5222_AUX_STATUS_SPEED)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ ret = phy_read(phydev, BCM5222_AUX_CS);
+ printk(KERN_INFO "%s ret %x\n", __func__, ret);
+ if (ret & BCM5222_AUX_CS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ return 0;
+}
+
+static int bcm5222_config_aneg(struct phy_device *phydev)
+{
+ phy_read(phydev, BCM5222_AUX_STATUS);
+ phy_read(phydev, BCM5222_AN_ADV);
+ return 0;
+}
+
+static struct phy_driver bcm5222_driver = {
+ .phy_id = BCM5222_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5222",
+ .features = PHY_BASIC_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm5222_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm5222_ack_interrupt,
+ .config_intr = bcm5222_config_intr,
+ .driver = {.owner = THIS_MODULE,}
+};
+
+static int __init bcm5222_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&bcm5222_driver);
+ if (ret)
+ goto err1;
+
+ return 0;
+err1:
+ printk(KERN_INFO "register bcm5222 PHY driver fail\n");
+ return ret;
+}
+
+static void __exit bcm5222_exit(void)
+{
+ phy_driver_unregister(&bcm5222_driver);
+}
+
+MODULE_DESCRIPTION("Broadcom PHY driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(bcm5222_init);
+module_exit(bcm5222_exit);
--- /dev/null
+--- a/arch/m68k/include/asm/atomic_mm.h
++++ b/arch/m68k/include/asm/atomic_mm.h
+@@ -20,12 +20,20 @@
+
+ static inline void atomic_add(int i, atomic_t *v)
+ {
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
++#else
++ __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
++#endif
+ }
+
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
++#else
++ __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
++#endif
+ }
+
+ static inline void atomic_inc(atomic_t *v)
+@@ -45,6 +53,14 @@ static inline int atomic_dec_and_test(at
+ return c != 0;
+ }
+
++static __inline__ int atomic_dec_and_test_lt(volatile atomic_t *v)
++{
++ char c;
++ __asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)
++ : "m" (*v));
++ return c != 0 ;
++}
++
+ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ char c;
+@@ -155,7 +171,12 @@ static inline int atomic_sub_and_test(in
+ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ char c;
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
++#else
++ __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "=m" (*v)
++ : "d" (i) , "m" (*v));
++#endif
+ return c != 0;
+ }
+
+--- a/arch/m68k/include/asm/bitops_mm.h
++++ b/arch/m68k/include/asm/bitops_mm.h
+@@ -8,6 +8,10 @@
+ * for more details.
+ */
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_bitops.h>
++#else
++
+ #ifndef _LINUX_BITOPS_H
+ #error only <linux/bitops.h> can be included directly
+ #endif
+@@ -461,4 +465,6 @@ static inline int ext2_find_next_bit(con
+
+ #endif /* __KERNEL__ */
+
++#endif /* CONFIG_COLDFIRE */
++
+ #endif /* _M68K_BITOPS_H */
+--- a/arch/m68k/include/asm/bootinfo.h
++++ b/arch/m68k/include/asm/bootinfo.h
+@@ -25,6 +25,51 @@
+ #define _M68K_BOOTINFO_H
+
+
++#ifndef __ASSEMBLY__
++/*
++ * UBoot Support
++ *
++ * bd_info structure from uboot1.3.2/arch/m68k/include/asm/u-boot.h
++ */
++
++struct bd_info {
++ unsigned long bi_memstart; /* start of DRAM memory */
++ unsigned long bi_memsize; /* size of DRAM memory in bytes */
++ unsigned long bi_flashstart; /* start of FLASH memory */
++ unsigned long bi_flashsize; /* size of FLASH memory */
++ unsigned long bi_flashoffset; /* reserved area for startup monitor */
++ unsigned long bi_sramstart; /* start of SRAM memory */
++ unsigned long bi_sramsize; /* size of SRAM memory */
++ unsigned long bi_mbar_base; /* base of internal registers */
++ unsigned long bi_bootflags; /* boot / reboot flag (for LynxOS) */
++ unsigned long bi_boot_params; /* where this board expects params */
++ unsigned long bi_ip_addr; /* IP Address */
++ unsigned char bi_enet0addr[6]; /* Ethernet 0 mac address */
++ unsigned short bi_ethspeed; /* Ethernet speed in Mbps */
++ unsigned long bi_intfreq; /* Internal Freq, in MHz */
++ unsigned long bi_busfreq; /* Bus Freq, in MHz */
++#ifdef UBOOT_EXTRA_CLOCK
++ unsigned long bi_inpfreq; /* input Freq in MHz */
++ unsigned long bi_vcofreq; /* vco Freq in MHz */
++ unsigned long bi_flbfreq; /* Flexbus Freq in MHz */
++#endif
++ unsigned long bi_baudrate; /* Console Baudrate */
++ unsigned char bi_enet1addr[6]; /* eth1 mac address */
++ unsigned char bi_enet2addr[6]; /* eth2 mac address */
++ unsigned char bi_enet3addr[6]; /* eth3 mac address */
++};
++
++struct uboot_record {
++ struct bd_info *bdi;
++ unsigned long initrd_start;
++ unsigned long initrd_end;
++ unsigned long cmd_line_start;
++ unsigned long cmd_line_stop;
++};
++
++#endif /* __ASSEMBLY__ */
++
++
+ /*
+ * Bootinfo definitions
+ *
+--- a/arch/m68k/include/asm/cacheflush_mm.h
++++ b/arch/m68k/include/asm/cacheflush_mm.h
+@@ -6,6 +6,9 @@
+ /* cache code */
+ #define FLUSH_I_AND_D (0x00000808)
+ #define FLUSH_I (0x00000008)
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_cacheflush.h>
++#else /* !CONFIG_COLDFIRE */
+
+ /*
+ * Cache handling functions
+@@ -153,4 +156,5 @@ static inline void copy_from_user_page(s
+ memcpy(dst, src, len);
+ }
+
++#endif /* !CONFIG_COLDFIRE */
+ #endif /* _M68K_CACHEFLUSH_H */
+--- a/arch/m68k/include/asm/checksum_mm.h
++++ b/arch/m68k/include/asm/checksum_mm.h
+@@ -34,6 +34,7 @@ extern __wsum csum_partial_copy_nocheck(
+ void *dst, int len,
+ __wsum sum);
+
++#ifndef CONFIG_COLDFIRE /* CF has own copy in arch/m68k/lib/checksum.c */
+ /*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+@@ -59,6 +60,9 @@ static inline __sum16 ip_fast_csum(const
+ : "memory");
+ return (__force __sum16)~sum;
+ }
++#else
++extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
++#endif
+
+ /*
+ * Fold a partial checksum
+@@ -67,6 +71,11 @@ static inline __sum16 ip_fast_csum(const
+ static inline __sum16 csum_fold(__wsum sum)
+ {
+ unsigned int tmp = (__force u32)sum;
++#ifdef CONFIG_COLDFIRE
++ tmp = (tmp & 0xffff) + (tmp >> 16);
++ tmp = (tmp & 0xffff) + (tmp >> 16);
++ return (__force __sum16) ~tmp;
++#else
+ __asm__("swap %1\n\t"
+ "addw %1, %0\n\t"
+ "clrw %1\n\t"
+@@ -74,6 +83,7 @@ static inline __sum16 csum_fold(__wsum s
+ : "=&d" (sum), "=&d" (tmp)
+ : "0" (sum), "1" (tmp));
+ return (__force __sum16)~sum;
++#endif
+ }
+
+
+--- a/arch/m68k/include/asm/coldfire.h
++++ b/arch/m68k/include/asm/coldfire.h
+@@ -5,6 +5,9 @@
+ *
+ * (C) Copyright 1999-2006, Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2000, Lineo (www.lineo.com)
++ *
++ * Shrek Wu b16972@freescale.com
++ * Copyright Freescale Semiconductor, Inc. 2009
+ */
+
+ /****************************************************************************/
+@@ -19,25 +22,78 @@
+ * here. Also the peripheral clock (bus clock) divide ratio is set
+ * at config time too.
+ */
++/*FIXME Jason*/
++#if 0
+ #ifdef CONFIG_CLOCK_SET
+ #define MCF_CLK CONFIG_CLOCK_FREQ
+ #define MCF_BUSCLK (CONFIG_CLOCK_FREQ / CONFIG_CLOCK_DIV)
+ #else
+ #error "Don't know what your ColdFire CPU clock frequency is??"
+ #endif
++#endif
++
++
++#define MCF_CLK CONFIG_MCFCLK
++#define MCF_BUSCLK (CONFIG_MCFCLK/2)
++
++
++#if defined(CONFIG_M520x)
++#define MCF_IPSBAR 0xFC000000
++#else
++#define MCF_IPSBAR 0x40000000
++#endif
+
++#if defined(CONFIG_M5445X)
++#define MCF_MBAR 0x0
++/*
++ * The RAMBAR1 macro would normally fall in the 0x8xxxxxxx range,
++ * but it is set to CONFIG_SDRAM_BASE here so that SDRAM memory
++ * is used instead of SRAM memory.
++ */
++#define MCF_RAMBAR1 (CONFIG_SDRAM_BASE)
++#elif defined(CONFIG_M547X_8X)
++#define MCF_MBAR 0xF0000000
++#define MCF_MMUBAR 0xF1000000
++#define MCF_RAMBAR0 0xF3000000
++#define MCF_RAMBAR1 0xF3001000
++#else
+ /*
+ * Define the processor support peripherals base address.
+ * This is generally setup by the boards start up code.
+ */
+ #define MCF_MBAR 0x10000000
+ #define MCF_MBAR2 0x80000000
+-#if defined(CONFIG_M520x)
+-#define MCF_IPSBAR 0xFC000000
+-#else
+-#define MCF_IPSBAR 0x40000000
+ #endif
+
++#ifdef __ASSEMBLY__
++#define REG32
++#define REG16
++#define REG08
++#else /* __ASSEMBLY__ */
++#define REG32(x) ((volatile unsigned long *)(x))
++#define REG16(x) ((volatile unsigned short *)(x))
++#define REG08(x) ((volatile unsigned char *)(x))
++
++#define MCF_REG32(x) *(volatile unsigned long *)(MCF_MBAR+(x))
++#define MCF_REG16(x) *(volatile unsigned short *)(MCF_MBAR+(x))
++#define MCF_REG08(x) *(volatile unsigned char *)(MCF_MBAR+(x))
++
++void cacr_set(unsigned long);
++unsigned long cacr_get(void);
++
++#define coldfire_enable_irq0(irq) MCF_INTC0_CIMR = (irq);
++
++#define coldfire_enable_irq1(irq) MCF_INTC1_CIMR = (irq);
++
++#define coldfire_disable_irq0(irq) MCF_INTC0_SIMR = (irq);
++
++#define coldfire_disable_irq1(irq) MCF_INTC1_SIMR = (irq);
++
++#define getiprh() MCF_INTC0_IPRH
++
++#endif /* __ASSEMBLY__ */
++
++
+ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x)
+ #undef MCF_MBAR
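The REG32/MCF_REG32 helpers added above wrap memory-mapped peripheral registers in volatile accesses relative to MCF_MBAR. A minimal sketch of how a driver might use them; the register offset and bit are placeholders, not a real SIM register from this patch.

/* Hedged sketch: toggle a bit in a hypothetical register at offset
 * 0x0700 from MCF_MBAR.  Offset and bit value are illustrative only. */
#define HYP_CTRL_REG	0x0700

static void hyp_toggle_enable(void)
{
	unsigned long v = MCF_REG32(HYP_CTRL_REG);	/* volatile 32-bit read */

	MCF_REG32(HYP_CTRL_REG) = v | 0x1;		/* set the enable bit */
	MCF_REG32(HYP_CTRL_REG) = v & ~0x1UL;		/* and clear it again */
}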
+--- a/arch/m68k/include/asm/delay_mm.h
++++ b/arch/m68k/include/asm/delay_mm.h
+@@ -11,8 +11,25 @@
+
+ static inline void __delay(unsigned long loops)
+ {
++#if defined(CONFIG_COLDFIRE)
++ /* The coldfire runs this loop at significantly different speeds
++ * depending upon long word alignment or not. We'll pad it to
++ * long word alignment which is the faster version.
++ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
++ * than using a NOP (0x4e71) instruction because it executes in one
++ * cycle not three and doesn't allow for an arbitrary delay waiting
++ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
++ * stall waiting for the register to become valid if such is added
++ * to the coldfire at some stage.
++ */
++ __asm__ __volatile__ (".balignw 4, 0x4a8e\n\t"
++ "1: subql #1, %0\n\t"
++ "jcc 1b"
++ : "=d" (loops) : "0" (loops));
++#else
+ __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
+ : "=d" (loops) : "0" (loops));
++#endif
+ }
+
+ extern void __bad_udelay(void);
+@@ -26,12 +43,17 @@ extern void __bad_udelay(void);
+ */
+ static inline void __const_udelay(unsigned long xloops)
+ {
++#if defined(CONFIG_COLDFIRE)
++
++ __delay(((((unsigned long long) xloops * loops_per_jiffy))>>32)*HZ);
++#else
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1"
+ : "=d" (xloops), "=d" (tmp)
+ : "d" (xloops), "1" (loops_per_jiffy));
+ __delay(xloops * HZ);
++#endif
+ }
+
+ static inline void __udelay(unsigned long usecs)
+@@ -46,12 +68,16 @@ static inline void __udelay(unsigned lon
+ static inline unsigned long muldiv(unsigned long a, unsigned long b,
+ unsigned long c)
+ {
++#if defined(CONFIG_COLDFIRE)
++ return (long)(((unsigned long long)a * b)/c);
++#else
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
+ : "=d" (tmp), "=d" (a)
+ : "d" (b), "d" (c), "1" (a));
+ return a;
++#endif
+ }
+
+ #endif /* defined(_M68K_DELAY_H) */
+--- a/arch/m68k/include/asm/div64.h
++++ b/arch/m68k/include/asm/div64.h
+@@ -1,7 +1,7 @@
+ #ifndef _M68K_DIV64_H
+ #define _M68K_DIV64_H
+
+-#ifdef CONFIG_MMU
++#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+
+ #include <linux/types.h>
+
+--- a/arch/m68k/include/asm/dma_mm.h
++++ b/arch/m68k/include/asm/dma_mm.h
+@@ -4,13 +4,126 @@
+
+ /* it's useless on the m68k, but unfortunately needed by the new
+ bootmem allocator (but this should do it for this) */
++/*#ifdef CONFIG_COLDFIRE*/
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#define MAX_DMA_ADDRESS 0xefffffff
++#else
+ #define MAX_DMA_ADDRESS PAGE_OFFSET
++#endif
+
++#ifndef CONFIG_COLDFIRE
+ #define MAX_DMA_CHANNELS 8
+
+ extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
+ extern void free_dma(unsigned int dmanr); /* release it again */
+
++#else /* not (defined(CONFIG_MCF5474) || defined(CONFIG_MCF5484)
++ || defined(CONFIG_MCF5475) || defined(CONFIG_MCF5485)) */
++/************************************************
++ * Multichannel DMA definitions *
++ ************************************************/
++#ifdef CONFIG_MCD_DMA
++#include <asm/MCD_dma.h>
++#include <asm/m5485dma.h>
++
++struct scatterlist;
++
++#define MAX_DMA_CHANNELS NCHANNELS
++/*
++ * identifiers for each initiator/requestor
++ */
++#define DMA_ALWAYS (0)
++#define DMA_DSPI_RX (1)
++#define DMA_DSPI_TX (2)
++#define DMA_DREQ0 (3)
++#define DMA_PSC0_RX (4)
++#define DMA_PSC0_TX (5)
++#define DMA_USBEP0 (6)
++#define DMA_USBEP1 (7)
++#define DMA_USBEP2 (8)
++#define DMA_USBEP3 (9)
++#define DMA_PCI_TX (10)
++#define DMA_PCI_RX (11)
++#define DMA_PSC1_RX (12)
++#define DMA_PSC1_TX (13)
++#define DMA_I2C_RX (14)
++#define DMA_I2C_TX (15)
++#define DMA_FEC0_RX (16)
++#define DMA_FEC0_TX (17)
++#define DMA_FEC1_RX (18)
++#define DMA_FEC1_TX (19)
++#define DMA_DREQ1 (20)
++#define DMA_CTM0 (21)
++#define DMA_CTM1 (22)
++#define DMA_CTM2 (23)
++#define DMA_CTM3 (24)
++#define DMA_CTM4 (25)
++#define DMA_CTM5 (26)
++#define DMA_CTM6 (27)
++#define DMA_CTM7 (28)
++#define DMA_USBEP4 (29)
++#define DMA_USBEP5 (30)
++#define DMA_USBEP6 (31)
++#define DMA_PSC2_RX (32)
++#define DMA_PSC2_TX (33)
++#define DMA_PSC3_RX (34)
++#define DMA_PSC3_TX (35)
++#define DMA_FEC_RX(x) ((x == 0) ? DMA_FEC0_RX : DMA_FEC1_RX)
++#define DMA_FEC_TX(x) ((x == 0) ? DMA_FEC0_TX : DMA_FEC1_TX)
++
++int dma_set_initiator(int);
++unsigned int dma_get_initiator(int);
++void dma_remove_initiator(int);
++int dma_set_channel(int);
++int dma_get_channel(int);
++void dma_remove_channel(int);
++int dma_set_channel_fec(int requestor);
++int dma_connect(int channel, int address);
++int dma_disconnect(int channel);
++void dma_remove_channel_by_number(int channel);
++int dma_init(void);
++#endif /* CONFIG_MCD_DMA */
++
++extern spinlock_t dma_spin_lock;
++
++static __inline__ unsigned long claim_dma_lock(void)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&dma_spin_lock, flags);
++ return flags;
++}
++
++static __inline__ void release_dma_lock(unsigned long flags)
++{
++ spin_unlock_irqrestore(&dma_spin_lock, flags);
++}
++
++
++/*
++ * Linux standard DMA stuff
++ */
++#if 0
++int request_dma(unsigned int channel, const char * device_id);
++void free_dma(unsigned int channel);
++void enable_dma(unsigned int channel);
++void disable_dma(unsigned int channel);
++int dma_channel_active(unsigned int channel);
++void set_dma_sg(unsigned int channel, struct scatterlist *sg, int nr_sg);
++void set_dma_page(unsigned int channel, char pagenr);
++void set_dma_addr(unsigned int channel, unsigned long physaddr);
++void set_dma_count(unsigned int channel, unsigned long count);
++void set_dma_mode(unsigned int channel, unsigned int mode);
++void set_dma_speed(unsigned int channel, int cycle_ns);
++int get_dma_residue(unsigned int channel);
++#endif
++#define clear_dma_ff(channel)
++
++#endif
++
++#ifdef CONFIG_PCI
++extern int isa_dma_bridge_buggy;
++#else
+ #define isa_dma_bridge_buggy (0)
++#endif
+
+ #endif /* _M68K_DMA_H */
+--- a/arch/m68k/include/asm/elf.h
++++ b/arch/m68k/include/asm/elf.h
+@@ -35,6 +35,27 @@
+ #define R_68K_JMP_SLOT 21
+ #define R_68K_RELATIVE 22
+
++/* TLS static relocations */
++#define R_68K_TLS_GD32 25
++#define R_68K_TLS_GD16 26
++#define R_68K_TLS_GD8 27
++#define R_68K_TLS_LDM32 28
++#define R_68K_TLS_LDM16 29
++#define R_68K_TLS_LDM8 30
++#define R_68K_TLS_LDO32 31
++#define R_68K_TLS_LDO16 32
++#define R_68K_TLS_LDO8 33
++#define R_68K_TLS_IE32 34
++#define R_68K_TLS_IE16 35
++#define R_68K_TLS_IE8 36
++#define R_68K_TLS_LE32 37
++#define R_68K_TLS_LE16 38
++#define R_68K_TLS_LE8 39
++/* TLS dynamic relocations */
++#define R_68K_TLS_DTPMOD32 40
++#define R_68K_TLS_DTPREL32 41
++#define R_68K_TLS_TPREL32 42
++
+ typedef unsigned long elf_greg_t;
+
+ #define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+@@ -60,7 +81,7 @@ typedef struct user_m68kfp_struct elf_fp
+ #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
+
+ #define USE_ELF_CORE_DUMP
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define ELF_EXEC_PAGESIZE 4096
+ #else
+ #define ELF_EXEC_PAGESIZE 8192
+@@ -71,8 +92,10 @@ typedef struct user_m68kfp_struct elf_fp
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define ELF_ET_DYN_BASE 0xD0000000UL
++#elif defined(CONFIG_COLDFIRE)
++#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x10000000)
+ #else
+ #define ELF_ET_DYN_BASE 0x0D800000UL
+ #endif
+@@ -116,4 +139,35 @@ typedef struct user_m68kfp_struct elf_fp
+
+ #define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+
++/*
++ * VDSO
++ */
++#ifdef CONFIG_VDSO
++extern unsigned int vdso_enabled;
++
++#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
++
++#define VDSO_AUX_ENT \
++ if (vdso_enabled) \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
++
++/* additional pages */
++#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
++
++struct linux_binprm;
++extern int arch_setup_additional_pages(struct linux_binprm *bprm,
++ int executable_stack);
++
++#else
++/* no VDSO_AUX_ENT */
++#define VDSO_AUX_ENT
++#endif
++
++#define ARCH_DLINFO \
++do { \
++ /* vdso entry */ \
++ VDSO_AUX_ENT; \
++} while (0);
++
+ #endif
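With CONFIG_VDSO enabled, the ARCH_DLINFO/VDSO_AUX_ENT definitions above publish the vDSO base to userspace through the AT_SYSINFO_EHDR auxiliary vector entry. A small userspace sketch that reads it back, assuming a libc that provides getauxval() (glibc 2.16+; older or uClibc toolchains may lack it):

#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	/* Non-zero only when the kernel mapped a vDSO for this process */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("vDSO ELF header mapped at %#lx\n", vdso);
	else
		printf("no vDSO mapped\n");
	return 0;
}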
+--- a/arch/m68k/include/asm/io_mm.h
++++ b/arch/m68k/include/asm/io_mm.h
+@@ -7,17 +7,24 @@
+ * - added skeleton for GG-II and Amiga PCMCIA
+ * 2/3/01 RZ: - moved a few more defs into raw_io.h
+ *
+- * inX/outX should not be used by any driver unless it does
+- * ISA access. Other drivers should use function defined in raw_io.h
++ * inX/outX/readX/writeX should not be used by any driver unless it does
++ * ISA or PCI access. Other drivers should use function defined in raw_io.h
+ * or define its own macros on top of these.
+ *
+- * inX(),outX() are for ISA I/O
++ * inX(),outX() are for PCI and ISA I/O
++ * readX(),writeX() are for PCI memory
+ * isa_readX(),isa_writeX() are for ISA memory
++ *
++ * moved mem{cpy,set}_*io inside CONFIG_PCI
+ */
+
+ #ifndef _IO_H
+ #define _IO_H
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_io.h>
++#else
++
+ #ifdef __KERNEL__
+
+ #include <linux/compiler.h>
+@@ -88,20 +95,20 @@ extern unsigned long gg2_isa_base;
+ #undef MULTI_ISA
+ #endif
+
+-#define ISA_TYPE_Q40 (1)
+-#define ISA_TYPE_GG2 (2)
+-#define ISA_TYPE_AG (3)
++#define Q40_ISA (1)
++#define GG2_ISA (2)
++#define AG_ISA (3)
+
+ #if defined(CONFIG_Q40) && !defined(MULTI_ISA)
+-#define ISA_TYPE ISA_TYPE_Q40
++#define ISA_TYPE Q40_ISA
+ #define ISA_SEX 0
+ #endif
+ #if defined(CONFIG_AMIGA_PCMCIA) && !defined(MULTI_ISA)
+-#define ISA_TYPE ISA_TYPE_AG
++#define ISA_TYPE AG_ISA
+ #define ISA_SEX 1
+ #endif
+ #if defined(CONFIG_GG2) && !defined(MULTI_ISA)
+-#define ISA_TYPE ISA_TYPE_GG2
++#define ISA_TYPE GG2_ISA
+ #define ISA_SEX 0
+ #endif
+
+@@ -123,13 +130,13 @@ static inline u8 __iomem *isa_itb(unsign
+ switch(ISA_TYPE)
+ {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_IO_B(addr);
++ case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
+ #endif
+ #ifdef CONFIG_GG2
+- case ISA_TYPE_GG2: return (u8 __iomem *)GG2_ISA_IO_B(addr);
++ case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u8 __iomem *)AG_ISA_IO_B(addr);
++ case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
+ #endif
+ default: return NULL; /* avoid warnings, just in case */
+ }
+@@ -139,13 +146,13 @@ static inline u16 __iomem *isa_itw(unsig
+ switch(ISA_TYPE)
+ {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_IO_W(addr);
++ case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
+ #endif
+ #ifdef CONFIG_GG2
+- case ISA_TYPE_GG2: return (u16 __iomem *)GG2_ISA_IO_W(addr);
++ case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u16 __iomem *)AG_ISA_IO_W(addr);
++ case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
+ #endif
+ default: return NULL; /* avoid warnings, just in case */
+ }
+@@ -155,7 +162,7 @@ static inline u32 __iomem *isa_itl(unsig
+ switch(ISA_TYPE)
+ {
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u32 __iomem *)AG_ISA_IO_W(addr);
++ case AG_ISA: return (u32 __iomem *)AG_ISA_IO_W(addr);
+ #endif
+ default: return 0; /* avoid warnings, just in case */
+ }
+@@ -165,13 +172,13 @@ static inline u8 __iomem *isa_mtb(unsign
+ switch(ISA_TYPE)
+ {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
++ case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
+ #endif
+ #ifdef CONFIG_GG2
+- case ISA_TYPE_GG2: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
++ case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u8 __iomem *)addr;
++ case AG_ISA: return (u8 __iomem *)addr;
+ #endif
+ default: return NULL; /* avoid warnings, just in case */
+ }
+@@ -181,13 +188,13 @@ static inline u16 __iomem *isa_mtw(unsig
+ switch(ISA_TYPE)
+ {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
++ case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
+ #endif
+ #ifdef CONFIG_GG2
+- case ISA_TYPE_GG2: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
++ case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: return (u16 __iomem *)addr;
++ case AG_ISA: return (u16 __iomem *)addr;
+ #endif
+ default: return NULL; /* avoid warnings, just in case */
+ }
+@@ -201,30 +208,29 @@ static inline u16 __iomem *isa_mtw(unsig
+ #define isa_outw(val,port) (ISA_SEX ? out_be16(isa_itw(port),(val)) : out_le16(isa_itw(port),(val)))
+ #define isa_outl(val,port) (ISA_SEX ? out_be32(isa_itl(port),(val)) : out_le32(isa_itl(port),(val)))
+
+-#define isa_readb(p) in_8(isa_mtb((unsigned long)(p)))
+-#define isa_readw(p) \
+- (ISA_SEX ? in_be16(isa_mtw((unsigned long)(p))) \
+- : in_le16(isa_mtw((unsigned long)(p))))
+-#define isa_writeb(val,p) out_8(isa_mtb((unsigned long)(p)),(val))
+-#define isa_writew(val,p) \
+- (ISA_SEX ? out_be16(isa_mtw((unsigned long)(p)),(val)) \
+- : out_le16(isa_mtw((unsigned long)(p)),(val)))
+-
++#define isa_readb(p) in_8(isa_mtb(p))
++#define isa_readw(p) (ISA_SEX ? in_be16(isa_mtw(p)) : in_le16(isa_mtw(p)))
++#define isa_writeb(val,p) out_8(isa_mtb(p),(val))
++#define isa_writew(val,p) (ISA_SEX ? out_be16(isa_mtw(p),(val)) : out_le16(isa_mtw(p),(val)))
+ static inline void isa_delay(void)
+ {
+- switch(ISA_TYPE)
+- {
++ switch (ISA_TYPE) {
+ #ifdef CONFIG_Q40
+- case ISA_TYPE_Q40: isa_outb(0,0x80); break;
++ case Q40_ISA:
++ isa_outb(0, 0x80);
++ break;
+ #endif
+ #ifdef CONFIG_GG2
+- case ISA_TYPE_GG2: break;
++ case GG2_ISA:
++ break;
+ #endif
+ #ifdef CONFIG_AMIGA_PCMCIA
+- case ISA_TYPE_AG: break;
++ case AG_ISA:
++ break;
+ #endif
+- default: break; /* avoid warnings */
+- }
++ default:
++ break; /* avoid warnings */
++ }
+ }
+
+ #define isa_inb_p(p) ({u8 v=isa_inb(p);isa_delay();v;})
+@@ -253,7 +259,10 @@ static inline void isa_delay(void)
+ (ISA_SEX ? raw_outsl(isa_itl(port), (u32 *)(buf), (nr)) : \
+ raw_outsw_swapw(isa_itw(port), (u16 *)(buf), (nr)<<1))
+
++#endif /* CONFIG_ISA */
+
++
++#if defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+ #define inb isa_inb
+ #define inb_p isa_inb_p
+ #define outb isa_outb
+@@ -276,9 +285,80 @@ static inline void isa_delay(void)
+ #define readw isa_readw
+ #define writeb isa_writeb
+ #define writew isa_writew
++#endif /* CONFIG_ISA */
++
++#if defined(CONFIG_PCI)
++
++#define readl(addr) in_le32(addr)
++#define writel(val, addr) out_le32((addr), (val))
++
++/* those can be defined for both ISA and PCI - it won't work though */
++#define readb(addr) in_8(addr)
++#define readw(addr) in_le16(addr)
++#define writeb(val, addr) out_8((addr), (val))
++#define writew(val, addr) out_le16((addr), (val))
++
++#define readb_relaxed(addr) readb(addr)
++#define readw_relaxed(addr) readw(addr)
++#define readl_relaxed(addr) readl(addr)
++
++#ifndef CONFIG_ISA
++#define inb(port) in_8(port)
++#define outb(val, port) out_8((port), (val))
++#define inw(port) in_le16(port)
++#define outw(val, port) out_le16((port), (val))
++#define inl(port) in_le32(port)
++#define outl(val, port) out_le32((port), (val))
++#define insb(port, buf, nr) \
++ raw_insb((u8 *)(port), (u8 *)(buf), (nr))
++#define outsb(port, buf, nr) \
++ raw_outsb((u8 *)(port), (u8 *)(buf), (nr))
++#define insw(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define outsw(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr))
++#define insl(port, buf, nr) \
++ raw_insw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++#define outsl(port, buf, nr) \
++ raw_outsw_swapw((u16 *)(port), (u16 *)(buf), (nr)<<1)
++
++#define __raw_readb readb
++#define __raw_readw readw
++#define __raw_readl readl
++#define __raw_writeb writeb
++#define __raw_writew writew
++#define __raw_writel writel
+
+-#else /* CONFIG_ISA */
++#else
++/*
++ * kernel with both ISA and PCI compiled in, those have
++ * conflicting defs for in/out. Simply consider port < 1024
++ * ISA and everything else PCI. read,write not defined
++ * in this case
++ */
++#define inb(port) ((port) < 1024 ? isa_inb(port) : in_8(port))
++#define inb_p(port) ((port) < 1024 ? isa_inb_p(port) : in_8(port))
++#define inw(port) ((port) < 1024 ? isa_inw(port) : in_le16(port))
++#define inw_p(port) ((port) < 1024 ? isa_inw_p(port) : in_le16(port))
++#define inl(port) ((port) < 1024 ? isa_inl(port) : in_le32(port))
++#define inl_p(port) ((port) < 1024 ? isa_inl_p(port) : in_le32(port))
++
++#define outb(val, port) (((port) < 1024) ? isa_outb((val), (port)) \
++ : out_8((port), (val)))
++#define outb_p(val, port) (((port) < 1024) ? isa_outb_p((val), (port)) \
++ : out_8((port), (val)))
++#define outw(val, port) (((port) < 1024) ? isa_outw((val), (port)) \
++ : out_le16((port), (val)))
++#define outw_p(val, port) (((port) < 1024) ? isa_outw_p((val), (port)) \
++ : out_le16((port), (val)))
++#define outl(val, port) (((port) < 1024) ? isa_outl((val), (port)) \
++ : out_le32((port), (val)))
++#define outl_p(val, port) (((port) < 1024) ? isa_outl_p((val), (port)) \
++ : out_le32((port), (val)))
++#endif
++#endif /* CONFIG_PCI */
+
++#if !defined(CONFIG_ISA) && !defined(CONFIG_PCI)
+ /*
+ * We need to define dummy functions for GENERIC_IOMAP support.
+ */
+@@ -305,11 +385,11 @@ static inline void isa_delay(void)
+ #define writeb(val,addr) out_8((addr),(val))
+ #define readw(addr) in_le16(addr)
+ #define writew(val,addr) out_le16((addr),(val))
+-
+-#endif /* CONFIG_ISA */
+-
++#endif
++#if !defined(CONFIG_PCI)
+ #define readl(addr) in_le32(addr)
+ #define writel(val,addr) out_le32((addr),(val))
++#endif
+
+ #define mmiowb()
+
+@@ -345,10 +425,10 @@ static inline void memcpy_toio(volatile
+ __builtin_memcpy((void __force *) dst, src, count);
+ }
+
+-#ifndef CONFIG_SUN3
+-#define IO_SPACE_LIMIT 0xffff
+-#else
++#if defined(CONFIG_SUN3)
+ #define IO_SPACE_LIMIT 0x0fffffff
++#else
++#define IO_SPACE_LIMIT 0xffff
+ #endif
+
+ #endif /* __KERNEL__ */
+@@ -366,4 +446,5 @@ static inline void memcpy_toio(volatile
+ */
+ #define xlate_dev_kmem_ptr(p) p
+
++#endif /* CONFIG_COLDFIRE */
+ #endif /* _IO_H */
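When both CONFIG_ISA and CONFIG_PCI are built in, the definitions above dispatch purely on the port number: anything below 1024 goes through the legacy ISA accessors, everything else is treated as a raw little-endian MMIO address. A hedged fragment showing the two paths a caller would take; the port and address values are placeholders, not addresses used by this patch.

static void io_dispatch_example(void)
{
	unsigned char lo = inb(0x3f8);		/* < 1024: routed to isa_inb()    */
	unsigned char hi = inb(0x10000);	/* >= 1024: routed to in_8() MMIO */

	(void)lo;
	(void)hi;
}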
+--- a/arch/m68k/include/asm/irq_mm.h
++++ b/arch/m68k/include/asm/irq_mm.h
+@@ -12,7 +12,10 @@
+ * Currently the Atari has 72 and the Amiga 24, but if both are
+ * supported in the kernel it is better to make room for 72.
+ */
+-#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
++#if defined(CONFIG_COLDFIRE)
++#define SYS_IRQS 256
++#define NR_IRQS SYS_IRQS
++#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
+ #define NR_IRQS 200
+ #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
+ #define NR_IRQS 72
+--- a/arch/m68k/include/asm/machdep_mm.h
++++ b/arch/m68k/include/asm/machdep_mm.h
+@@ -32,4 +32,11 @@ extern void (*mach_heartbeat) (int);
+ extern void (*mach_l2_flush) (int);
+ extern void (*mach_beep) (unsigned int, unsigned int);
+
++#ifdef CONFIG_COLDFIRE
++extern void __init config_coldfire(void);
++extern void __init mmu_context_init(void);
++extern irq_handler_t mach_default_handler;
++extern void (*mach_tick)(void);
++#endif
++
+ #endif /* _M68K_MACHDEP_H */
+--- a/arch/m68k/include/asm/mcfsim.h
++++ b/arch/m68k/include/asm/mcfsim.h
+@@ -39,6 +39,25 @@
+ #include <asm/m5407sim.h>
+ #endif
+
++#if defined(CONFIG_COLDFIRE)
++#include <asm/coldfire.h>
++#endif
++
++#if defined(CONFIG_M5445X)
++#include <asm/mcf5445x_intc.h>
++#include <asm/mcf5445x_gpio.h>
++#include <asm/mcf5445x_ccm.h>
++#include <asm/mcf5445x_eport.h>
++#include <asm/mcf5445x_fbcs.h>
++#include <asm/mcf5445x_xbs.h>
++#include <asm/mcf5445x_dtim.h>
++#include <asm/mcf5445x_rtc.h>
++#include <asm/mcf5445x_scm.h>
++#elif defined(CONFIG_M547X_8X)
++#include <asm/m5485sim.h>
++#include <asm/m5485gpio.h>
++#include <asm/m5485gpt.h>
++#endif
+
+ /*
+ * Define the base address of the SIM within the MBAR address space.
+--- a/arch/m68k/include/asm/mmu_context.h
++++ b/arch/m68k/include/asm/mmu_context.h
+@@ -8,7 +8,7 @@ static inline void enter_lazy_tlb(struct
+ }
+
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+
+ #include <asm/setup.h>
+ #include <asm/page.h>
+@@ -103,7 +103,7 @@ static inline void activate_mm(struct mm
+ switch_mm_0460(next_mm);
+ }
+
+-#else /* CONFIG_SUN3 */
++#elif defined(CONFIG_SUN3)
+ #include <asm/sun3mmu.h>
+ #include <linux/sched.h>
+
+@@ -151,7 +151,179 @@ static inline void activate_mm(struct mm
+ activate_context(next_mm);
+ }
+
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/coldfire.h>
++#include <asm/atomic.h>
++#include <asm/bitops.h>
++#include <asm/mmu.h>
++
++#define NO_CONTEXT 256
++#define LAST_CONTEXT 255
++#define FIRST_CONTEXT 1
++
++#ifdef CONFIG_VDSO
++#define cpu_context(mm) ((mm)->context.id)
++#else
++#define cpu_context(mm) ((mm)->context)
++#endif
++
++#ifdef CONFIG_VDSO
++extern void set_context(unsigned long context, pgd_t *pgd);
++#else
++extern void set_context(mm_context_t context, pgd_t *pgd);
++#endif
++extern unsigned long context_map[];
++#ifdef CONFIG_VDSO
++extern unsigned long next_mmu_context;
++#else
++extern mm_context_t next_mmu_context;
++#endif
++
++
++extern atomic_t nr_free_contexts;
++extern struct mm_struct *context_mm[LAST_CONTEXT+1];
++extern void steal_context(void);
++
++static inline void get_mmu_context(struct mm_struct *mm)
++{
++#ifdef CONFIG_VDSO
++ unsigned long ctx;
++#else
++ mm_context_t ctx;
+ #endif
++
++ if (cpu_context(mm) != NO_CONTEXT)
++ return;
++ while (atomic_dec_and_test_lt(&nr_free_contexts)) {
++ atomic_inc(&nr_free_contexts);
++ steal_context();
++ }
++ ctx = next_mmu_context;
++ while (test_and_set_bit(ctx, context_map)) {
++ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
++ if (ctx > LAST_CONTEXT)
++ ctx = 0;
++ }
++ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
++ cpu_context(mm) = ctx;
++ context_mm[ctx] = mm;
++}
++
++/*
++ * Set up the context for a new address space.
++ */
++#define init_new_context(tsk, mm) ((cpu_context(mm) = NO_CONTEXT), 0)
++/* #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) */
++
++/*
++ * We're finished using the context for an address space.
++ */
++static inline void destroy_context(struct mm_struct *mm)
++{
++ if (cpu_context(mm) != NO_CONTEXT) {
++ clear_bit(cpu_context(mm), context_map);
++ cpu_context(mm) = NO_CONTEXT;
++ atomic_inc(&nr_free_contexts);
++ }
++}
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ get_mmu_context(tsk->mm);
++ set_context(cpu_context(tsk->mm), next->pgd);
++}
++
++/*
++ * After we have set current->mm to a new value, this activates
++ * the context for the new mm so we see the new mappings.
++ */
++static inline void activate_mm(struct mm_struct *active_mm,
++ struct mm_struct *mm)
++{
++ get_mmu_context(mm);
++ set_context(cpu_context(mm), mm->pgd);
++}
++
++#define deactivate_mm(tsk, mm) do { } while (0)
++
++extern void mmu_context_init(void);
++#if defined(CONFIG_M547X_8X)
++#define prepare_arch_switch(next) load_ksp_mmu(next)
++
++static inline void load_ksp_mmu(struct task_struct *task)
++{
++ unsigned long flags;
++ struct mm_struct *mm;
++ int asid;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long mmuar;
++
++ local_irq_save(flags);
++ mmuar = task->thread.ksp;
++
++ /* Search for a valid TLB entry, if one is found, don't remap */
++ *MMUAR = mmuar;
++ *MMUOR = MMUOR_STLB | MMUOR_ADR;
++ if ((*MMUSR) & MMUSR_HIT)
++ goto end;
++
++ if (mmuar >= PAGE_OFFSET) {
++ mm = &init_mm;
++ } else {
++ printk(KERN_INFO "load_ksp_mmu: non-kernel"
++ " mm found: 0x%08x\n", (unsigned int) task->mm);
++ mm = task->mm;
++ }
++
++ if (!mm)
++ goto bug;
++
++ pgd = pgd_offset(mm, mmuar);
++ if (pgd_none(*pgd))
++ goto bug;
++
++ pmd = pmd_offset(pgd, mmuar);
++ if (pmd_none(*pmd))
++ goto bug;
++
++ pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
++ : pte_offset_map(pmd, mmuar);
++ if (pte_none(*pte) || !pte_present(*pte))
++ goto bug;
++
++ set_pte(pte, pte_mkyoung(*pte));
++ asid = cpu_context(mm) & 0xff;
++ if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
++ set_pte(pte, pte_wrprotect(*pte));
++
++ *MMUTR = (mmuar & PAGE_MASK) | (asid << CF_ASID_MMU_SHIFT)
++ | (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
++ >> CF_PAGE_MMUTR_SHIFT)
++ | MMUTR_V;
++
++ *MMUDR = (pte_val(*pte) & PAGE_MASK)
++ | ((pte->pte) & CF_PAGE_MMUDR_MASK)
++ | MMUDR_SZ8K | MMUDR_X;
++
++ *MMUOR = MMUOR_ACC | MMUOR_UAA;
++ asm ("nop");
++
++ goto end;
++
++bug:
++ printk(KERN_ERR "ksp load failed: mm=0x%08x ksp=0x%08x\n",
++ (unsigned int) mm, (unsigned int) mmuar);
++end:
++ local_irq_restore(flags);
++}
++#endif /* CONFIG_M547X_8X */
++
++#endif /* CONFIG_COLDFIRE */
++
+ #else /* !CONFIG_MMU */
+
+ static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
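The ColdFire branch of mmu_context.h above hands each mm one of 256 hardware ASIDs, scanning a bitmap round-robin from next_mmu_context and stealing a context when none are free. A simplified userspace sketch of the same round-robin scan, with the bitmap and cursor as plain variables; it returns -1 where the kernel would call steal_context(), and it ignores the reserved FIRST_CONTEXT handling.

#include <stdio.h>

#define LAST_CONTEXT 255

static unsigned char context_map[LAST_CONTEXT + 1];	/* 1 = ASID in use */
static unsigned int next_mmu_context;

/* Round-robin pick of the next free ASID, loosely mirroring get_mmu_context() */
static int alloc_asid(void)
{
	unsigned int ctx = next_mmu_context;

	while (context_map[ctx]) {
		ctx = (ctx + 1) & LAST_CONTEXT;		/* wrap at 256 */
		if (ctx == next_mmu_context)
			return -1;			/* all in use: kernel would steal one */
	}
	context_map[ctx] = 1;
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	return (int)ctx;
}

int main(void)
{
	printf("first ASIDs: %d %d %d\n", alloc_asid(), alloc_asid(), alloc_asid());
	return 0;
}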
+--- a/arch/m68k/include/asm/page_mm.h
++++ b/arch/m68k/include/asm/page_mm.h
+@@ -1,10 +1,15 @@
+ #ifndef _M68K_PAGE_H
+ #define _M68K_PAGE_H
+
++/*#if defined(CONFIG_COLDFIRE)*/
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#include <asm/cf_page.h>
++#else
++
+ #include <linux/const.h>
+
+ /* PAGE_SHIFT determines the page size */
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define PAGE_SHIFT (12)
+ #else
+ #define PAGE_SHIFT (13)
+@@ -113,10 +118,31 @@ typedef struct page *pgtable_t;
+
+ extern unsigned long m68k_memoffset;
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3)
+
+ #define WANT_PAGE_VIRTUAL
+
++#if defined(CONFIG_COLDFIRE)
++static inline unsigned long ___pa(void *vaddr)
++{
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
++ return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
++#else
++ return (unsigned long)vaddr;
++#endif
++}
++#define __pa(vaddr) ___pa((void *)(vaddr))
++
++static inline void *__va(unsigned long paddr)
++{
++#if CONFIG_SDRAM_BASE != PAGE_OFFSET
++ return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
++#else
++ return (void *)paddr;
++#endif
++}
++
++#else
+ static inline unsigned long ___pa(void *vaddr)
+ {
+ unsigned long paddr;
+@@ -138,6 +164,7 @@ static inline void *__va(unsigned long p
+ : "0" (paddr), "i" (m68k_fixup_memoffset));
+ return vaddr;
+ }
++#endif
+
+ #else /* !CONFIG_SUN3 */
+ /* This #define is a horrible hack to suppress lots of warnings. --m */
+@@ -169,6 +196,8 @@ static inline void *__va(unsigned long x
+ * memory node, but we have no highmem, so that works for now.
+ * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
+ * of the shifts unnecessary.
++ *
++ * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr.
+ */
+ #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+ #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+@@ -225,4 +254,10 @@ static inline __attribute_const__ int __
+
+ #include <asm-generic/getorder.h>
+
++#ifdef CONFIG_VDSO
++/* vDSO support */
++#define __HAVE_ARCH_GATE_AREA
++#endif
++
++#endif /* !CONFIG_COLDFIRE */
+ #endif /* _M68K_PAGE_H */
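For the ColdFire ___pa()/__va() branch above the translation is either the identity (the usual situation on these parts, since page_offset.h below makes PAGE_OFFSET equal to CONFIG_SDRAM_BASE) or a mask-and-rebase into a 256 MB window. A short worked example with illustrative addresses only:

/* Identity case (CONFIG_SDRAM_BASE == PAGE_OFFSET, as set up by
 * page_offset.h for M5445X/M547X_8X):
 *     __pa(0x40100000) == 0x40100000
 *
 * Rebased case (only if the two bases ever differed), with
 * CONFIG_SDRAM_BASE = 0x40000000 and PAGE_OFFSET = 0xC0000000:
 *     __pa(0xC0100000) == (0xC0100000 & 0x0fffffff) + 0x40000000
 *                      == 0x00100000 + 0x40000000 == 0x40100000
 */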
+--- a/arch/m68k/include/asm/page_offset.h
++++ b/arch/m68k/include/asm/page_offset.h
+@@ -1,10 +1,13 @@
+ /* This handles the memory map.. */
+
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
+-#define PAGE_OFFSET_RAW 0x00000000
+-#else
++#if defined(CONFIG_SUN3)
+ #define PAGE_OFFSET_RAW 0x0E000000
++#elif defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++#define PHYS_OFFSET CONFIG_SDRAM_BASE
++#define PAGE_OFFSET_RAW (PHYS_OFFSET)
++#else
++#define PAGE_OFFSET_RAW 0x00000000
+ #endif
+ #else
+ #define PAGE_OFFSET_RAW CONFIG_RAMBASE
+--- a/arch/m68k/include/asm/pgalloc.h
++++ b/arch/m68k/include/asm/pgalloc.h
+@@ -7,8 +7,10 @@
+
+ #ifdef CONFIG_MMU
+ #include <asm/virtconvert.h>
+-#ifdef CONFIG_SUN3
++#if defined (CONFIG_SUN3)
+ #include <asm/sun3_pgalloc.h>
++#elif defined(CONFIG_COLDFIRE)
++#include <asm/cf_pgalloc.h>
+ #else
+ #include <asm/motorola_pgalloc.h>
+ #endif
+--- a/arch/m68k/include/asm/pgtable_mm.h
++++ b/arch/m68k/include/asm/pgtable_mm.h
+@@ -40,6 +40,8 @@
+ /* PGDIR_SHIFT determines what a third-level page table entry can map */
+ #ifdef CONFIG_SUN3
+ #define PGDIR_SHIFT 17
++#elif defined(CONFIG_COLDFIRE)
++#define PGDIR_SHIFT 22
+ #else
+ #define PGDIR_SHIFT 25
+ #endif
+@@ -54,6 +56,10 @@
+ #define PTRS_PER_PTE 16
+ #define PTRS_PER_PMD 1
+ #define PTRS_PER_PGD 2048
++#elif defined(CONFIG_COLDFIRE)
++#define PTRS_PER_PTE 512
++#define PTRS_PER_PMD 1
++#define PTRS_PER_PGD 1024
+ #else
+ #define PTRS_PER_PTE 1024
+ #define PTRS_PER_PMD 8
+@@ -66,6 +72,11 @@
+ #ifdef CONFIG_SUN3
+ #define KMAP_START 0x0DC00000
+ #define KMAP_END 0x0E000000
++#elif defined(CONFIG_COLDFIRE)
++#define VMALLOC_START 0xc0000000
++#define VMALLOC_END 0xcfffffff
++#define KMAP_START (VMALLOC_END + 1)
++#define KMAP_END 0xe8000000
+ #else
+ #define KMAP_START 0xd0000000
+ #define KMAP_END 0xf0000000
+@@ -79,9 +90,11 @@
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
++#if !defined(CONFIG_COLDFIRE)
+ #define VMALLOC_OFFSET (8*1024*1024)
+ #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+ #define VMALLOC_END KMAP_START
++#endif
+ #else
+ extern unsigned long vmalloc_end;
+ #define VMALLOC_START 0x0f800000
+@@ -130,6 +143,8 @@ static inline void update_mmu_cache(stru
+
+ #ifdef CONFIG_SUN3
+ #include <asm/sun3_pgtable.h>
++#elif defined(CONFIG_COLDFIRE)
++#include <asm/cf_pgtable.h>
+ #else
+ #include <asm/motorola_pgtable.h>
+ #endif
+@@ -138,6 +153,9 @@ static inline void update_mmu_cache(stru
+ /*
+ * Macro to mark a page protection value as "uncacheable".
+ */
++#ifdef CONFIG_COLDFIRE
++# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
++#else /* CONFIG_COLDFIRE */
+ #ifdef SUN3_PAGE_NOCACHE
+ # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE
+ #else
+@@ -152,6 +170,7 @@ static inline void update_mmu_cache(stru
+ ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
+ : (prot)))
+
++#endif /* CONFIG_COLDFIRE */
+ #include <asm-generic/pgtable.h>
+ #endif /* !__ASSEMBLY__ */
+
+--- a/arch/m68k/include/asm/processor_mm.h
++++ b/arch/m68k/include/asm/processor_mm.h
+@@ -2,6 +2,7 @@
+ * include/asm-m68k/processor.h
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+ #ifndef __ASM_M68K_PROCESSOR_H
+@@ -22,24 +23,38 @@ static inline unsigned long rdusp(void)
+ {
+ unsigned long usp;
+
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
++#else
++ __asm__ __volatile__("movel %/usp,%0" : "=a" (usp));
++#endif
+ return usp;
+ }
+
+ static inline void wrusp(unsigned long usp)
+ {
++#ifndef CONFIG_COLDFIRE
+ __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
++#else
++ __asm__ __volatile__("movel %0,%/usp" : : "a" (usp));
++#endif
+ }
+
+ /*
+ * User space process size: 3.75GB. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ #define TASK_SIZE (0xF0000000UL)
++#elif defined(CONFIG_COLDFIRE)
++#define TASK_SIZE (0xC0000000UL)
++#else /* CONFIG_SUN3 */
++#ifdef __ASSEMBLY__
++#define TASK_SIZE (0x0E000000)
+ #else
+ #define TASK_SIZE (0x0E000000UL)
+ #endif
++#endif
+
+ #ifdef __KERNEL__
+ #define STACK_TOP TASK_SIZE
+@@ -49,9 +64,11 @@ static inline void wrusp(unsigned long u
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+-#ifndef CONFIG_SUN3
+-#define TASK_UNMAPPED_BASE 0xC0000000UL
+-#else
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
++#define TASK_UNMAPPED_BASE 0xC0000000UL
++#elif defined(CONFIG_COLDFIRE)
++#define TASK_UNMAPPED_BASE 0x60000000UL
++#else /* CONFIG_SUN3 */
+ #define TASK_UNMAPPED_BASE 0x0A000000UL
+ #endif
+ #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
+@@ -60,7 +77,11 @@ struct thread_struct {
+ unsigned long ksp; /* kernel stack pointer */
+ unsigned long usp; /* user stack pointer */
+ unsigned short sr; /* saved status register */
++#ifndef CONFIG_COLDFIRE
+ unsigned short fs; /* saved fs (sfc, dfc) */
++#else
++ mm_segment_t fs;
++#endif
+ unsigned long crp[2]; /* cpu root pointer */
+ unsigned long esp0; /* points to SR of stack frame */
+ unsigned long faddr; /* info about last fault */
+@@ -81,6 +102,7 @@ struct thread_struct {
+ /*
+ * Do necessary setup to start up a newly executed thread.
+ */
++#ifndef CONFIG_COLDFIRE
+ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
+ unsigned long usp)
+ {
+@@ -91,6 +113,23 @@ static inline void start_thread(struct p
+ regs->sr &= ~0x2000;
+ wrusp(usp);
+ }
++#else
++/*
++ * Do necessary setup to start up a newly executed thread.
++ *
++ * pass the data segment into user programs if it exists,
++ * it can't hurt anything as far as I can tell
++ */
++#define start_thread(_regs, _pc, _usp) \
++do { \
++ set_fs(USER_DS); /* reads from user space */ \
++ (_regs)->pc = (_pc); \
++ if (current->mm) \
++ (_regs)->d5 = current->mm->start_data; \
++ (_regs)->sr &= ~0x2000; \
++ wrusp(_usp); \
++} while (0)
++#endif
+
+ /* Forward declaration, a strange C thing */
+ struct task_struct;
+--- a/arch/m68k/include/asm/ptrace.h
++++ b/arch/m68k/include/asm/ptrace.h
+@@ -39,10 +39,21 @@ struct pt_regs {
+ long orig_d0;
+ long stkadj;
+ #ifdef CONFIG_COLDFIRE
++#if 0
+ unsigned format : 4; /* frame format specifier */
+ unsigned vector : 12; /* vector offset */
+ unsigned short sr;
+ unsigned long pc;
++#endif
++/*FROM BSP*/
++ unsigned long mmuar;
++ unsigned long mmusr;
++ unsigned format : 4; /* frame format specifier */
++ unsigned fs2 : 2;
++ unsigned vector: 8;
++ unsigned fs1 : 2;
++ unsigned short sr;
++ unsigned long pc;
+ #else
+ unsigned short sr;
+ unsigned long pc;
+@@ -71,6 +82,8 @@ struct switch_stack {
+ #define PTRACE_GETFPREGS 14
+ #define PTRACE_SETFPREGS 15
+
++#define PTRACE_GET_THREAD_AREA 25
++
+ #ifdef __KERNEL__
+
+ #ifndef PS_S
+--- a/arch/m68k/include/asm/raw_io.h
++++ b/arch/m68k/include/asm/raw_io.h
+@@ -8,6 +8,10 @@
+ #ifndef _RAW_IO_H
+ #define _RAW_IO_H
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_raw_io.h>
++#else
++
+ #ifdef __KERNEL__
+
+ #include <asm/types.h>
+@@ -60,6 +64,9 @@ extern void __iounmap(void *addr, unsign
+ #define __raw_writew(val,addr) out_be16((addr),(val))
+ #define __raw_writel(val,addr) out_be32((addr),(val))
+
++#define swap_inw(port) in_le16((port))
++#define swap_outw(val,port) out_le16((port),(val))
++
+ static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
+ {
+ unsigned int i;
+@@ -344,4 +351,6 @@ static inline void raw_outsw_swapw(volat
+
+ #endif /* __KERNEL__ */
+
++#endif /* CONFIG_COLDFIRE */
++
+ #endif /* _RAW_IO_H */
+--- a/arch/m68k/include/asm/segment.h
++++ b/arch/m68k/include/asm/segment.h
+@@ -29,6 +29,7 @@ typedef struct {
+ * Get/set the SFC/DFC registers for MOVES instructions
+ */
+
++#ifndef CONFIG_COLDFIRE
+ static inline mm_segment_t get_fs(void)
+ {
+ #ifdef CONFIG_MMU
+@@ -56,6 +57,15 @@ static inline void set_fs(mm_segment_t v
+ #endif
+ }
+
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/current.h>
++#define get_fs() (current->thread.fs)
++#define set_fs(val) (current->thread.fs = (val))
++#define get_ds() (KERNEL_DS)
++
++#endif /* CONFIG_COLDFIRE */
++
+ #define segment_eq(a,b) ((a).seg == (b).seg)
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/m68k/include/asm/setup.h
++++ b/arch/m68k/include/asm/setup.h
+@@ -2,6 +2,7 @@
+ ** asm/setup.h -- Definition of the Linux/m68k setup information
+ **
+ ** Copyright 1992 by Greg Harp
++ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ **
+ ** This file is subject to the terms and conditions of the GNU General Public
+ ** License. See the file COPYING in the main directory of this archive
+@@ -40,6 +41,7 @@
+ #define MACH_HP300 9
+ #define MACH_Q40 10
+ #define MACH_SUN3X 11
++#define MACH_CFMMU 12
+
+ #define COMMAND_LINE_SIZE 256
+
+@@ -189,6 +191,14 @@ extern unsigned long m68k_machtype;
+ # define MACH_TYPE (MACH_SUN3X)
+ #endif
+
++#if !defined(CONFIG_COLDFIRE)
++# define MACH_IS_COLDFIRE (0)
++#else
++# define CONFIG_COLDFIRE_ONLY
++# define MACH_IS_COLDFIRE (1)
++# define MACH_TYPE (MACH_CFMMU)
++#endif
++
+ #ifndef MACH_TYPE
+ # define MACH_TYPE (m68k_machtype)
+ #endif
+@@ -211,23 +221,31 @@ extern unsigned long m68k_machtype;
+ #define CPUB_68030 1
+ #define CPUB_68040 2
+ #define CPUB_68060 3
++#define CPUB_CFV4E 4
+
+ #define CPU_68020 (1<<CPUB_68020)
+ #define CPU_68030 (1<<CPUB_68030)
+ #define CPU_68040 (1<<CPUB_68040)
+ #define CPU_68060 (1<<CPUB_68060)
++#define CPU_CFV4E (1<<CPUB_CFV4E)
+
+ #define FPUB_68881 0
+ #define FPUB_68882 1
+ #define FPUB_68040 2 /* Internal FPU */
+ #define FPUB_68060 3 /* Internal FPU */
+ #define FPUB_SUNFPA 4 /* Sun-3 FPA */
++#define FPUB_CFV4E 5
+
+ #define FPU_68881 (1<<FPUB_68881)
+ #define FPU_68882 (1<<FPUB_68882)
+ #define FPU_68040 (1<<FPUB_68040)
+ #define FPU_68060 (1<<FPUB_68060)
+ #define FPU_SUNFPA (1<<FPUB_SUNFPA)
++#ifdef CONFIG_M547X_8X
++#define FPU_CFV4E (1<<FPUB_CFV4E)
++#else
++#define FPU_CFV4E 0
++#endif
+
+ #define MMUB_68851 0
+ #define MMUB_68030 1 /* Internal MMU */
+@@ -235,6 +253,7 @@ extern unsigned long m68k_machtype;
+ #define MMUB_68060 3 /* Internal MMU */
+ #define MMUB_APOLLO 4 /* Custom Apollo */
+ #define MMUB_SUN3 5 /* Custom Sun-3 */
++#define MMUB_CFV4E 6
+
+ #define MMU_68851 (1<<MMUB_68851)
+ #define MMU_68030 (1<<MMUB_68030)
+@@ -242,6 +261,7 @@ extern unsigned long m68k_machtype;
+ #define MMU_68060 (1<<MMUB_68060)
+ #define MMU_SUN3 (1<<MMUB_SUN3)
+ #define MMU_APOLLO (1<<MMUB_APOLLO)
++#define MMU_CFV4E (1<<MMUB_CFV4E)
+
+ #ifdef __KERNEL__
+
+@@ -341,6 +361,14 @@ extern int m68k_is040or060;
+ # endif
+ #endif
+
++#if !defined(CONFIG_CFV4E)
++# define CPU_IS_COLDFIRE (0)
++#else
++# define CPU_IS_COLDFIRE (m68k_cputype & CPU_CFV4E)
++# define CPU_IS_CFV4E (m68k_cputype & CPU_CFV4E)
++# define MMU_IS_CFV4E (m68k_mmutype & MMU_CFV4E)
++#endif
++
+ #define CPU_TYPE (m68k_cputype)
+
+ #ifdef CONFIG_M68KFPU_EMU
+@@ -371,6 +399,14 @@ extern int m68k_realnum_memory; /* real
+ extern struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
+ #endif
+
++#ifdef CONFIG_CFV4E
++#define QCHIP_RESTORE_DIRECTIVE ".chip 547x"
++#define CHIP_RESTORE_DIRECTIVE .chip 547x
++#else
++#define QCHIP_RESTORE_DIRECTIVE ".chip 68k"
++#define CHIP_RESTORE_DIRECTIVE .chip 68k
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _M68K_SETUP_H */
+--- a/arch/m68k/include/asm/sigcontext.h
++++ b/arch/m68k/include/asm/sigcontext.h
+@@ -15,9 +15,15 @@ struct sigcontext {
+ unsigned long sc_pc;
+ unsigned short sc_formatvec;
+ #ifndef __uClinux__
++# ifdef __mcoldfire__
++ unsigned long sc_fpregs[2][2]; /* room for two fp registers */
++ unsigned long sc_fpcntl[3];
++ unsigned char sc_fpstate[16+6*8];
++# else
+ unsigned long sc_fpregs[2*3]; /* room for two fp registers */
+ unsigned long sc_fpcntl[3];
+ unsigned char sc_fpstate[216];
++# endif
+ #endif
+ };
+
+--- a/arch/m68k/include/asm/siginfo.h
++++ b/arch/m68k/include/asm/siginfo.h
+@@ -29,7 +29,8 @@ typedef struct siginfo {
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
++ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)
++ + sizeof(__kernel_uid_t)];
+ sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+@@ -38,18 +39,18 @@ typedef struct siginfo {
+ struct {
+ __kernel_pid_t _pid; /* sender's pid */
+ __kernel_uid_t _uid; /* backwards compatibility */
+- sigval_t _sigval;
+ __kernel_uid32_t _uid32; /* sender's uid */
++ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ __kernel_pid_t _pid; /* which child */
+ __kernel_uid_t _uid; /* backwards compatibility */
+- int _status; /* exit code */
++ __kernel_uid32_t _uid32; /* sender's uid */
+ clock_t _utime;
+ clock_t _stime;
+- __kernel_uid32_t _uid32; /* sender's uid */
++ int _status; /* exit code */
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+--- a/arch/m68k/include/asm/signal.h
++++ b/arch/m68k/include/asm/signal.h
+@@ -150,7 +150,8 @@ typedef struct sigaltstack {
+ #ifdef __KERNEL__
+ #include <asm/sigcontext.h>
+
+-#ifndef __uClinux__
++//#ifndef __uClinux__
++#ifndef CONFIG_COLDFIRE /*FIXME Jason*/
+ #define __HAVE_ARCH_SIG_BITOPS
+
+ static inline void sigaddset(sigset_t *set, int _sig)
+@@ -201,7 +202,6 @@ static inline int sigfindinword(unsigned
+
+ struct pt_regs;
+ extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
+-
+ #else
+
+ #undef __HAVE_ARCH_SIG_BITOPS
+--- a/arch/m68k/include/asm/string_mm.h
++++ b/arch/m68k/include/asm/string_mm.h
+@@ -93,6 +93,7 @@ static inline char *strchr(const char *s
+ return (char *)s - 1;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ #define __HAVE_ARCH_STRCMP
+ static inline int strcmp(const char *cs, const char *ct)
+ {
+@@ -110,6 +111,7 @@ static inline int strcmp(const char *cs,
+ : "+a" (cs), "+a" (ct), "=d" (res));
+ return res;
+ }
++#endif
+
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, __kernel_size_t);
+--- a/arch/m68k/include/asm/swab.h
++++ b/arch/m68k/include/asm/swab.h
+@@ -4,7 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+
+-#define __SWAB_64_THRU_32__
++/*#define __SWAB_64_THRU_32__
+
+ #if defined (__mcfisaaplus__) || defined (__mcfisac__)
+ static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
+@@ -23,5 +23,29 @@ static inline __attribute_const__ __u32
+ }
+ #define __arch_swab32 __arch_swab32
+ #endif
++*/
++#if defined(__GNUC__)
++#if defined(__mcfisaaplus__) || defined(__mcfisac__)
++static inline __attribute_const__ __u32 ___arch__swab32(__u32 val)
++{
++ __asm__ ("byterev %0" : "=d" (val) : "0" (val));
++ return val;
++}
++#define __arch__swab32(x) ___arch__swab32(x)
++#elif !defined(__mcoldfire__)
++static inline __attribute_const__ __u32 ___arch__swab32(__u32 val)
++{
++ __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val));
++ return val;
++}
++#define __arch__swab32(x) ___arch__swab32(x)
++
++#endif
++#endif
++
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
++# define __BYTEORDER_HAS_U64__
++# define __SWAB_64_THRU_32__
++#endif
+
+ #endif /* _M68K_SWAB_H */
+--- a/arch/m68k/include/asm/system_mm.h
++++ b/arch/m68k/include/asm/system_mm.h
+@@ -5,9 +5,24 @@
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+ #include <asm/entry.h>
++#include <asm/cfcache.h>
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_COLDFIRE
++#define FLUSH_BC (0x00040000)
++
++#define finish_arch_switch(prev) do { \
++ unsigned long tmpreg; \
++ asm volatile ( "move.l %2,%0\n" \
++ "orl %1,%0\n" \
++ "movec %0,%%cacr" \
++ : "=&d" (tmpreg) \
++ : "id" (FLUSH_BC), "m" (shadow_cacr)); \
++ } while(0)
++
++#endif
++
+ /*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+@@ -63,16 +78,25 @@ asmlinkage void resume(void);
+ #define smp_read_barrier_depends() ((void)0)
+
+ /* interrupt control.. */
+-#if 0
+-#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
+-#else
+ #include <linux/hardirq.h>
++#ifndef CONFIG_COLDFIRE
+ #define local_irq_enable() ({ \
+ if (MACH_IS_Q40 || !hardirq_count()) \
+ asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
+ })
+-#endif
+ #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
++#else /* CONFIG_COLDFIRE */
++#define local_irq_enable() \
++ asm volatile ("move.w %%sr, %%d0\n\t" \
++ "andil #0xf8ff,%%d0\n\t" \
++ "move.w %%d0, %%sr\n" \
++ : : : "cc", "d0", "memory")
++#define local_irq_disable() \
++ asm volatile ("move %/sr,%%d0\n\t" \
++ "ori.l #0x0700,%%d0\n\t" \
++ "move %%d0,%/sr\n" \
++ : : : "cc", "%d0", "memory")
++#endif
+ #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
+ #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
+
+--- a/arch/m68k/include/asm/thread_info_mm.h
++++ b/arch/m68k/include/asm/thread_info_mm.h
+@@ -10,6 +10,7 @@ struct thread_info {
+ struct exec_domain *exec_domain; /* execution domain */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ __u32 cpu; /* should always be 0 on m68k */
++ unsigned long tp_value;
+ struct restart_block restart_block;
+ };
+
+--- a/arch/m68k/include/asm/tlbflush.h
++++ b/arch/m68k/include/asm/tlbflush.h
+@@ -2,7 +2,7 @@
+ #define _M68K_TLBFLUSH_H
+
+ #ifdef CONFIG_MMU
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+
+ #include <asm/current.h>
+
+@@ -92,7 +92,12 @@ static inline void flush_tlb_kernel_rang
+ flush_tlb_all();
+ }
+
+-#else
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++}
++
++#elif defined(CONFIG_SUN3)
+
+
+ /* Reserved PMEGs. */
+@@ -214,6 +219,15 @@ static inline void flush_tlb_kernel_page
+ sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
+ }
+
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++}
++
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/cf_tlbflush.h>
++
+ #endif
+
+ #else /* !CONFIG_MMU */
+--- a/arch/m68k/include/asm/uaccess_mm.h
++++ b/arch/m68k/include/asm/uaccess_mm.h
+@@ -1,6 +1,9 @@
+ #ifndef __M68K_UACCESS_H
+ #define __M68K_UACCESS_H
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_uaccess.h>
++#else
+ /*
+ * User space memory access functions
+ */
+@@ -371,4 +374,5 @@ unsigned long __clear_user(void __user *
+
+ #define strlen_user(str) strnlen_user(str, 32767)
+
++#endif /* CONFIG_COLDFIRE */
+ #endif /* _M68K_UACCESS_H */
+--- a/arch/m68k/include/asm/ucontext.h
++++ b/arch/m68k/include/asm/ucontext.h
+@@ -7,7 +7,11 @@ typedef greg_t gregset_t[NGREG];
+
+ typedef struct fpregset {
+ int f_fpcntl[3];
++#ifdef __mcoldfire__
++ int f_fpregs[8][2];
++#else
+ int f_fpregs[8*3];
++#endif
+ } fpregset_t;
+
+ struct mcontext {
+--- a/arch/m68k/include/asm/unistd.h
++++ b/arch/m68k/include/asm/unistd.h
+@@ -336,10 +336,14 @@
+ #define __NR_pwritev 330
+ #define __NR_rt_tgsigqueueinfo 331
+ #define __NR_perf_counter_open 332
++#define __NR_read_tp 333
++#define __NR_write_tp 334
++#define __NR_atomic_cmpxchg_32 335
++#define __NR_atomic_barrier 336
+
+ #ifdef __KERNEL__
+
+-#define NR_syscalls 333
++#define NR_syscalls 337
+
+ #define __ARCH_WANT_IPC_PARSE_VERSION
+ #define __ARCH_WANT_OLD_READDIR
+--- a/arch/m68k/include/asm/virtconvert.h
++++ b/arch/m68k/include/asm/virtconvert.h
+@@ -1,6 +1,10 @@
+ #ifndef __VIRT_CONVERT__
+ #define __VIRT_CONVERT__
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cf_virtconvert.h>
++#else
++
+ /*
+ * Macros used for converting between virtual and physical mappings.
+ */
+@@ -46,3 +50,4 @@ static inline void *phys_to_virt(unsigne
+
+ #endif
+ #endif
++#endif
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -12,6 +12,14 @@ config MMU
+ bool
+ default y
+
++config GENERIC_TIME
++ bool "Enable generic timer"
++ default n
++
++config GENERIC_CLOCKEVENTS
++ bool "Enable generic clockevents"
++ default n
++
+ config RWSEM_GENERIC_SPINLOCK
+ bool
+ default y
+@@ -37,7 +45,7 @@ config GENERIC_CALIBRATE_DELAY
+
+ config TIME_LOW_RES
+ bool
+- default y
++ default n
+
+ config GENERIC_IOMAP
+ bool
+@@ -49,7 +57,7 @@ config ARCH_MAY_HAVE_PC_FDC
+ default y
+
+ config NO_IOPORT
+- def_bool y
++ def_bool !(M5445X || M547X_8X)
+
+ config NO_DMA
+ def_bool SUN3
+@@ -107,6 +115,35 @@ config PCMCIA
+ To compile this driver as modules, choose M here: the
+ modules will be called pcmcia_core and ds.
+
++config COLDFIRE
++ bool "ColdFire V4e support"
++ default y
++ select CFV4E
++ help
++ Say Y if you want to build a kernel to run on one of the ColdFire
++ V4e boards.
++
++config CFV4E
++ bool
++ depends on COLDFIRE
++ select MMU_CFV4E if MMU
++ default y
++
++config FPU
++ bool "ColdFire V4e FPU support"
++ default n
++ help
++ This enables support for the CFV4E FPU feature.
++
++config MCD_DMA
++ bool "ColdFire MCD DMA support"
++ depends on M547X_8X
++ default y
++ help
++ This enables support for the ColdFire 547x/548x family
++ multichannel DMA controller. Many drivers need it.
++ If you want it, say Y.
++
+ config AMIGA
+ bool "Amiga support"
+ select MMU_MOTOROLA if MMU
+@@ -124,6 +161,16 @@ config ATARI
+ this kernel on an Atari, say Y here and browse the material
+ available in <file:Documentation/m68k>; otherwise say N.
+
++config PCI
++ bool "PCI bus support"
++ depends on M54455 || M547X_8X
++ default n
++ help
++ Find out whether you have a PCI motherboard. PCI is the name of a
++ bus system, i.e. the way the CPU talks to the other stuff inside
++ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
++ VESA. If you have PCI, say Y, otherwise N.
++
+ config MAC
+ bool "Macintosh support"
+ select MMU_MOTOROLA if MMU
+@@ -278,6 +325,118 @@ config M68060
+ If you anticipate running this kernel on a computer with a MC68060
+ processor, say Y. Otherwise, say N.
+
++config M5445X
++ bool "MCF5445x support"
++ depends on COLDFIRE
++ select GENERIC_TIME
++ select USB_EHCI_FSL
++ select HAVE_FSL_USB_DR
++ help
++ This option will add support for the MCF544x processor with MMU.
++
++config M54451
++ bool
++ depends on M5445X
++ default n
++
++config M54455
++ bool
++ depends on M5445X
++ default n
++
++choice
++ prompt "Model"
++ depends on M5445X
++ default M54451EVB
++ config M54451EVB
++ bool "M54451EVB"
++ select M54451
++ config M54455EVB
++ bool "M54455EVB"
++ select M54455
++endchoice
++
++config HAVE_FSL_USB_DR
++ bool
++ default n
++
++config M547X_8X
++ bool "MCF547x/MCF548x support"
++ depends on COLDFIRE
++ help
++ This option will add support for the MCF547x/MCF548x processor with MMU.
++
++config M547X
++ bool
++ depends on M547X_8X
++ default n
++
++config M548X
++ bool
++ depends on M547X_8X
++ default n
++
++choice
++ prompt "Model"
++ depends on M547X_8X
++ default M5485CFE
++
++config M5475AFE
++ bool "MCF5475AFE"
++ select M547X
++config M5475BFE
++ bool "MCF5475BFE"
++ select M547X
++config M5475CFE
++ bool "MCF5475CFE"
++ select M547X
++config M5475DFE
++ bool "MCF5475DFE"
++ select M547X
++config M5475EFE
++ bool "MCF5475EFE"
++ select M547X
++config M5475FFE
++ bool "MCF5475FFE"
++ select M547X
++config M5485AFE
++ bool "MCF5485AFE"
++ select M548X
++config M5485BFE
++ bool "MCF5485BFE"
++ select M548X
++config M5485CFE
++ bool "MCF5485CFE"
++ select M548X
++config M5485DFE
++ bool "MCF5485DFE"
++ select M548X
++config M5485EFE
++ bool "MCF5485EFE"
++ select M548X
++config M5485FFE
++ bool "MCF5485FFE"
++ select M548X
++
++endchoice
++
++
++config MCFCLK
++ int
++ default 240000000 if M54451EVB
++ default 266666666 if M54455EVB
++ default 266000000 if M547X
++ default 200000000 if M548X
++ help
++ Coldfire System clock.
++
++config MCF_USER_HALT
++ bool "Coldfire User Halt Enable"
++ depends on M5445X || M547X_8X
++ default n
++ help
++ Enables the HALT instruction in User Mode.
++
+ config MMU_MOTOROLA
+ bool
+
+@@ -285,6 +444,70 @@ config MMU_SUN3
+ bool
+ depends on MMU && !MMU_MOTOROLA
+
++config MMU_CFV4E
++ bool
++
++config SDRAM_BASE
++ hex
++ depends on COLDFIRE
++ default 0x40000000 if M5445X
++ default 0x00000000 if M547X_8X
++
++config SDRAM_SIZE
++ hex
++ depends on COLDFIRE
++ default 0x08000000 if M54451EVB
++ default 0x10000000 if M54455EVB
++ default 0x04000000 if M547X_8X
++
++config NOR_FLASH_BASE
++ hex "NOR Flash Base Address"
++ depends on COLDFIRE
++ default 0x00000000 if M54451EVB
++ default 0x00000000 if M54455EVB
++ default 0xE0000000 if M547X_8X
++
++config DMA_BASE
++ hex
++ depends on COLDFIRE
++ default 0xef000000
++
++config DMA_SIZE
++ hex
++ depends on COLDFIRE
++ default 0x1000000 if M5445X
++ default 0x800000 if M547X_8X
++
++config SRAM
++ bool "SRAM allocation APIs support on mcfv4 platform"
++ depends on COLDFIRE && M5445X
++ default y
++ select GENERIC_ALLOCATOR
++
++config SRAM_BASE
++ hex
++ depends on COLDFIRE && SRAM
++ default 0x8ff00000 if M5445X
++
++config SRAM_SIZE
++ hex
++ depends on COLDFIRE && SRAM
++ default 0x8000 if M5445X
++
++config SRAM_ALLOC_GRANULARITY
++ hex
++ depends on SRAM
++ default 0x200 if M5445X
++
++config VDSO
++ bool "Support VDSO page"
++ depends on MMU
++ default n
++ help
++ This will enable support for the kernel mapping a vDSO page
++ in process space, and subsequently handing down the entry point
++ to the libc through the ELF auxiliary vector.
++
+ config M68KFPU_EMU
+ bool "Math emulation support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+@@ -451,6 +674,14 @@ config ZONE_DMA
+ source "drivers/pci/Kconfig"
+
+ source "drivers/zorro/Kconfig"
++endmenu
++
++menu "Power management options"
++
++config PM
++ bool "Power Management support"
++ help
++ Support processor power management modes
+
+ endmenu
+
+@@ -589,7 +820,7 @@ config DN_SERIAL
+
+ config SERIAL_CONSOLE
+ bool "Support for serial port console"
+- depends on (AMIGA || ATARI || MAC || SUN3 || SUN3X || VME || APOLLO) && (ATARI_MFPSER=y || ATARI_MIDI=y || MAC_SCC=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL)
++ depends on (AMIGA || ATARI || MAC || SUN3 || SUN3X || VME || APOLLO || COLDFIRE) && (ATARI_MFPSER=y || ATARI_MIDI=y || MAC_SCC=y || AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y || SERIAL=y || MVME147_SCC || SERIAL167 || MVME162_SCC || BVME6000_SCC || DN_SERIAL || SERIAL_COLDFIRE)
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+@@ -612,6 +843,8 @@ config SERIAL_CONSOLE
+
+ endmenu
+
++source "kernel/time/Kconfig"
++
+ source "fs/Kconfig"
+
+ source "arch/m68k/Kconfig.debug"
+--- a/arch/m68k/kernel/asm-offsets.c
++++ b/arch/m68k/kernel/asm-offsets.c
+@@ -2,6 +2,11 @@
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ * Add Coldfire support
++ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+@@ -56,8 +61,15 @@ int main(void)
+ DEFINE(PT_A2, offsetof(struct pt_regs, a2));
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_SR, offsetof(struct pt_regs, sr));
++#ifdef CONFIG_COLDFIRE
++ /* Need to get the context out of struct mm for ASID setting */
++ DEFINE(MM_CONTEXT, offsetof(struct mm_struct, context));
++ /* Coldfire exception frame has vector *before* pc */
++ DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) - 4);
++#else
+ /* bitfields are a bit difficult */
+ DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
++#endif
+
+ /* offsets into the irq_handler struct */
+ DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
+--- a/arch/m68k/kernel/dma.c
++++ b/arch/m68k/kernel/dma.c
+@@ -1,4 +1,7 @@
+ /*
++ * Copyright Freescale Semiconductor, Inc. 2008, 2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+@@ -11,12 +14,24 @@
+ #include <linux/kernel.h>
+ #include <linux/scatterlist.h>
+ #include <linux/vmalloc.h>
+-
++#include <linux/pci.h>
+ #include <asm/pgalloc.h>
+
+ void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t flag)
+ {
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++ /*
++ * On the M5445x and M547x/M548x platforms, memory allocated
++ * with GFP_DMA is guaranteed to be DMA'able.
++ */
++ void *addr;
++
++ size = PAGE_ALIGN(size);
++ addr = kmalloc(size, GFP_DMA);
++ *handle = virt_to_phys(addr);
++ return addr;
++#else
+ struct page *page, **map;
+ pgprot_t pgprot;
+ void *addr;
+@@ -55,6 +70,7 @@ void *dma_alloc_coherent(struct device *
+ kfree(map);
+
+ return addr;
++#endif
+ }
+ EXPORT_SYMBOL(dma_alloc_coherent);
+
+@@ -62,7 +78,11 @@ void dma_free_coherent(struct device *de
+ void *addr, dma_addr_t handle)
+ {
+ pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
++#if defined(CONFIG_M5445X) || defined(CONFIG_M547X_8X)
++ kfree(addr);
++#else
+ vfree(addr);
++#endif
+ }
+ EXPORT_SYMBOL(dma_free_coherent);
+
+@@ -88,9 +108,16 @@ void dma_sync_sg_for_device(struct devic
+ enum dma_data_direction dir)
+ {
+ int i;
++#ifdef CONFIG_COLDFIRE
++ struct scatterlist *_sg;
+
++ for_each_sg(sg, _sg, nents, i)
++ dma_sync_single_for_device(dev, _sg->dma_address,
++ _sg->length, dir);
++#else
+ for (i = 0; i < nents; sg++, i++)
+ dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
++#endif
+ }
+ EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+@@ -119,10 +146,19 @@ int dma_map_sg(struct device *dev, struc
+ enum dma_data_direction dir)
+ {
+ int i;
+-
++#ifdef CONFIG_COLDFIRE
++ struct scatterlist *_sg;
++#endif
++#ifndef CONFIG_COLDFIRE
+ for (i = 0; i < nents; sg++, i++) {
+ sg->dma_address = sg_phys(sg);
+ dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
++#else
++ for_each_sg(sg, _sg, nents, i) {
++ _sg->dma_address = sg_phys(_sg);
++ dma_sync_single_for_device(dev, _sg->dma_address,
++ _sg->length, dir);
++#endif
+ }
+ return nents;
+ }
+--- a/arch/m68k/kernel/Makefile
++++ b/arch/m68k/kernel/Makefile
+@@ -2,16 +2,26 @@
+ # Makefile for the linux kernel.
+ #
+
+-ifndef CONFIG_SUN3
+- extra-y := head.o
++ifdef CONFIG_SUN3
++ extra-y := sun3-head.o vmlinux.lds
++ obj-y := entry.o signal.o ints.o time.o
+ else
+- extra-y := sun3-head.o
++ifndef CONFIG_COLDFIRE
++ extra-y := head.o vmlinux.lds
++ obj-y := entry.o signal.o traps.o ints.o time.o
++else # CONFIG_COLDFIRE
++ extra-y := vmlinux.lds
++ ifdef CONFIG_M547X_8X
++ obj-$(CONFIG_PCI) += bios32_mcf548x.o
++ endif
++endif
+ endif
+-extra-y += vmlinux.lds
+
+-obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
+- sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
++obj-y += process.o ptrace.o module.o \
++ sys_m68k.o setup.o m68k_ksyms.o devres.o # semaphore.o
+
+ devres-y = ../../../kernel/irq/devres.o
+
+ obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
++
++EXTRA_AFLAGS := -traditional
+--- a/arch/m68k/kernel/process.c
++++ b/arch/m68k/kernel/process.c
+@@ -4,6 +4,11 @@
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
++ *
++ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Kurt.Mahan@freescale.com
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ /*
+@@ -186,12 +191,21 @@ EXPORT_SYMBOL(kernel_thread);
+ void flush_thread(void)
+ {
+ unsigned long zero = 0;
++#if !defined(CONFIG_COLDFIRE)
+ set_fs(USER_DS);
+ current->thread.fs = __USER_DS;
+ if (!FPU_IS_EMU)
+ asm volatile (".chip 68k/68881\n\t"
+ "frestore %0@\n\t"
+ ".chip 68k" : : "a" (&zero));
++#else
++ set_fs(USER_DS);
++ current->thread.fs = USER_DS;
++#if defined(CONFIG_FPU)
++ if (!FPU_IS_EMU)
++ asm volatile ("frestore %0@\n\t" : : "a" (&zero));
++#endif
++#endif
+ }
+
+ /*
+@@ -251,10 +265,15 @@ int copy_thread(unsigned long clone_flag
+
+ p->thread.usp = usp;
+ p->thread.ksp = (unsigned long)childstack;
++
++ if (clone_flags & CLONE_SETTLS)
++ task_thread_info(p)->tp_value = regs->d5;
++
+ /*
+ * Must save the current SFC/DFC value, NOT the value when
+ * the parent was last descheduled - RGH 10-08-96
+ */
++#if !defined(CONFIG_COLDFIRE)
+ p->thread.fs = get_fs().seg;
+
+ if (!FPU_IS_EMU) {
+@@ -266,9 +285,34 @@ int copy_thread(unsigned long clone_flag
+ "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+ : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
+ : "memory");
++#else
++ p->thread.fs = get_fs();
++
++#if defined(CONFIG_FPU)
++ if (!FPU_IS_EMU) {
++ /* Copy the current fpu state */
++ asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0])
++ : "memory");
++
++ if (p->thread.fpstate[0]) {
++ asm volatile ("fmovemd %/fp0-%/fp7,%0"
++ : : "m" (p->thread.fp[0])
++ : "memory");
++ asm volatile ("fmovel %/fpiar,%0"
++ : : "m" (p->thread.fpcntl[0])
++ : "memory");
++ asm volatile ("fmovel %/fpcr,%0"
++ : : "m" (p->thread.fpcntl[1])
++ : "memory");
++ asm volatile ("fmovel %/fpsr,%0"
++ : : "m" (p->thread.fpcntl[2])
++ : "memory");
++ }
+ /* Restore the state in case the fpu was busy */
+ asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
+ }
++#endif
++#endif
+
+ return 0;
+ }
+@@ -277,7 +321,9 @@ int copy_thread(unsigned long clone_flag
+
+ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
+ {
++#if !defined(CONFIG_COLDFIRE) || defined(CONFIG_FPU)
+ char fpustate[216];
++#endif
+
+ if (FPU_IS_EMU) {
+ int i;
+@@ -294,6 +340,7 @@ int dump_fpu (struct pt_regs *regs, stru
+ }
+
+ /* First dump the fpu context to avoid protocol violation. */
++#if !defined(CONFIG_COLDFIRE)
+ asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+ return 0;
+@@ -304,6 +351,25 @@ int dump_fpu (struct pt_regs *regs, stru
+ asm volatile ("fmovemx %/fp0-%/fp7,%0"
+ :: "m" (fpu->fpregs[0])
+ : "memory");
++#elif defined(CONFIG_FPU)
++ asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
++ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
++ return 0;
++
++ asm volatile ("fmovel %/fpiar,%0"
++ : : "m" (fpu->fpcntl[0])
++ : "memory");
++ asm volatile ("fmovel %/fpcr,%0"
++ : : "m" (fpu->fpcntl[1])
++ : "memory");
++ asm volatile ("fmovel %/fpsr,%0"
++ : : "m" (fpu->fpcntl[2])
++ : "memory");
++ asm volatile ("fmovemd %/fp0-%/fp7,%0"
++ : : "m" (fpu->fpregs[0])
++ : "memory");
++#endif
++
+ return 1;
+ }
+ EXPORT_SYMBOL(dump_fpu);
+--- a/arch/m68k/kernel/ptrace.c
++++ b/arch/m68k/kernel/ptrace.c
+@@ -265,6 +265,11 @@ long arch_ptrace(struct task_struct *chi
+ ret = -EFAULT;
+ break;
+
++ case PTRACE_GET_THREAD_AREA:
++ ret = put_user(task_thread_info(child)->tp_value,
++ (unsigned long __user *) data);
++ break;
++
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+--- a/arch/m68k/kernel/setup.c
++++ b/arch/m68k/kernel/setup.c
+@@ -2,6 +2,9 @@
+ * linux/arch/m68k/kernel/setup.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright Freescale Semiconductor, Inc. 2008, 2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ /*
+@@ -75,13 +78,24 @@ EXPORT_SYMBOL(m68k_memory);
+
+ struct mem_info m68k_ramdisk;
+
++#if !defined(CONFIG_COLDFIRE)
+ static char m68k_command_line[CL_SIZE];
++#else
++char m68k_command_line[CL_SIZE];
++unsigned long uboot_info_stk;
++EXPORT_SYMBOL(uboot_info_stk);
++#endif
+
+ void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
+ /* machine dependent irq functions */
+ void (*mach_init_IRQ) (void) __initdata = NULL;
+ void (*mach_get_model) (char *model);
+ void (*mach_get_hardware_list) (struct seq_file *m);
++
++#ifdef CONFIG_COLDFIRE
++void (*mach_tick)(void);
++#endif
++
+ /* machine dependent timer functions */
+ unsigned long (*mach_gettimeoffset) (void);
+ int (*mach_hwclk) (int, struct rtc_time*);
+@@ -137,13 +151,17 @@ extern void config_hp300(void);
+ extern void config_q40(void);
+ extern void config_sun3x(void);
+
++#ifdef CONFIG_COLDFIRE
++void coldfire_sort_memrec(void);
++#endif
++
+ #define MASK_256K 0xfffc0000
+
+ extern void paging_init(void);
+
+ static void __init m68k_parse_bootinfo(const struct bi_record *record)
+ {
+- while (record->tag != BI_LAST) {
++ while ((record->tag != BI_LAST)) {
+ int unknown = 0;
+ const unsigned long *data = record->data;
+
+@@ -203,6 +221,10 @@ static void __init m68k_parse_bootinfo(c
+ record->size);
+ }
+
++#ifdef CONFIG_COLDFIRE
++ coldfire_sort_memrec();
++#endif
++
+ m68k_realnum_memory = m68k_num_memory;
+ #ifdef CONFIG_SINGLE_MEMORY_CHUNK
+ if (m68k_num_memory > 1) {
+@@ -215,8 +237,11 @@ static void __init m68k_parse_bootinfo(c
+
+ void __init setup_arch(char **cmdline_p)
+ {
+- int i;
+
++#if !defined(CONFIG_SUN3)
++ int i;
++#endif
++
+ /* The bootinfo is located right after the kernel bss */
+ m68k_parse_bootinfo((const struct bi_record *)_end);
+
+@@ -230,9 +255,10 @@ void __init setup_arch(char **cmdline_p)
+ * We should really do our own FPU check at startup.
+ * [what do we do with buggy 68LC040s? if we have problems
+ * with them, we should add a test to check_bugs() below] */
+-#ifndef CONFIG_M68KFPU_EMU_ONLY
++#if !defined(CONFIG_M68KFPU_EMU_ONLY) && defined(CONFIG_FPU)
+ /* clear the fpu if we have one */
+- if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
++ if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|
++ FPU_CFV4E)) {
+ volatile int zero = 0;
+ asm volatile ("frestore %0" : : "m" (zero));
+ }
+@@ -320,13 +346,18 @@ void __init setup_arch(char **cmdline_p)
+ config_sun3x();
+ break;
+ #endif
++#ifdef CONFIG_COLDFIRE
++ case MACH_CFMMU:
++ config_coldfire();
++ break;
++#endif
+ default:
+ panic("No configuration setup");
+ }
+
+ paging_init();
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3)
+ for (i = 1; i < m68k_num_memory; i++)
+ free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
+ m68k_memory[i].size);
+@@ -353,6 +384,10 @@ void __init setup_arch(char **cmdline_p)
+
+ #endif /* !CONFIG_SUN3 */
+
++#ifdef CONFIG_COLDFIRE
++ mmu_context_init();
++#endif
++
+ /* set ISA defs early as possible */
+ #if defined(CONFIG_ISA) && defined(MULTI_ISA)
+ if (MACH_IS_Q40) {
+@@ -383,6 +418,7 @@ static int show_cpuinfo(struct seq_file
+ #define LOOP_CYCLES_68030 (8)
+ #define LOOP_CYCLES_68040 (3)
+ #define LOOP_CYCLES_68060 (1)
++#define LOOP_CYCLES_COLDFIRE (2)
+
+ if (CPU_IS_020) {
+ cpu = "68020";
+@@ -396,6 +432,9 @@ static int show_cpuinfo(struct seq_file
+ } else if (CPU_IS_060) {
+ cpu = "68060";
+ clockfactor = LOOP_CYCLES_68060;
++ } else if (CPU_IS_CFV4E) {
++ cpu = "ColdFire V4e";
++ clockfactor = LOOP_CYCLES_COLDFIRE;
+ } else {
+ cpu = "680x0";
+ clockfactor = 0;
+@@ -414,6 +453,8 @@ static int show_cpuinfo(struct seq_file
+ fpu = "68060";
+ else if (m68k_fputype & FPU_SUNFPA)
+ fpu = "Sun FPA";
++ else if (m68k_fputype & FPU_CFV4E)
++ fpu = "ColdFire V4e";
+ else
+ fpu = "none";
+ #endif
+@@ -430,6 +471,8 @@ static int show_cpuinfo(struct seq_file
+ mmu = "Sun-3";
+ else if (m68k_mmutype & MMU_APOLLO)
+ mmu = "Apollo";
++ else if (m68k_mmutype & MMU_CFV4E)
++ mmu = "ColdFire";
+ else
+ mmu = "unknown";
+
+@@ -512,7 +555,7 @@ module_init(proc_hardware_init);
+
+ void check_bugs(void)
+ {
+-#ifndef CONFIG_M68KFPU_EMU
++#if !defined(CONFIG_M68KFPU_EMU) && !defined(CONFIG_M5445X)
+ if (m68k_fputype == 0) {
+ printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+ "WHICH IS REQUIRED BY LINUX/M68K ***\n");
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -1,5 +1,8 @@
+ /*
+ * linux/arch/m68k/kernel/sys_m68k.c
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+@@ -29,6 +32,14 @@
+ #include <asm/traps.h>
+ #include <asm/page.h>
+ #include <asm/unistd.h>
++#include <linux/elf.h>
++#include <asm/tlb.h>
++#ifdef CONFIG_COLDFIRE
++#include <asm/cacheflush.h>
++#endif
++
++asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
++ unsigned long error_code);
+
+ /* common code for old and new mmaps */
+ static inline long do_mmap2(
+@@ -240,6 +251,7 @@ asmlinkage int sys_ipc (uint call, int f
+ return -EINVAL;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ /* Convert virtual (user) address VADDR to physical address PADDR */
+ #define virt_to_phys_040(vaddr) \
+ ({ \
+@@ -563,6 +575,7 @@ cache_flush_060 (unsigned long addr, int
+ }
+ return 0;
+ }
++#endif /* CONFIG_COLDFIRE */
+
+ /* sys_cacheflush -- flush (part of) the processor cache. */
+ asmlinkage int
+@@ -595,6 +608,7 @@ sys_cacheflush (unsigned long addr, int
+ goto out;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ if (CPU_IS_020_OR_030) {
+ if (scope == FLUSH_SCOPE_LINE && len < 256) {
+ unsigned long cacr;
+@@ -639,6 +653,16 @@ sys_cacheflush (unsigned long addr, int
+ ret = cache_flush_060 (addr, scope, cache, len);
+ }
+ }
++#else /* CONFIG_COLDFIRE */
++ if ((cache & FLUSH_CACHE_INSN) && (cache & FLUSH_CACHE_DATA))
++ flush_bcache();
++ else if (cache & FLUSH_CACHE_INSN)
++ flush_icache();
++ else
++ flush_dcache();
++
++ ret = 0;
++#endif /* CONFIG_COLDFIRE */
+ out:
+ unlock_kernel();
+ return ret;
+@@ -663,3 +687,79 @@ int kernel_execve(const char *filename,
+ : "d" (__a), "d" (__b), "d" (__c));
+ return __res;
+ }
++
++asmlinkage unsigned long
++sys_read_tp(void)
++{
++ return current_thread_info()->tp_value;
++}
++
++asmlinkage int
++sys_write_tp(unsigned long tp)
++{
++ current_thread_info()->tp_value = tp;
++ return 0;
++}
++
++/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
++ D1 (newval). */
++asmlinkage int
++sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
++ unsigned long __user *mem)
++{
++ /* This was borrowed from ARM's implementation. */
++ for (;;) {
++ struct mm_struct *mm = current->mm;
++ pgd_t *pgd; pmd_t *pmd; pte_t *pte;
++ spinlock_t *ptl;
++ unsigned long mem_value;
++
++ down_read(&mm->mmap_sem);
++ pgd = pgd_offset(mm, (unsigned long)mem);
++ if (!pgd_present(*pgd))
++ goto bad_access;
++ pmd = pmd_offset(pgd, (unsigned long)mem);
++ if (!pmd_present(*pmd))
++ goto bad_access;
++ pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
++ if (!pte_present(*pte) || !pte_dirty(*pte)) {
++ pte_unmap_unlock(pte, ptl);
++ goto bad_access;
++ }
++
++ mem_value = *mem;
++ if (mem_value == oldval)
++ *mem = newval;
++
++ pte_unmap_unlock(pte, ptl);
++ up_read(&mm->mmap_sem);
++ return mem_value;
++
++bad_access:
++ up_read(&mm->mmap_sem);
++ /* This is not necessarily a bad access, we can get here if
++ the memory we're trying to write to should be copied-on-write.
++ Make the kernel do the necessary page stuff, then re-iterate.
++ Simulate a write access fault to do that. */
++ {
++ /* The first argument of the function corresponds to
++ D1, which is the first field of struct pt_regs. */
++ struct pt_regs *fp = (struct pt_regs *)&newval;
++
++ /* '3' is an RMW flag. */
++ if (do_page_fault(fp, (unsigned long)mem, 3))
++ /* If the do_page_fault() failed, we don't
++ have anything meaningful to return.
++ There should be a SIGSEGV pending for
++ the process. */
++ return 0xdeadbeef;
++ }
++ }
++}
++
++asmlinkage int
++sys_atomic_barrier(void)
++{
++ /* no code needed for uniprocs */
++ return 0;
++}
+--- a/arch/m68k/kernel/time.c
++++ b/arch/m68k/kernel/time.c
+@@ -2,6 +2,9 @@
+ * linux/arch/m68k/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+@@ -41,6 +44,11 @@ static inline int set_rtc_mmss(unsigned
+ */
+ static irqreturn_t timer_interrupt(int irq, void *dummy)
+ {
++#ifdef CONFIG_COLDFIRE
++ /* kick hardware timer if necessary */
++ if (mach_tick)
++ mach_tick();
++#endif
+ do_timer(1);
+ #ifndef CONFIG_SMP
+ update_process_times(user_mode(get_irq_regs()));
+--- a/arch/m68k/kernel/vmlinux.lds.S
++++ b/arch/m68k/kernel/vmlinux.lds.S
+@@ -1,10 +1,13 @@
+ PHDRS
+ {
+- text PT_LOAD FILEHDR PHDRS FLAGS (7);
++ headers PT_PHDR PHDRS ;
++ text PT_LOAD FILEHDR PHDRS FLAGS (5);
+ data PT_LOAD FLAGS (7);
+ }
+ #ifdef CONFIG_SUN3
+ #include "vmlinux-sun3.lds"
++#elif defined(CONFIG_COLDFIRE)
++#include "vmlinux-cf.lds"
+ #else
+ #include "vmlinux-std.lds"
+ #endif
+--- a/arch/m68k/lib/checksum.c
++++ b/arch/m68k/lib/checksum.c
+@@ -30,6 +30,10 @@
+ * 1998/8/31 Andreas Schwab:
+ * Zero out rest of buffer on exception in
+ * csum_partial_copy_from_user.
++ *
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ #include <linux/module.h>
+@@ -39,8 +43,131 @@
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
++#ifdef CONFIG_COLDFIRE
++
++static inline unsigned short from32to16(unsigned long x)
++{
++ /* add up 16-bit and 16-bit for 16+c bit */
++ x = (x & 0xffff) + (x >> 16);
++ /* add up carry.. */
++ x = (x & 0xffff) + (x >> 16);
++ return x;
++}
++
++static unsigned long do_csum(const unsigned char *buff, int len)
++{
++ int odd, count;
++ unsigned long result = 0;
++
++ if (len <= 0)
++ goto out;
++ odd = 1 & (unsigned long) buff;
++ if (odd) {
++ result = *buff;
++ len--;
++ buff++;
++ }
++ count = len >> 1; /* nr of 16-bit words.. */
++ if (count) {
++ if (2 & (unsigned long) buff) {
++ result += *(unsigned short *) buff;
++ count--;
++ len -= 2;
++ buff += 2;
++ }
++ count >>= 1; /* nr of 32-bit words.. */
++ if (count) {
++ unsigned long carry = 0;
++ do {
++ unsigned long w = *(unsigned long *) buff;
++ count--;
++ buff += 4;
++ result += carry;
++ result += w;
++ carry = (w > result);
++ } while (count);
++ result += carry;
++ result = (result & 0xffff) + (result >> 16);
++ }
++ if (len & 2) {
++ result += *(unsigned short *) buff;
++ buff += 2;
++ }
++ }
++ if (len & 1)
++ result += (*buff << 8);
++ result = from32to16(result);
++ if (odd)
++ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
++out:
++ return result;
++}
++
++/*
++ * This is a version of ip_compute_csum() optimized for IP headers,
++ * which always checksum on 4 octet boundaries.
++ */
++__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
++{
++ return ~do_csum(iph, ihl*4);
++}
++EXPORT_SYMBOL(ip_fast_csum);
++
++/*
++ * computes the checksum of a memory block at buff, length len,
++ * and adds in "sum" (32-bit)
++ *
++ * returns a 32-bit number suitable for feeding into itself
++ * or csum_tcpudp_magic
++ *
++ * this function must be called with even lengths, except
++ * for the last fragment, which may be odd
++ *
++ * it's best to have buff aligned on a 32-bit boundary
++ */
+ __wsum csum_partial(const void *buff, int len, __wsum sum)
+ {
++ unsigned int result = do_csum(buff, len);
++
++ /* add in old sum, and carry.. */
++ result += sum;
++ if (sum > result)
++ result += 1;
++ return result;
++}
++EXPORT_SYMBOL(csum_partial);
++
++/*
++ * copy from fs while checksumming, otherwise like csum_partial
++ */
++
++__wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len,
++ __wsum sum, int *csum_err)
++{
++ if (csum_err) *csum_err = 0;
++ memcpy(dst, src, len);
++ return csum_partial(dst, len, sum);
++}
++EXPORT_SYMBOL(csum_partial_copy_from_user);
++
++/*
++ * copy from ds while checksumming, otherwise like csum_partial
++ */
++
++__wsum
++csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
++{
++ memcpy(dst, src, len);
++ return csum_partial(dst, len, sum);
++}
++EXPORT_SYMBOL(csum_partial_copy_nocheck);
++
++#else /* !CONFIG_COLDFIRE */
++
++__wsum
++csum_partial(const void *buff, int len, __wsum sum)
++{
+ unsigned long tmp1, tmp2;
+ /*
+ * Experiments with ethernet and slip connections show that buff
+@@ -423,3 +550,4 @@ csum_partial_copy_nocheck(const void *sr
+ return(sum);
+ }
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
++#endif /* CONFIG_COLDFIRE */
+--- a/arch/m68k/lib/muldi3.c
++++ b/arch/m68k/lib/muldi3.c
+@@ -1,6 +1,9 @@
+ /* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
+ gcc-2.7.2.3/longlong.h which is: */
+ /* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
++ Copyright Freescale Semiconductor, Inc. 2008-2009
++ Jason Jin Jason.Jin@freescale.com
++ Shrek Wu B16972@freescale.com
+
+ This file is part of GNU CC.
+
+@@ -21,12 +24,22 @@ Boston, MA 02111-1307, USA. */
+
+ #define BITS_PER_UNIT 8
+
++#ifdef CONFIG_COLDFIRE
++#define umul_ppmm(w1, w0, u, v) \
++ do { \
++ unsigned long long x; \
++ x = (unsigned long long)u * v; \
++ w0 = (unsigned long)(x & 0x00000000ffffffff); \
++ w1 = (unsigned long)(x >> 32); \
++ } while (0)
++#else /* CONFIG_COLDFIRE */
+ #define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulu%.l %3,%1:%0" \
+ : "=d" ((USItype)(w0)), \
+ "=d" ((USItype)(w1)) \
+ : "%0" ((USItype)(u)), \
+ "dmi" ((USItype)(v)))
++#endif /* CONFIG_COLDFIRE */
+
+ #define __umulsidi3(u, v) \
+ ({DIunion __w; \
+--- a/arch/m68k/lib/string.c
++++ b/arch/m68k/lib/string.c
+@@ -1,4 +1,8 @@
+ /*
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+@@ -21,6 +25,7 @@ char *strcat(char *dest, const char *src
+ }
+ EXPORT_SYMBOL(strcat);
+
++#ifndef CONFIG_COLDFIRE
+ void *memset(void *s, int c, size_t count)
+ {
+ void *xs = s;
+@@ -149,6 +154,69 @@ void *memcpy(void *to, const void *from,
+ }
+ EXPORT_SYMBOL(memcpy);
+
++#else /* CONFIG_COLDFIRE */
++
++void *memset(void *s, int c, size_t count)
++{
++ unsigned long x;
++ void *originalTo = s;
++
++ for (x = 0; x < count; x++)
++ *(unsigned char *)s++ = (unsigned char)c;
++
++ return originalTo;
++}
++EXPORT_SYMBOL(memset);
++
++void *memcpy(void *to, const void *from, size_t n)
++{
++ void *xto = to;
++ size_t temp;
++
++ if (!n)
++ return xto;
++ if ((long) to & 1) {
++ char *cto = to;
++ const char *cfrom = from;
++ *cto++ = *cfrom++;
++ to = cto;
++ from = cfrom;
++ n--;
++ }
++ if (n > 2 && (long) to & 2) {
++ short *sto = to;
++ const short *sfrom = from;
++ *sto++ = *sfrom++;
++ to = sto;
++ from = sfrom;
++ n -= 2;
++ }
++ temp = n >> 2;
++ if (temp) {
++ long *lto = to;
++ const long *lfrom = from;
++ for (; temp; temp--)
++ *lto++ = *lfrom++;
++ to = lto;
++ from = lfrom;
++ }
++ if (n & 2) {
++ short *sto = to;
++ const short *sfrom = from;
++ *sto++ = *sfrom++;
++ to = sto;
++ from = sfrom;
++ }
++ if (n & 1) {
++ char *cto = to;
++ const char *cfrom = from;
++ *cto = *cfrom;
++ }
++ return xto;
++}
++EXPORT_SYMBOL(memcpy);
++#endif /* CONFIG_COLDFIRE */
++
+ void *memmove(void *dest, const void *src, size_t n)
+ {
+ void *xdest = dest;
+--- a/arch/m68k/lib/uaccess.c
++++ b/arch/m68k/lib/uaccess.c
+@@ -1,10 +1,15 @@
+ /*
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+ #include <linux/module.h>
++#ifndef CONFIG_COLDFIRE
+ #include <asm/uaccess.h>
+
+ unsigned long __generic_copy_from_user(void *to, const void __user *from,
+@@ -220,3 +225,244 @@ unsigned long __clear_user(void __user *
+ return res;
+ }
+ EXPORT_SYMBOL(__clear_user);
++
++#else /* CONFIG_COLDFIRE */
++
++#include <asm/cf_uaccess.h>
++
++unsigned long __generic_copy_from_user(void *to, const void *from,
++ unsigned long n)
++{
++ unsigned long tmp;
++ __asm__ __volatile__
++ (" tstl %2\n"
++ " jeq 2f\n"
++ "1: movel (%1)+,%3\n"
++ " movel %3,(%0)+\n"
++ " subql #1,%2\n"
++ " jne 1b\n"
++ "2: movel %4,%2\n"
++ " bclr #1,%2\n"
++ " jeq 4f\n"
++ "3: movew (%1)+,%3\n"
++ " movew %3,(%0)+\n"
++ "4: bclr #0,%2\n"
++ " jeq 6f\n"
++ "5: moveb (%1)+,%3\n"
++ " moveb %3,(%0)+\n"
++ "6:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "7: movel %2,%%d0\n"
++ "71:clrl (%0)+\n"
++ " subql #1,%%d0\n"
++ " jne 71b\n"
++ " lsll #2,%2\n"
++ " addl %4,%2\n"
++ " btst #1,%4\n"
++ " jne 81f\n"
++ " btst #0,%4\n"
++ " jne 91f\n"
++ " jra 6b\n"
++ "8: addql #2,%2\n"
++ "81:clrw (%0)+\n"
++ " btst #0,%4\n"
++ " jne 91f\n"
++ " jra 6b\n"
++ "9: addql #1,%2\n"
++ "91:clrb (%0)+\n"
++ " jra 6b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,7b\n"
++ " .long 3b,8b\n"
++ " .long 5b,9b\n"
++ ".previous"
++ : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
++ : "d"(n & 3), "0"(to), "1"(from), "2"(n/4)
++ : "d0", "memory");
++ return n;
++}
++EXPORT_SYMBOL(__generic_copy_from_user);
++
++
++unsigned long __generic_copy_to_user(void *to, const void *from,
++ unsigned long n)
++{
++ unsigned long tmp;
++ __asm__ __volatile__
++ (" tstl %2\n"
++ " jeq 3f\n"
++ "1: movel (%1)+,%3\n"
++ "22:movel %3,(%0)+\n"
++ "2: subql #1,%2\n"
++ " jne 1b\n"
++ "3: movel %4,%2\n"
++ " bclr #1,%2\n"
++ " jeq 4f\n"
++ " movew (%1)+,%3\n"
++ "24:movew %3,(%0)+\n"
++ "4: bclr #0,%2\n"
++ " jeq 5f\n"
++ " moveb (%1)+,%3\n"
++ "25:moveb %3,(%0)+\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "60:addql #1,%2\n"
++ "6: lsll #2,%2\n"
++ " addl %4,%2\n"
++ " jra 5b\n"
++ "7: addql #2,%2\n"
++ " jra 5b\n"
++ "8: addql #1,%2\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,60b\n"
++ " .long 22b,6b\n"
++ " .long 2b,6b\n"
++ " .long 24b,7b\n"
++ " .long 3b,60b\n"
++ " .long 4b,7b\n"
++ " .long 25b,8b\n"
++ " .long 5b,8b\n"
++ ".previous"
++ : "=a"(to), "=a"(from), "=d"(n), "=&d"(tmp)
++ : "r"(n & 3), "0"(to), "1"(from), "2"(n / 4)
++ : "memory");
++ return n;
++}
++EXPORT_SYMBOL(__generic_copy_to_user);
++
++/*
++ * Copy a null terminated string from userspace.
++ */
++
++long strncpy_from_user(char *dst, const char *src, long count)
++{
++ long res = -EFAULT;
++ if (!(access_ok(VERIFY_READ, src, 1))) /* --tym-- */
++ return res;
++ if (count == 0) return count;
++ __asm__ __volatile__
++ ("1: moveb (%2)+,%%d0\n"
++ "12:moveb %%d0,(%1)+\n"
++ " jeq 2f\n"
++ " subql #1,%3\n"
++ " jne 1b\n"
++ "2: subl %3,%0\n"
++ "3:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "4: movel %4,%0\n"
++ " jra 3b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,4b\n"
++ " .long 12b,4b\n"
++ ".previous"
++ : "=d"(res), "=a"(dst), "=a"(src), "=d"(count)
++ : "i"(-EFAULT), "0"(count), "1"(dst), "2"(src), "3"(count)
++ : "d0", "memory");
++ return res;
++}
++EXPORT_SYMBOL(strncpy_from_user);
++
++/*
++ * Return the size of a string (including the ending 0)
++ *
++ * Return 0 on exception, a value greater than N if too long
++ */
++long strnlen_user(const char *src, long n)
++{
++ long res = -EFAULT;
++ if (!(access_ok(VERIFY_READ, src, 1))) /* --tym-- */
++ return res;
++
++ res = -(long)src;
++ __asm__ __volatile__
++ ("1:\n"
++ " tstl %2\n"
++ " jeq 3f\n"
++ "2: moveb (%1)+,%%d0\n"
++ "22:\n"
++ " subql #1,%2\n"
++ " tstb %%d0\n"
++ " jne 1b\n"
++ " jra 4f\n"
++ "3:\n"
++ " addql #1,%0\n"
++ "4:\n"
++ " addl %1,%0\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "6: moveq %3,%0\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 2b,6b\n"
++ " .long 22b,6b\n"
++ ".previous"
++ : "=d"(res), "=a"(src), "=d"(n)
++ : "i"(0), "0"(res), "1"(src), "2"(n)
++ : "d0");
++ return res;
++}
++EXPORT_SYMBOL(strnlen_user);
++
++
++/*
++ * Zero Userspace
++ */
++
++unsigned long __clear_user(void *to, unsigned long n)
++{
++ __asm__ __volatile__
++ (" tstl %1\n"
++ " jeq 3f\n"
++ "1: movel %3,(%0)+\n"
++ "2: subql #1,%1\n"
++ " jne 1b\n"
++ "3: movel %2,%1\n"
++ " bclr #1,%1\n"
++ " jeq 4f\n"
++ "24:movew %3,(%0)+\n"
++ "4: bclr #0,%1\n"
++ " jeq 5f\n"
++ "25:moveb %3,(%0)+\n"
++ "5:\n"
++ ".section .fixup,\"ax\"\n"
++ " .even\n"
++ "61:addql #1,%1\n"
++ "6: lsll #2,%1\n"
++ " addl %2,%1\n"
++ " jra 5b\n"
++ "7: addql #2,%1\n"
++ " jra 5b\n"
++ "8: addql #1,%1\n"
++ " jra 5b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,61b\n"
++ " .long 2b,6b\n"
++ " .long 3b,61b\n"
++ " .long 24b,7b\n"
++ " .long 4b,7b\n"
++ " .long 25b,8b\n"
++ " .long 5b,8b\n"
++ ".previous"
++ : "=a"(to), "=d"(n)
++ : "r"(n & 3), "d"(0), "0"(to), "1"(n/4));
++ return n;
++}
++EXPORT_SYMBOL(__clear_user);
++
++#endif /* CONFIG_COLDFIRE */
++
+--- a/arch/m68k/Makefile
++++ b/arch/m68k/Makefile
+@@ -1,6 +1,8 @@
+ #
+ # m68k/Makefile
+ #
++# Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
++#
+ # This file is included by the global makefile so that you can add your own
+ # architecture-specific flags and dependencies. Remember to do have actions
+ # for "archclean" and "archdep" for cleaning up and making dependencies for
+@@ -10,13 +12,13 @@
+ # License. See the file "COPYING" in the main directory of this archive
+ # for more details.
+ #
+-# Copyright (C) 1994 by Hamish Macdonald
+-#
+
+-KBUILD_DEFCONFIG := multi_defconfig
++KBUILD_DEFCONFIG := amiga_defconfig#multi_defconfig
+
+ # override top level makefile
++ifndef CONFIG_COLDFIRE
+ AS += -m68020
++endif
+ LDFLAGS := -m m68kelf
+ LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
+ ifneq ($(SUBARCH),$(ARCH))
+@@ -30,12 +32,18 @@ ifdef CONFIG_SUN3
+ LDFLAGS_vmlinux = -N
+ endif
+
++ifdef CONFIG_COLDFIRE
++OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
++# LDFLAGS_vmlinux = --verbose
++endif
++
+ CHECKFLAGS += -D__mc68000__
+
+ # without -fno-strength-reduce the 53c7xx.c driver fails ;-(
+ KBUILD_CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
+
+ # enable processor switch if compiled only for a single cpu
++ifndef CONFIG_COLDFIRE
+ ifndef CONFIG_M68020
+ ifndef CONFIG_M68030
+
+@@ -49,6 +57,17 @@ endif
+
+ endif
+ endif
++endif
++
++ifdef CONFIG_M5445X
++KBUILD_CFLAGS += -march=isac -mcpu=54455 -msoft-float -g
++KBUILD_AFLAGS += -march=isac -mcpu=54455 -msoft-float
++endif
++
++ifdef CONFIG_M547X_8X
++KBUILD_CFLAGS += -mcfv4e -g
++KBUILD_AFLAGS += -mcfv4e
++endif
+
+ ifdef CONFIG_KGDB
+ # If configured for kgdb support, include debugging infos and keep the
+@@ -57,8 +76,12 @@ KBUILD_CFLAGS := $(subst -fomit-frame-po
+ endif
+
+ ifndef CONFIG_SUN3
++ifndef CONFIG_COLDFIRE
+ head-y := arch/m68k/kernel/head.o
+ else
++head-y := arch/m68k/coldfire/common/head.o
++endif
++else
+ head-y := arch/m68k/kernel/sun3-head.o
+ endif
+
+@@ -79,7 +102,20 @@ core-$(CONFIG_SUN3) += arch/m68k/sun3/
+ core-$(CONFIG_M68040) += arch/m68k/fpsp040/
+ core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
+ core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
++core-$(CONFIG_COLDFIRE) += arch/m68k/coldfire/
++
++ifdef CONFIG_COLDFIRE
++boot := arch/m68k/boot
++
++all: uImage
++
++zImage zImage.srec uImage uImage.srec vmlinux.srec: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
++archclean:
++ $(Q)$(MAKE) $(clean)=$(boot)
++
++else
+ all: zImage
+
+ lilo: vmlinux
+@@ -117,6 +153,7 @@ endif
+
+ archclean:
+ rm -f vmlinux.gz vmlinux.bz2
++endif
+
+ install:
+ sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
+--- a/arch/m68k/mm/cache.c
++++ b/arch/m68k/mm/cache.c
+@@ -4,13 +4,20 @@
+ * Instruction cache handling
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ #include <linux/module.h>
+ #include <asm/pgalloc.h>
+ #include <asm/traps.h>
+
++#ifdef CONFIG_COLDFIRE
++#include <asm/cfcache.h>
++#endif /* CONFIG_COLDFIRE */
+
++#ifndef CONFIG_COLDFIRE
+ static unsigned long virt_to_phys_slow(unsigned long vaddr)
+ {
+ if (CPU_IS_060) {
+@@ -69,11 +76,18 @@ static unsigned long virt_to_phys_slow(u
+ }
+ return 0;
+ }
++#endif /* CONFIG_COLDFIRE */
++
+
+ /* Push n pages at kernel virtual address and clear the icache */
+ /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+ void flush_icache_range(unsigned long address, unsigned long endaddr)
+ {
++#ifdef CONFIG_COLDFIRE
++/* JKM -- hack until new cpushl stuff is in */
++/* cf_icache_flush_range(address, endaddr); */
++ flush_icache();
++#else /* !CONFIG_COLDFIRE */
+
+ if (CPU_IS_040_OR_060) {
+ address &= PAGE_MASK;
+@@ -94,9 +108,11 @@ void flush_icache_range(unsigned long ad
+ : "=&d" (tmp)
+ : "di" (FLUSH_I));
+ }
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(flush_icache_range);
+
++#ifndef CONFIG_COLDFIRE
+ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+ {
+@@ -115,4 +131,5 @@ void flush_icache_user_range(struct vm_a
+ : "di" (FLUSH_I));
+ }
+ }
++#endif /* CONFIG_COLDFIRE */
+
+--- a/arch/m68k/mm/hwtest.c
++++ b/arch/m68k/mm/hwtest.c
+@@ -12,6 +12,10 @@
+ * them here complete with the comments from the original atari
+ * config.c...
+ * -- PMM <pmaydell@chiark.greenend.org.uk>, 05/1998
++ *
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ */
+
+ /* This function tests for the presence of an address, specially a
+@@ -25,6 +29,7 @@
+
+ #include <linux/module.h>
+
++#ifndef CONFIG_COLDFIRE
+ int hwreg_present( volatile void *regp )
+ {
+ int ret = 0;
+@@ -82,4 +87,5 @@ int hwreg_write( volatile void *regp, un
+ return( ret );
+ }
+ EXPORT_SYMBOL(hwreg_write);
++#endif
+
+--- a/arch/m68k/mm/init.c
++++ b/arch/m68k/mm/init.c
+@@ -2,6 +2,9 @@
+ * linux/arch/m68k/mm/init.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
+@@ -31,6 +34,10 @@
+ #include <asm/sections.h>
+ #include <asm/tlb.h>
+
++#ifdef CONFIG_VDSO
++int vdso_init(void);
++#endif
++
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+ pg_data_t pg_data_map[MAX_NUMNODES];
+@@ -88,7 +95,6 @@ void __init mem_init(void)
+ if (MACH_IS_ATARI)
+ atari_stram_mem_init_hook();
+ #endif
+-
+ /* this will put all memory onto the freelists */
+ totalram_pages = num_physpages = 0;
+ for_each_online_pgdat(pgdat) {
+@@ -112,7 +118,7 @@ void __init mem_init(void)
+ }
+ }
+
+-#ifndef CONFIG_SUN3
++#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+ /* insert pointer tables allocated so far into the tablelist */
+ init_pointer_table((unsigned long)kernel_pg_dir);
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+@@ -131,6 +137,11 @@ void __init mem_init(void)
+ codepages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10));
++
++#ifdef CONFIG_VDSO
++ /* init the vdso page */
++ vdso_init();
++#endif
+ }
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+--- a/arch/m68k/mm/kmap.c
++++ b/arch/m68k/mm/kmap.c
+@@ -2,6 +2,9 @@
+ * linux/arch/m68k/mm/kmap.c
+ *
+ * Copyright (C) 1997 Roman Hodek
++ * Copyright Freescale Semiconductor, Inc. 2008, 2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * 10/01/99 cleaned up the code and changing to the same interface
+ * used by other architectures /Roman Zippel
+@@ -24,7 +27,11 @@
+
+ #undef DEBUG
+
++#ifndef CONFIG_COLDFIRE
+ #define PTRTREESIZE (256*1024)
++#else
++#define PTRTREESIZE PAGE_SIZE
++#endif
+
+ /*
+ * For 040/060 we can use the virtual memory area like other architectures,
+@@ -50,7 +57,11 @@ static inline void free_io_area(void *ad
+
+ #else
+
++#ifdef CONFIG_COLDFIRE
++#define IO_SIZE PAGE_SIZE
++#else
+ #define IO_SIZE (256*1024)
++#endif
+
+ static struct vm_struct *iolist;
+
+@@ -127,8 +138,41 @@ void __iomem *__ioremap(unsigned long ph
+ }
+ #endif
+
++#ifdef CONFIG_M5445X
++ if (physaddr >= 0xf0000000) {
++ /*
++ * On the M5445x processors an ACR is setup to map
++ * the 0xF0000000 range into kernel memory as
++ * non-cacheable.
++ */
++ return (void __iomem *)physaddr;
++ }
++ if ((physaddr >= KMAP_START) && (physaddr <= KMAP_END)) {
++ /* if physaddr belongs to virtual address range for ioremap,
++ * then return physaddr because it has been ioremapped
++ */
++ return (void __iomem *)physaddr;
++ }
++#endif
++#ifdef CONFIG_M547X_8X
++ if (physaddr >= 0xf0000000) {
++ /*
++ * On the M547x/M548x processors an ACR is setup to map
++ * the 0xF0000000 range into kernel memory as
++ * non-cacheable.
++ */
++ return (void __iomem *)physaddr;
++ }
++
++ if ((physaddr >= 0xd0000000) && (physaddr + size < 0xd800ffff)) {
++ printk(KERN_ERR "ioremap: PCI 0x%lx,0x%lx(%d)"
++ " - PCI area hit\n", physaddr, size, cacheflag);
++ return (void __iomem *)physaddr;
++ }
++#endif
+ #ifdef DEBUG
+- printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
++ printk(KERN_ERR "ioremap: paddr=0x%lx,size=0x%lx(%d) - ",
++ physaddr, size, cacheflag);
+ #endif
+ /*
+ * Mappings have to be aligned
+@@ -147,7 +191,8 @@ void __iomem *__ioremap(unsigned long ph
+ virtaddr = (unsigned long)area->addr;
+ retaddr = virtaddr + offset;
+ #ifdef DEBUG
+- printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
++ printk(KERN_ERR " paddr=0x%lx,vaddr=0x%lx,retaddr=0x%lx",
++ physaddr, virtaddr, retaddr);
+ #endif
+
+ /*
+@@ -172,7 +217,12 @@ void __iomem *__ioremap(unsigned long ph
+ break;
+ }
+ } else {
++#ifndef CONFIG_COLDFIRE
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
++#else
++ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
++ _PAGE_READWRITE);
++#endif
+ switch (cacheflag) {
+ case IOMAP_NOCACHE_SER:
+ case IOMAP_NOCACHE_NONSER:
+@@ -252,6 +302,13 @@ void __iounmap(void *addr, unsigned long
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
++#ifdef CONFIG_M547X_8X
++ if ((addr >= (void *)0xd0000000)
++ && (addr + size < (void *)0xd800ffff)) {
++ printk(KERN_ERR "%s: PCI address\n", __func__);
++ return;
++ }
++#endif
+ while ((long)size > 0) {
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (pgd_bad(*pgd_dir)) {
+--- a/arch/m68k/mm/Makefile
++++ b/arch/m68k/mm/Makefile
+@@ -6,3 +6,5 @@ obj-y := cache.o init.o fault.o hwtest.
+
+ obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
+ obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
++obj-$(CONFIG_MMU_CFV4E) += cf-mmu.o kmap.o memory.o
++obj-$(CONFIG_SRAM) += cf-sram.o
+--- a/arch/m68k/mm/memory.c
++++ b/arch/m68k/mm/memory.c
+@@ -2,6 +2,10 @@
+ * linux/arch/m68k/mm/memory.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
++ * Copyright Freescale Semiconductor, Inc. 2008-2009
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
++ *
+ */
+
+ #include <linux/module.h>
+@@ -127,6 +131,7 @@ int free_pointer_table (pmd_t *ptable)
+ return 0;
+ }
+
++#ifndef CONFIG_COLDFIRE
+ /* invalidate page in both caches */
+ static inline void clear040(unsigned long paddr)
+ {
+@@ -173,6 +178,7 @@ static inline void pushcl040(unsigned lo
+ clear040(paddr);
+ local_irq_restore(flags);
+ }
++#endif /* CONFIG_COLDFIRE */
+
+ /*
+ * 040: Hit every page containing an address in the range paddr..paddr+len-1.
+@@ -203,6 +209,11 @@ static inline void pushcl040(unsigned lo
+
+ void cache_clear (unsigned long paddr, int len)
+ {
++#ifdef CONFIG_COLDFIRE
++/* JKM -- revise to use proper caching */
++/* cf_cache_clear(paddr, len); */
++ flush_bcache();
++#else
+ if (CPU_IS_040_OR_060) {
+ int tmp;
+
+@@ -237,6 +248,7 @@ void cache_clear (unsigned long paddr, i
+ if(mach_l2_flush)
+ mach_l2_flush(0);
+ #endif
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(cache_clear);
+
+@@ -250,6 +262,11 @@ EXPORT_SYMBOL(cache_clear);
+
+ void cache_push (unsigned long paddr, int len)
+ {
++#ifdef CONFIG_COLDFIRE
++/* JKM -- revise to use proper caching */
++/* cf_cache_push(paddr, len); */
++ flush_bcache();
++#else
+ if (CPU_IS_040_OR_060) {
+ int tmp = PAGE_SIZE;
+
+@@ -290,6 +307,7 @@ void cache_push (unsigned long paddr, in
+ if(mach_l2_flush)
+ mach_l2_flush(1);
+ #endif
++#endif /* CONFIG_COLDFIRE */
+ }
+ EXPORT_SYMBOL(cache_push);
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3,6 +3,10 @@
+ *
+ * (C) Copyright Al Viro 2000, 2001
+ * Released under GPL v2.
++ * (c) Copyright Freescale Semiconductor, Inc. 2008, 2009
++ * Change to align on page size for coldfire
++ * Jason Jin Jason.Jin@freescale.com
++ * Shrek Wu B16972@freescale.com
+ *
+ * Based on code from fs/super.c, copyright Linus Torvalds and others.
+ * Heavily rewritten.
+@@ -1858,7 +1862,11 @@ int copy_mount_options(const void __user
+ /* copy_from_user cannot cross TASK_SIZE ! */
+ size = TASK_SIZE - (unsigned long)data;
+ if (size > PAGE_SIZE)
++#ifndef CONFIG_COLDFIRE
+ size = PAGE_SIZE;
++#else
++ size = PAGE_SIZE - ((unsigned long)data & ~PAGE_MASK);
++#endif
+
+ i = size - exact_copy_from_user((void *)page, data, size);
+ if (!i) {
+--- a/include/linux/fsl_devices.h
++++ b/include/linux/fsl_devices.h
+@@ -6,7 +6,7 @@
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+- * Copyright 2004 Freescale Semiconductor, Inc
++ * Copyright 2004-2008 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+@@ -18,6 +18,7 @@
+ #define _FSL_DEVICE_H_
+
+ #include <linux/types.h>
++#include <linux/interrupt.h>
+
+ /*
+ * Some conventions on how we handle peripherals on Freescale chips
+@@ -58,11 +59,42 @@ enum fsl_usb2_phy_modes {
+ FSL_USB2_PHY_SERIAL,
+ };
+
++struct platform_device;
+ struct fsl_usb2_platform_data {
+ /* board specific information */
+ enum fsl_usb2_operating_modes operating_mode;
+ enum fsl_usb2_phy_modes phy_mode;
+ unsigned int port_enables;
++
++ char *name; /* pretty print */
++ int (*platform_init) (struct platform_device *);
++ void (*platform_uninit) (struct fsl_usb2_platform_data *);
++ void __iomem *regs; /* ioremap'd register base */
++ u32 xcvr_type; /* PORTSC_PTS_* */
++ char *transceiver; /* transceiver name */
++ unsigned power_budget; /* for hcd->power_budget */
++ struct platform_device *pdev;
++ struct fsl_xcvr_ops *xcvr_ops;
++ int (*gpio_usb_active) (void);
++ void (*gpio_usb_inactive) (void);
++ unsigned big_endian_mmio : 1;
++ unsigned big_endian_desc : 1;
++ unsigned es : 1; /* need USBMODE:ES */
++ unsigned have_sysif_regs : 1;
++ unsigned le_setup_buf : 1;
++ unsigned suspended : 1;
++ unsigned already_suspended : 1;
++
++ /* register save area for suspend/resume */
++ u32 pm_command;
++ u32 pm_status;
++ u32 pm_intr_enable;
++ u32 pm_frame_index;
++ u32 pm_segment;
++ u32 pm_frame_list;
++ u32 pm_async_next;
++ u32 pm_configured_flag;
++ u32 pm_portsc;
+ };
+
+ /* Flags in fsl_usb2_mph_platform_data */
+@@ -92,4 +124,30 @@ struct mpc8xx_pcmcia_ops {
+ */
+ int fsl_deep_sleep(void);
+
++struct fsl_ata_platform_data {
++#ifdef CONFIG_FSL_PATA_USE_DMA
++ int udma_mask; /* UDMA modes h/w can handle */
++ int fifo_alarm; /* value for fifo_alarm reg */
++ int max_sg; /* longest sglist h/w can handle */
++#endif
++ int (*init)(struct platform_device *pdev);
++ void (*exit)(void);
++ int (*get_clk_rate)(void);
++};
++
++struct coldfire_fec_platform_data {
++ int hash_table;
++ unsigned int *fec_hw;
++ void (*request_intrs)(struct net_device *dev,
++ irqreturn_t (*)(int, void *),
++ void *irq_privatedata);
++ void (*set_mii)(struct net_device *dev);
++ void (*get_mac)(struct net_device *dev);
++ void (*enable_phy_intr)(void);
++ void (*disable_phy_intr)(void);
++ void (*phy_ack_intr)(void);
++ void (*localhw_setup)(void);
++ void (*uncache)(unsigned long addr);
++ void (*platform_flush_cache)(void);
++};
+ #endif /* _FSL_DEVICE_H_ */
--- /dev/null
+From 9eaa978feb942497c4542cc82e63d5468dc8f184 Mon Sep 17 00:00:00 2001
+From: Wang Huan <wanghuan@zch06.freescale.net>
+Date: Thu, 25 Feb 2010 15:27:21 +0800
+Subject: [PATCH 05/23] Add common serial driver and add IRDA support for m547x_8x
+
+Add a common serial driver for the mcf5445x, mcf547x and mcf548x boards.
+Also add IrDA support for the mcf547x and mcf548x boards.
+
+Signed-off-by: Shrek Wu <b16972@freescale.com>
+Signed-off-by: Jason Jin <jason.jin@freescale.com>
+Signed-off-by: Chengju-Cai <b22600@freescale.com>
+---
+ arch/m68k/include/asm/mcfuart.h | 35 +++++++++++++---
+ drivers/serial/Kconfig | 13 ++++++
+ drivers/serial/mcf.c | 88 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 130 insertions(+), 6 deletions(-)
+
+--- a/arch/m68k/include/asm/mcfuart.h
++++ b/arch/m68k/include/asm/mcfuart.h
+@@ -47,18 +48,35 @@
+ #define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
+ #define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
+ #define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
++#elif defined(CONFIG_M5445X)
++#include <asm/mcf5445x_intc.h>
++#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
++#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
++#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
++#define MCFINT_VECBASE 64
++#define MCFINT_UART0 26
++#elif defined(CONFIG_M547X_8X)
++#define MCFUART_BASE1 0x8600 /* Base address of UART1 */
++#define MCFUART_BASE2 0x8700 /* Base address of UART2 */
++#define MCFUART_BASE3 0x8800 /* Base address of UART3 */
++#define MCFUART_BASE4 0x8900 /* Base address of UART4 */
++#define MCFINT_VECBASE 64
++#define MCFINT_UART0 35
++#define MCFINT_UART1 34
++#define MCFINT_UART2 33
++#define MCFINT_UART3 32
+ #endif
+
+-
++#ifndef __ASSEMBLY__
+ #include <linux/serial_core.h>
+ #include <linux/platform_device.h>
+-
+ struct mcf_platform_uart {
+- unsigned long mapbase; /* Physical address base */
+- void __iomem *membase; /* Virtual address if mapped */
+- unsigned int irq; /* Interrupt vector */
+- unsigned int uartclk; /* UART clock rate */
++ unsigned long mapbase; /* Physical address base */
++ void __iomem *membase; /* Virtual address if mapped */
++ unsigned int irq; /* Interrupt vector */
++ unsigned int uartclk; /* UART clock rate */
+ };
++#endif
+
+ /*
+ * Define the ColdFire UART register set addresses.
--- /dev/null
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -2010,6 +2010,29 @@ config FEC2
+ Say Y here if you want to use the second built-in 10/100 Fast
+ ethernet controller on some Motorola ColdFire processors.
+
++config FEC_548x
++ tristate "MCF547x/MCF548x Fast Ethernet Controller support"
++ depends on M547X_8X
++ select PHYLIB
++ help
++ The MCF547x and MCF548x have a built-in Fast Ethernet Controller.
++ Saying Y here will include support for this device in the kernel.
++
++ To compile this driver as a module, choose M here: the module
++ will be called fecm.
++
++config FEC_548x_ENABLE_FEC2
++ bool "Enable the second FEC"
++ depends on FEC_548x
++ help
++ This enables the second FEC on the 547x/548x.
++
++config FEC_548x_SHARED_PHY
++ bool "Shared PHY interface (on some ColdFire designs)"
++ depends on FEC_548x_ENABLE_FEC2
++ help
++ Say Y here if both PHYs are controlled via a single channel.
++
+ config FEC_MPC52xx
+ tristate "MPC52xx FEC driver"
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -114,6 +114,7 @@ obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
+ obj-$(CONFIG_HP100) += hp100.o
+ obj-$(CONFIG_SMC9194) += smc9194.o
+ obj-$(CONFIG_FEC) += fec.o
++obj-$(CONFIG_FEC_548x) += fec_m547x.o
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
--- /dev/null
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -62,6 +62,11 @@ config BROADCOM_PHY
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM5464, BCM5481
+ and BCM5482 PHYs.
+
++config BROADCOM5222_PHY
++ tristate "Drivers for Broadcom BCM5222 PHY"
++ ---help---
++ Currently supports the BCM5222 PHY.
++
+ config ICPLUS_PHY
+ tristate "Drivers for ICPlus PHYs"
+ ---help---
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_QSEMI_PHY) += qsemi.o
+ obj-$(CONFIG_SMSC_PHY) += smsc.o
+ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
++obj-$(CONFIG_BROADCOM5222_PHY) += broadcom522x.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_ADM6996_PHY) += adm6996.o
+ obj-$(CONFIG_MVSWITCH_PHY) += mvswitch.o
--- /dev/null
+From a324800cc0ac1a0bd2a596751d276f5daa9d17d2 Mon Sep 17 00:00:00 2001
+From: Wang Huan <wanghuan@zch06.freescale.net>
+Date: Thu, 4 Feb 2010 16:42:07 +0800
+Subject: [PATCH 20/25] Add CFV4E FPU support for MCF547X_8X
+
+Porting the fpu support from ltib-mcf547x_8x-20070107 (2.6.10)
+
+Signed-off-by: Lanttor Guo <lanttor.guo@freescale.com>
+---
+ arch/m68k/include/asm/fpu.h | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+--- a/arch/m68k/include/asm/fpu.h
++++ b/arch/m68k/include/asm/fpu.h
+@@ -14,6 +14,8 @@
+ #define FPSTATESIZE (28)
+ #elif defined(CONFIG_M68060)
+ #define FPSTATESIZE (12)
++#elif defined(CONFIG_CFV4E)
++#define FPSTATESIZE (16)
+ #else
+ #define FPSTATESIZE (0)
+ #endif
--- /dev/null
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -381,6 +381,9 @@ choice
+ depends on M547X_8X
+ default M5485CFE
+
++config M5474LITE
++ bool "MCF5474LITE"
++ select M547X
+ config M5475AFE
+ bool "MCF5475AFE"
+ select M547X
+@@ -399,6 +402,9 @@ config M5475EFE
+ config M5475FFE
+ bool "MCF5475FFE"
+ select M547X
++config M5484LITE
++ bool "MCF5484LITE"
++ select M548X
+ config M5485AFE
+ bool "MCF5485AFE"
+ select M548X
+@@ -465,7 +471,10 @@ config NOR_FLASH_BASE
+ depends on COLDFIRE
+ default 0x00000000 if M54451EVB
+ default 0x00000000 if M54455EVB
+- default 0xE0000000 if M547X_8X
++ default 0xE0000000 if M5475CFE
++ default 0xE0000000 if M5485CFE
++ default 0xFF800000 if M5484LITE
++ default 0xFF800000 if M5474LITE
+
+ config DMA_BASE
+ hex