From: Imre Kaloz Date: Wed, 21 Jul 2010 11:20:53 +0000 (+0000) Subject: add support for the Gateworks Laguna family (Cavium Networks Econa CNS3xxx) X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=a1cdb24a3bd55a1ef90799ee10e04d67fc36dc71;p=openwrt%2Fstaging%2Fthess.git add support for the Gateworks Laguna family (Cavium Networks Econa CNS3xxx) SVN-Revision: 22323 --- diff --git a/package/madwifi/Makefile b/package/madwifi/Makefile index f28254f661..bb621ddf33 100644 --- a/package/madwifi/Makefile +++ b/package/madwifi/Makefile @@ -85,6 +85,9 @@ endif ifeq ($(BOARD),orion) HAL_TARGET:=xscale-le-elf$(if $(CONFIG_EABI_SUPPORT),gnueabi) endif +ifeq ($(BOARD),cns3xxx) + HAL_TARGET:=arm11-le-elf$(if $(CONFIG_EABI_SUPPORT),gnueabi) +endif ifeq ($(ARCH),powerpc) HAL_TARGET:=powerpc-be-elf endif diff --git a/target/linux/cns3xxx/Makefile b/target/linux/cns3xxx/Makefile new file mode 100644 index 0000000000..45faa103c1 --- /dev/null +++ b/target/linux/cns3xxx/Makefile @@ -0,0 +1,26 @@ +# +# Copyright (C) 2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# +include $(TOPDIR)/rules.mk + +ARCH:=arm +BOARD:=cns3xxx +BOARDNAME:=Cavium Networks Econa CNS3xxx +FEATURES:=squashfs fpu gpio +CFLAGS:=-Os -pipe -march=armv6k -mtune=mpcore -mfloat-abi=softfp -mfpu=vfp -funit-at-a-time + +LINUX_VERSION:=2.6.31.14 + +include $(INCLUDE_DIR)/target.mk + +define Target/Description + Build images for Cavium Networks Econa CNS3xxx based boards, + eg. 
the Gateworks Laguna family +endef + +KERNELNAME:="uImage" + +$(eval $(call BuildTarget)) diff --git a/target/linux/cns3xxx/config-default b/target/linux/cns3xxx/config-default new file mode 100644 index 0000000000..527a465102 --- /dev/null +++ b/target/linux/cns3xxx/config-default @@ -0,0 +1,210 @@ +CONFIG_AEABI=y +CONFIG_ALIGNMENT_TRAP=y +CONFIG_ARCH_CNS3XXX=y +CONFIG_ARCH_REQUIRE_GPIOLIB=y +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARM=y +CONFIG_ARM_AMBA=y +CONFIG_ARM_GIC=y +CONFIG_ARM_THUMB=y +CONFIG_ASYNC_CORE=y +CONFIG_ASYNC_MEMCPY=y +CONFIG_ASYNC_XOR=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_BITREVERSE=y +# CONFIG_BLK_DEV_DM is not set +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=2 +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_BLK_DEV_SD=y +CONFIG_CACHE_L2CC=y +# CONFIG_CACHE_L2CC_128KB is not set +CONFIG_CACHE_L2CC_256KB=y +# CONFIG_CACHE_L2CC_32KB is not set +# CONFIG_CACHE_L2CC_64KB is not set +# CONFIG_CACHE_L2CC_96KB is not set +CONFIG_CACHE_L2_I_PREFETCH=y +CONFIG_CNS3XXX_DMAC=y +# CONFIG_CNS3XXX_GPU_ENVIRONMENT is not set +CONFIG_CNS3XXX_GSW=y +# CONFIG_CNS3XXX_HCIE_TEST is not set +CONFIG_CNS3XXX_PM_API=y +CONFIG_CNS3XXX_RAID=y +CONFIG_CNS3XXX_SPPE=y +CONFIG_CNS3XXX_WATCHDOG=y +CONFIG_COMMON_CLKDEV=y +CONFIG_CPU_32=y +CONFIG_CPU_32v6=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_ABRT_EV6=y +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V6=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_NO_CACHE_BCAST=y +CONFIG_CPU_NO_CACHE_BCAST_DEBUG=y +CONFIG_CPU_PABRT_NOIFAR=y +CONFIG_CPU_TLB_V6=y +CONFIG_CPU_V6=y +# CONFIG_DEBUG_USER is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DEVPORT=y +# CONFIG_DM9000 is not set +CONFIG_EEPROM_AT24=y +# CONFIG_FPE_FASTFPE is not set 
+# CONFIG_FPE_NWFPE is not set +# CONFIG_FPGA is not set +CONFIG_FRAME_POINTER=y +# CONFIG_FSNOTIFY is not set +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_LOCKBREAK=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_DEVICE=y +CONFIG_GPIO_PCA953X=y +# CONFIG_GPIO_PL061 is not set +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_TLS_REG=y +CONFIG_HAVE_AOUT=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARM_SCU=y +CONFIG_HAVE_ARM_TWD=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set +CONFIG_HW_RANDOM=m +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_CNS3XXX=y +# CONFIG_I2C_DESIGNWARE is not set +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_ISDN_CAPI is not set +# CONFIG_ISDN_I4L is not set +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +CONFIG_LEDS_GPIO=y +# CONFIG_LEDS_TRIGGER_NETDEV is not set +CONFIG_LOCAL_TIMERS=y +CONFIG_LOCK_KERNEL=y +CONFIG_M25PXX_USE_FAST_READ=y +CONFIG_MAC80211_DEFAULT_PS_VALUE=0 +CONFIG_MACH_GW2388=y +CONFIG_MD=y +CONFIG_MD_AUTODETECT=y +# CONFIG_MD_FAULTY is not set +# CONFIG_MD_LINEAR is not set +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +# CONFIG_MD_RAID10 is not set +CONFIG_MD_RAID456=y +CONFIG_MD_RAID6_PQ=y +# CONFIG_MFD_T7L66XB is not set +CONFIG_MMC=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_CNS3XXX=y +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_NLS=y +CONFIG_NR_CPUS=2 +CONFIG_OABI_COMPAT=y +CONFIG_OUTER_CACHE=y 
+CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PCI=y +CONFIG_PCIEAER=y +# CONFIG_PCIEAER_INJECT is not set +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIE_ECRC is not set +CONFIG_PCI_DOMAINS=y +CONFIG_PREEMPT=y +CONFIG_RAID_ATTRS=y +CONFIG_RD_GZIP=y +# CONFIG_RD_LZMA is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_DS1672=y +# CONFIG_RTC_DRV_PL030 is not set +# CONFIG_RTC_DRV_PL031 is not set +CONFIG_SATA_AHCI=y +CONFIG_SATA_CNS3XXX_AHCI=y +CONFIG_SCSI=y +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SDIO_UART is not set +CONFIG_SENSORS_AD7418=y +CONFIG_SENSORS_GSP=y +# CONFIG_SERIAL_8250_EXTENDED is not set +CONFIG_SERIAL_8250_NR_UARTS=8 +CONFIG_SERIAL_8250_RUNTIME_UARTS=8 +# CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_AMBA_PL011 is not set +CONFIG_SILICON=y +CONFIG_SMP=y +CONFIG_SPI=y +CONFIG_SPI_BITBANG=y +CONFIG_SPI_CNS3XXX=y +CONFIG_SPI_CNS3XXX_2IOREAD=y +# CONFIG_SPI_CNS3XXX_DEBUG is not set +CONFIG_SPI_CNS3XXX_USEDMA=y +# CONFIG_SPI_CNS3XXX_USEDMA_DEBUG is not set +# CONFIG_SPI_GPIO is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_PL022 is not set +# CONFIG_SPI_SPIDEV is not set +# CONFIG_STAGING is not set +CONFIG_STOP_MACHINE=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_UID16=y +CONFIG_USB=y +CONFIG_USB_CNS3XXX_EHCI=y +CONFIG_USB_CNS3XXX_OHCI=y +CONFIG_USB_CNS3XXX_OTG=y +CONFIG_USB_CNS3XXX_OTG_BOTH=y +CONFIG_USB_CNS3XXX_OTG_ENABLE_OTG_DRVVBUS=y +# CONFIG_USB_CNS3XXX_OTG_HCD_ONLY is not set +# CONFIG_USB_CNS3XXX_OTG_PCD_ONLY is not set +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set +# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_SUPPORT=y +# CONFIG_USB_UHCI_HCD is not set +CONFIG_USE_GENERIC_SMP_HELPERS=y +CONFIG_VB=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_VFP=y +CONFIG_WATCHDOG_NOWAYOUT=y +CONFIG_XOR_BLOCKS=y +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZONE_DMA_FLAG=0 diff --git a/target/linux/cns3xxx/image/Makefile b/target/linux/cns3xxx/image/Makefile new file mode 100644 
index 0000000000..0265d7c545 --- /dev/null +++ b/target/linux/cns3xxx/image/Makefile @@ -0,0 +1,35 @@ +# +# Copyright (C) 2010 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/image.mk + +define Image/Prepare + cp $(LINUX_DIR)/arch/arm/boot/uImage $(KDIR)/uImage +endef + +define Image/BuildKernel + cp $(KDIR)/uImage $(BIN_DIR)/openwrt-$(BOARD)-uImage +endef + +define Image/Build + $(call Image/Build/$(1),$(1)) +endef + +define Image/Build/jffs2-64k + dd if=$(KDIR)/root.$(1) of=$(BIN_DIR)/openwrt-$(BOARD)-$(1).img bs=65536 conv=sync +endef + +define Image/Build/jffs2-128k + dd if=$(KDIR)/root.$(1) of=$(BIN_DIR)/openwrt-$(BOARD)-$(1).img bs=131072 conv=sync +endef + +define Image/Build/squashfs + $(call prepare_generic_squashfs,$(KDIR)/root.squashfs) + dd if=$(KDIR)/root.$(1) of=$(BIN_DIR)/openwrt-$(BOARD)-$(1).img bs=131072 conv=sync +endef + +$(eval $(call BuildImage)) diff --git a/target/linux/cns3xxx/patches-2.6.31/100-cns3xxx_support.patch b/target/linux/cns3xxx/patches-2.6.31/100-cns3xxx_support.patch new file mode 100644 index 0000000000..771061ad81 --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/100-cns3xxx_support.patch @@ -0,0 +1,11001 @@ +--- a/arch/arm/common/gic.c ++++ b/arch/arm/common/gic.c +@@ -32,6 +32,8 @@ + #include + #include + #include ++#include ++ + + static DEFINE_SPINLOCK(irq_controller_lock); + +@@ -90,7 +92,7 @@ static void gic_ack_irq(unsigned int irq + spin_unlock(&irq_controller_lock); + } + +-static void gic_mask_irq(unsigned int irq) ++void gic_mask_irq(unsigned int irq) + { + u32 mask = 1 << (irq % 32); + +@@ -175,6 +177,109 @@ void __init gic_cascade_irq(unsigned int + set_irq_chained_handler(irq, gic_handle_cascade_irq); + } + ++ ++// type: level or edge ++// 0 - level high active, 1 - rising edge sensitive ++void set_interrupt_type_by_base(void __iomem *base, int id, u32 type) ++{ ++ 
unsigned char int_type_bit=0; ++ u32 gic_v=0; ++ ++ // judge gic offset ++ //printk("gic addr: %#x\n", id/16*4 + 0xc00); ++ //printk("gic addr bits: %#x\n", id%16*2); ++ int_type_bit=(id%16*2+1); ++ ++ gic_v = readl(base + GIC_DIST_CONFIG + id/16*4); ++ ++ gic_v &= (~(1 << int_type_bit)); ++ gic_v |= ( type << int_type_bit); ++ ++ writel(gic_v, base + GIC_DIST_CONFIG + id/16*4); ++} ++ ++// type: level or edge ++// 0 - level high active, 1 - rising edge sensitive ++void set_interrupt_type(int id, u32 type) ++{ ++ set_interrupt_type_by_base((void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, id, type); ++} ++ ++void get_interrupt_type_by_base(void __iomem *base, u32 id, u32 *type) ++{ ++ unsigned char int_type_bit=0; ++ u32 gic_v=0; ++ ++ // judge gic offset ++ int_type_bit=(id%16*2+1); ++ ++ //gic_v = readl(base + GIC_DIST_CONFIG + 4); ++ gic_v = readl(base + GIC_DIST_CONFIG + id/16*4); ++ ++ *type = ((gic_v >> int_type_bit) & 0x1); ++ ++ //writel(0, base + GIC_DIST_CONFIG + id/16*4); ++} ++ ++void get_interrupt_type(u32 id, u32 *type) ++{ ++ get_interrupt_type_by_base((void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, id, type); ++} ++ ++ ++ ++// set interrupt priority ++void set_interrupt_pri_by_base(void __iomem *base, u32 id, u32 pri) ++{ ++ unsigned char int_type_bit=0; ++ u32 gic_v=0; ++ ++ ++ // judge gic offset ++ int_type_bit=(id%4*8+4); ++ ++ gic_v = readl(base + GIC_DIST_PRI + id/4*4); ++ ++ gic_v &= (~(0xf << int_type_bit)); ++ gic_v |= (pri << int_type_bit); ++ ++ writel(gic_v, base + GIC_DIST_PRI + id/4*4); ++ ++ gic_v = 0; ++ gic_v = readl(base + GIC_DIST_PRI + id/4*4); ++ //printk("read gic_v: %x\n", gic_v); ++} ++ ++void set_interrupt_pri(u32 id, u32 pri) ++{ ++ set_interrupt_pri_by_base((void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, id, pri); ++} ++ ++void get_interrupt_pri_by_base(void __iomem *base, int id, u32 *type) ++{ ++ unsigned char int_type_bit=0; ++ u32 gic_v=0; ++ ++ // judge gic offset ++ int_type_bit=(id%4*8+4); ++ ++ gic_v = 
readl(base + GIC_DIST_PRI + id/4*4); ++ ++ //printk("int_type_bit: %d\n", int_type_bit); ++ //printk("gic_v: %#x\n", gic_v); ++ *type = ((gic_v >> int_type_bit) & 0xf); ++ //gic_v &= (~(1 << int_type_bit)); ++ //gic_v |= ( type << int_type_bit); ++ ++ //writel(0, base + GIC_DIST_CONFIG + id/16*4); ++} ++ ++void get_interrupt_pri(int id, u32 *pri) ++{ ++ get_interrupt_pri_by_base((void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, id, pri); ++} ++ ++ + void __init gic_dist_init(unsigned int gic_nr, void __iomem *base, + unsigned int irq_start) + { +@@ -254,6 +359,12 @@ void __cpuinit gic_cpu_init(unsigned int + writel(1, base + GIC_CPU_CTRL); + } + ++void cns3xxx_write_pri_mask(u8 pri_mask) ++{ ++ writel(pri_mask, (void __iomem *) CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT + GIC_CPU_PRIMASK); ++} ++ ++ + #ifdef CONFIG_SMP + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) + { +--- a/arch/arm/include/asm/cacheflush.h ++++ b/arch/arm/include/asm/cacheflush.h +@@ -280,6 +280,35 @@ extern void dmac_flush_range(const void + + #endif + ++#ifdef CONFIG_CPU_NO_CACHE_BCAST ++enum smp_dma_cache_type { ++ SMP_DMA_CACHE_INV, ++ SMP_DMA_CACHE_CLEAN, ++ SMP_DMA_CACHE_FLUSH, ++}; ++ ++extern void smp_dma_cache_op(int type, const void *start, const void *end); ++ ++static inline void smp_dma_inv_range(const void *start, const void *end) ++{ ++ smp_dma_cache_op(SMP_DMA_CACHE_INV, start, end); ++} ++ ++static inline void smp_dma_clean_range(const void *start, const void *end) ++{ ++ smp_dma_cache_op(SMP_DMA_CACHE_CLEAN, start, end); ++} ++ ++static inline void smp_dma_flush_range(const void *start, const void *end) ++{ ++ smp_dma_cache_op(SMP_DMA_CACHE_FLUSH, start, end); ++} ++#else ++#define smp_dma_inv_range dmac_inv_range ++#define smp_dma_clean_range dmac_clean_range ++#define smp_dma_flush_range dmac_flush_range ++#endif ++ + #ifdef CONFIG_OUTER_CACHE + + extern struct outer_cache_fns outer_cache; +--- /dev/null ++++ b/arch/arm/include/asm/hardware/arm_twd.h +@@ -0,0 
+1,21 @@ ++#ifndef __ASM_HARDWARE_TWD_H ++#define __ASM_HARDWARE_TWD_H ++ ++#define TWD_TIMER_LOAD 0x00 ++#define TWD_TIMER_COUNTER 0x04 ++#define TWD_TIMER_CONTROL 0x08 ++#define TWD_TIMER_INTSTAT 0x0C ++ ++#define TWD_WDOG_LOAD 0x20 ++#define TWD_WDOG_COUNTER 0x24 ++#define TWD_WDOG_CONTROL 0x28 ++#define TWD_WDOG_INTSTAT 0x2C ++#define TWD_WDOG_RESETSTAT 0x30 ++#define TWD_WDOG_DISABLE 0x34 ++ ++#define TWD_TIMER_CONTROL_ENABLE (1 << 0) ++#define TWD_TIMER_CONTROL_ONESHOT (0 << 1) ++#define TWD_TIMER_CONTROL_PERIODIC (1 << 1) ++#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) ++ ++#endif +--- /dev/null ++++ b/arch/arm/include/asm/hardware/cache-l2cc.h +@@ -0,0 +1,79 @@ ++/******************************************************************************* ++ * ++ * arch/arm/include/asm/hardware/cache-l2cc.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#ifndef __ASM_ARM_HARDWARE_L2_H ++#define __ASM_ARM_HARDWARE_L2_H ++ ++#define L2CC_CACHE_ID 0x000 ++#define L2CC_CACHE_TYPE 0x004 ++#define L2CC_CTRL 0x100 ++#define L2CC_AUX_CTRL 0x104 ++#define L2CC_TAG_RAM_LATENCY_CTRL 0x108 ++#define L2CC_DATA_RAM_LATENCY_CTRL 0x10C ++#define L2CC_EVENT_CNT_CTRL 0x200 ++#define L2CC_EVENT_CNT1_CFG 0x204 ++#define L2CC_EVENT_CNT0_CFG 0x208 ++#define L2CC_EVENT_CNT1_VAL 0x20C ++#define L2CC_EVENT_CNT0_VAL 0x210 ++#define L2CC_INTR_MASK 0x214 ++#define L2CC_MASKED_INTR_STAT 0x218 ++#define L2CC_RAW_INTR_STAT 0x21C ++#define L2CC_INTR_CLEAR 0x220 ++#define L2CC_CACHE_SYNC 0x730 ++#define L2CC_INV_LINE_PA 0x770 ++#define L2CC_INV_WAY 0x77C ++#define L2CC_CLEAN_LINE_PA 0x7B0 ++#define L2CC_CLEAN_LINE_IDX 0x7B8 ++#define L2CC_CLEAN_WAY 0x7BC ++#define L2CC_CLEAN_INV_LINE_PA 0x7F0 ++#define L2CC_CLEAN_INV_LINE_IDX 0x7F8 ++#define L2CC_CLEAN_INV_WAY 0x7FC ++#define L2CC_LOCKDOWN_0_WAY_D 0x900 ++#define L2CC_LOCKDOWN_0_WAY_I 0x904 ++#define L2CC_LOCKDOWN_1_WAY_D 0x908 ++#define L2CC_LOCKDOWN_1_WAY_I 0x90C ++#define L2CC_LOCKDOWN_2_WAY_D 0x910 ++#define L2CC_LOCKDOWN_2_WAY_I 0x914 ++#define L2CC_LOCKDOWN_3_WAY_D 0x918 ++#define L2CC_LOCKDOWN_3_WAY_I 0x91C ++#define L2CC_LOCKDOWN_4_WAY_D 0x920 ++#define L2CC_LOCKDOWN_4_WAY_I 0x924 ++#define L2CC_LOCKDOWN_5_WAY_D 0x928 ++#define L2CC_LOCKDOWN_5_WAY_I 0x92C ++#define L2CC_LOCKDOWN_6_WAY_D 0x930 ++#define L2CC_LOCKDOWN_6_WAY_I 0x934 ++#define L2CC_LOCKDOWN_7_WAY_D 0x938 ++#define L2CC_LOCKDOWN_7_WAY_I 0x93C ++#define L2CC_LOCKDOWN_LINE_EN 0x950 ++#define L2CC_UNLOCK_ALL_LINE_WAY 0x954 ++#define L2CC_ADDR_FILTER_START 0xC00 ++#define L2CC_ADDR_FILTER_END 0xC04 ++#define L2CC_DEBUG_CTRL 0xF40 ++ ++#ifndef __ASSEMBLY__ ++extern void __init l2cc_init(void __iomem *base); ++#endif ++ ++#endif +--- a/arch/arm/include/asm/hardware/gic.h ++++ 
b/arch/arm/include/asm/hardware/gic.h +@@ -37,6 +37,13 @@ void gic_dist_init(unsigned int gic_nr, + void gic_cpu_init(unsigned int gic_nr, void __iomem *base); + void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); + void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); ++ ++void cns3xxx_write_pri_mask(u8 pri_mask); ++void set_interrupt_type(int id, u32 type); ++void get_interrupt_type(u32 id, u32 *type); ++void set_interrupt_pri(u32 id, u32 pri); ++void get_interrupt_pri(int id, u32 *pri); ++ + #endif + + #endif +--- a/arch/arm/include/asm/mach/pci.h ++++ b/arch/arm/include/asm/mach/pci.h +@@ -20,6 +20,9 @@ struct hw_pci { + void (*postinit)(void); + u8 (*swizzle)(struct pci_dev *dev, u8 *pin); + int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin); ++#ifdef CONFIG_PCI_DOMAINS ++ int nr_domains; ++#endif + }; + + /* +@@ -37,8 +40,12 @@ struct pci_sys_data { + /* IRQ mapping */ + int (*map_irq)(struct pci_dev *, u8, u8); + struct hw_pci *hw; ++#ifdef CONFIG_PCI_DOMAINS ++ int domain; ++#endif + }; + ++ + /* + * This is the standard PCI-PCI bridge swizzling algorithm. + */ +--- a/arch/arm/include/asm/pci.h ++++ b/arch/arm/include/asm/pci.h +@@ -25,6 +25,11 @@ static inline void pcibios_penalize_isa_ + /* We don't do dynamic PCI IRQ allocation */ + } + ++#ifdef CONFIG_PCI_DOMAINS ++int pci_domain_nr(struct pci_bus *bus); ++int pci_proc_domain(struct pci_bus *bus); ++#endif ++ + /* + * The PCI address space does equal the physical memory address space. + * The networking and block device layers use this boolean for bounce +--- a/arch/arm/include/asm/xor.h ++++ b/arch/arm/include/asm/xor.h +@@ -132,10 +132,43 @@ static struct xor_block_template xor_blo + .do_5 = xor_arm4regs_5, + }; + ++#ifdef CONFIG_CNS3XXX_RAID ++extern void do_cns_rdma_xorgen(unsigned int src_no, unsigned int bytes, ++ void **bh_ptr, void *dst_ptr); ++/* ++ * We create these funcs/template just for benchmark reference. 
++ */ ++static void xor_cns_raid_2(unsigned long bytes, unsigned long *p1, ++ unsigned long *p2) ++{ ++ void *src[2]; ++ ++ src[0] = p2; ++ src[1] = p1; ++ do_cns_rdma_xorgen(2, bytes, src, (void *)p2); ++} ++ ++static struct xor_block_template xor_block_cnsraid = { ++ .name = "CNS-RAID", ++ .do_2 = xor_cns_raid_2, ++}; ++#endif /* CONFIG_CNS3XXX_RAID */ ++ + #undef XOR_TRY_TEMPLATES ++ ++#ifdef CONFIG_CNS3XXX_RAID ++#define XOR_TRY_TEMPLATES \ ++ do { \ ++ xor_speed(&xor_block_arm4regs); \ ++ xor_speed(&xor_block_8regs); \ ++ xor_speed(&xor_block_32regs); \ ++ xor_speed(&xor_block_cnsraid); \ ++ } while (0) ++#else + #define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_arm4regs); \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + } while (0) ++#endif /* CONFIG_CNS3XXX_RAID */ +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -193,7 +193,7 @@ menu "System Type" + + choice + prompt "ARM system type" +- default ARCH_VERSATILE ++ default ARCH_CNS3XXX + + config ARCH_AAEC2000 + bool "Agilent AAEC-2000 based" +@@ -235,6 +235,17 @@ config ARCH_VERSATILE + help + This enables support for ARM Ltd Versatile board. + ++config ARCH_CNS3XXX ++ bool "Cavium Networks CNS3XXX family" ++ select ARM_AMBA ++ select HAVE_CLK ++ select COMMON_CLKDEV ++ select GENERIC_TIME ++ select GENERIC_CLOCKEVENTS ++ select ARCH_REQUIRE_GPIOLIB ++ help ++ This enables support for Cavium Networks CNS3XXX boards. 
++ + config ARCH_AT91 + bool "Atmel AT91" + select GENERIC_GPIO +@@ -715,6 +726,8 @@ source "arch/arm/mach-aaec2000/Kconfig" + + source "arch/arm/mach-realview/Kconfig" + ++source "arch/arm/mach-cns3xxx/Kconfig" ++ + source "arch/arm/mach-at91/Kconfig" + + source "arch/arm/plat-mxc/Kconfig" +@@ -768,7 +781,7 @@ endif + + config ARM_ERRATA_411920 + bool "ARM errata: Invalidation of the Instruction Cache operation can fail" +- depends on CPU_V6 && !SMP ++ depends on CPU_V6 && !SMP && !ARCH_CNS3XXX + help + Invalidation of the Instruction Cache operation can + fail. This erratum is present in 1136 (before r1p4), 1156 and 1176. +@@ -849,13 +862,17 @@ config ISA_DMA_API + bool + + config PCI +- bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE ++ bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_CNS3XXX || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE + help + Find out whether you have a PCI motherboard. PCI is the name of a + bus system, i.e. the way the CPU talks to the other stuff inside + your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or + VESA. If you have PCI, say Y, otherwise N. 
+ ++config PCI_DOMAINS ++ def_bool y ++ depends on PCI && ARCH_CNS3XXX ++ + config PCI_SYSCALL + def_bool PCI + +@@ -873,6 +890,8 @@ config PCI_HOST_ITE8152 + + source "drivers/pci/Kconfig" + ++source "drivers/pci/pcie/Kconfig" ++ + source "drivers/pcmcia/Kconfig" + + endmenu +@@ -884,10 +903,10 @@ source "kernel/time/Kconfig" + config SMP + bool "Symmetric Multi-Processing (EXPERIMENTAL)" + depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP ||\ +- MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4) ++ MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_CNS3XXX || ARCH_OMAP4) + depends on GENERIC_CLOCKEVENTS + select USE_GENERIC_SMP_HELPERS +- select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_OMAP4) ++ select HAVE_ARM_SCU if (ARCH_REALVIEW || ARCH_CNS3XXX || ARCH_OMAP4) + help + This enables support for systems with more than one CPU. If you have + a system with only one CPU, like most personal computers, say N. If +@@ -944,7 +963,7 @@ config NR_CPUS + int "Maximum number of CPUs (2-32)" + range 2 32 + depends on SMP +- default "4" ++ default "2" + + config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" +@@ -955,10 +974,10 @@ config HOTPLUG_CPU + + config LOCAL_TIMERS + bool "Use local timer interrupts" +- depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || \ ++ depends on SMP && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || ARCH_CNS3XXX || \ + REALVIEW_EB_A9MP || MACH_REALVIEW_PBX || ARCH_OMAP4) + default y +- select HAVE_ARM_TWD if (ARCH_REALVIEW || ARCH_OMAP4) ++ select HAVE_ARM_TWD if (ARCH_REALVIEW || ARCH_CNS3XXX || ARCH_OMAP4) + help + Enable support for local timers on SMP platforms, rather then the + legacy IPI broadcast method. 
Local timers allows the system +--- a/arch/arm/kernel/bios32.c ++++ b/arch/arm/kernel/bios32.c +@@ -531,6 +531,7 @@ static void __init pcibios_init_hw(struc + sys->busnr = busnr; + sys->swizzle = hw->swizzle; + sys->map_irq = hw->map_irq; ++ sys->domain = hw->nr_domains; + sys->resource[0] = &ioport_resource; + sys->resource[1] = &iomem_resource; + +@@ -694,3 +695,20 @@ int pci_mmap_page_range(struct pci_dev * + + return 0; + } ++#ifdef CONFIG_PCI_DOMAINS ++int pci_domain_nr(struct pci_bus *bus) ++{ ++ ++ //struct pci_sysdata *sd = bus->sysdata; ++ struct pci_sys_data *sd = bus->sysdata; ++ return sd->domain; ++ ++} ++EXPORT_SYMBOL(pci_domain_nr); ++ ++int pci_proc_domain(struct pci_bus *bus) ++{ ++ return pci_domain_nr(bus); ++} ++EXPORT_SYMBOL(pci_proc_domain); ++#endif +--- a/arch/arm/kernel/entry-armv.S ++++ b/arch/arm/kernel/entry-armv.S +@@ -38,6 +38,12 @@ + bne asm_do_IRQ + + #ifdef CONFIG_SMP ++ ++ test_for_cache_ipi r0, r6, r5, lr ++ movne r0, sp ++ adrne lr, 1b ++ bne do_cache_IPI ++ + /* + * XXX + * +--- a/arch/arm/kernel/smp.c ++++ b/arch/arm/kernel/smp.c +@@ -58,12 +58,20 @@ static DEFINE_PER_CPU(struct ipi_data, i + .lock = SPIN_LOCK_UNLOCKED, + }; + ++#ifdef CONFIG_CPU_NO_CACHE_BCAST_DEBUG ++static DEFINE_PER_CPU(unsigned long,dma_cache_counter) = 0; ++unsigned long bcache_bitmap = 0; ++#endif ++ + enum ipi_msg_type { + IPI_TIMER, + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CALL_FUNC_SINGLE, + IPI_CPU_STOP, ++#ifdef CONFIG_CPU_NO_CACHE_BCAST ++ IPI_DMA_CACHE, ++#endif + }; + + int __cpuinit __cpu_up(unsigned int cpu) +@@ -349,10 +357,17 @@ static void send_ipi_message(const struc + * Call the platform specific cross-CPU call function. 
+ */ + smp_cross_call(mask); +- + local_irq_restore(flags); + } + ++static void send_ipi_message_cache(const struct cpumask *mask) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ smp_cross_call_cache(mask); ++ local_irq_restore(flags); ++} + void arch_send_call_function_ipi_mask(const struct cpumask *mask) + { + send_ipi_message(mask, IPI_CALL_FUNC); +@@ -373,6 +388,13 @@ void show_ipi_list(struct seq_file *p) + seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); + + seq_putc(p, '\n'); ++ ++#ifdef CONFIG_CPU_NO_CACHE_BCAST_DEBUG ++ seq_puts(p, " dc: "); ++ for_each_present_cpu(cpu) ++ seq_printf(p, " %10lu", per_cpu(dma_cache_counter, cpu)); ++ seq_putc(p, '\n'); ++#endif + } + + void show_local_irqs(struct seq_file *p) +@@ -472,6 +494,10 @@ static void ipi_cpu_stop(unsigned int cp + cpu_relax(); + } + ++#ifdef CONFIG_CPU_NO_CACHE_BCAST ++static void ipi_dma_cache_op(unsigned int cpu); ++#endif ++ + /* + * Main handler for inter-processor interrupts + * +@@ -531,6 +557,16 @@ asmlinkage void __exception do_IPI(struc + ipi_cpu_stop(cpu); + break; + ++#ifdef CONFIG_CPU_NO_CACHE_BCAST ++ case IPI_DMA_CACHE: ++#ifdef CONFIG_CPU_NO_CACHE_BCAST_DEBUG ++ //get_cpu_var(dma_cache_counter)++; ++ //put_cpu_var(dma_cache_counter); ++#endif ++ ipi_dma_cache_op(cpu); ++ break; ++#endif ++ + default: + printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", + cpu, nextmsg); +@@ -542,6 +578,19 @@ asmlinkage void __exception do_IPI(struc + set_irq_regs(old_regs); + } + ++asmlinkage void __exception do_cache_IPI(struct pt_regs *regs) ++{ ++ unsigned int cpu = smp_processor_id(); ++ struct ipi_data *ipi = &per_cpu(ipi_data, cpu); ++ struct pt_regs *old_regs = set_irq_regs(regs); ++ ++ ipi->ipi_count++; ++ ++ ipi_dma_cache_op(cpu); ++ ++ set_irq_regs(old_regs); ++} ++ + void smp_send_reschedule(int cpu) + { + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +@@ -692,3 +741,115 @@ void flush_tlb_kernel_range(unsigned lon + } else + local_flush_tlb_kernel_range(start, 
end); + } ++ ++#ifdef CONFIG_CPU_NO_CACHE_BCAST ++/* ++ * DMA cache maintenance operations on SMP if the automatic hardware ++ * broadcasting is not available ++ */ ++struct smp_dma_cache_struct { ++ int type; ++ const void *start; ++ const void *end; ++ char unfinished; ++}; ++ ++static struct smp_dma_cache_struct smp_dma_cache_data[3]; ++static DEFINE_SPINLOCK(smp_dma_cache_lock); ++ ++static void local_dma_cache_op(int type, const void *start, const void *end) ++{ ++ switch (type) { ++ case SMP_DMA_CACHE_INV: ++ dmac_inv_range(start, end); ++ break; ++ case SMP_DMA_CACHE_CLEAN: ++ dmac_clean_range(start, end); ++ break; ++ case SMP_DMA_CACHE_FLUSH: ++ dmac_flush_range(start, end); ++ break; ++ default: ++ printk(KERN_CRIT "CPU%u: Unknown SMP DMA cache type %d\n", ++ smp_processor_id(), type); ++ } ++} ++ ++/* ++ * This function must be executed with interrupts disabled. ++ */ ++static void ipi_dma_cache_op(unsigned int cpu) ++{ ++ unsigned long flags; ++ int type; ++ const void *start; ++ const void *end; ++ ++ /* check for spurious IPI */ ++ spin_lock_irqsave(&smp_dma_cache_lock, flags); ++ if (!test_bit(cpu, &bcache_bitmap)) ++ goto out; ++ ++ type = smp_dma_cache_data[cpu].type; ++ start = smp_dma_cache_data[cpu].start; ++ end = smp_dma_cache_data[cpu].end; ++ spin_unlock_irqrestore(&smp_dma_cache_lock, flags); ++ ++ ++ local_dma_cache_op(type, start, end); ++ ++ spin_lock_irqsave(&smp_dma_cache_lock, flags); ++ clear_bit(cpu, &bcache_bitmap); ++ smp_dma_cache_data[cpu].type = 0; ++ smp_dma_cache_data[cpu].start = 0; ++ smp_dma_cache_data[cpu].end = 0; ++ smp_dma_cache_data[cpu].unfinished = 0; ++out: ++ spin_unlock_irqrestore(&smp_dma_cache_lock, flags); ++} ++ ++/* ++ * Execute the DMA cache operations on all online CPUs. This function ++ * can be called with interrupts disabled or from interrupt context. 
++ */ ++static void __smp_dma_cache_op(int type, const void *start, const void *end) ++{ ++ cpumask_t callmap = cpu_online_map; ++ unsigned int cpu = get_cpu(); ++ unsigned long flags; ++ unsigned long cpu_check; ++ cpu_clear(cpu, callmap); ++ cpu_check = *cpus_addr(callmap) >> 1; ++ ++ while (test_bit(cpu, &bcache_bitmap)) ++ ipi_dma_cache_op(cpu); ++ ++ while (test_bit(cpu_check, &bcache_bitmap)) ++ barrier(); ++ ++ spin_lock_irqsave(&smp_dma_cache_lock, flags); ++ smp_dma_cache_data[cpu_check].type = type; ++ smp_dma_cache_data[cpu_check].start = start; ++ smp_dma_cache_data[cpu_check].end = end; ++ smp_dma_cache_data[cpu_check].unfinished = 1; ++ set_bit(cpu_check, &bcache_bitmap); ++ send_ipi_message_cache(&callmap); ++ spin_unlock_irqrestore(&smp_dma_cache_lock, flags); ++ ++ /* run the local operation in parallel with the other CPUs */ ++ local_dma_cache_op(type, start, end); ++ put_cpu(); ++} ++ ++#define DMA_MAX_RANGE SZ_4K ++ ++/* ++ * Split the cache range in smaller pieces if interrupts are enabled ++ * to reduce the latency caused by disabling the interrupts during the ++ * broadcast. 
++ */ ++void smp_dma_cache_op(int type, const void *start, const void *end) ++{ ++ __smp_dma_cache_op(type, start, end); ++} ++#endif +--- a/arch/arm/kernel/smp_twd.c ++++ b/arch/arm/kernel/smp_twd.c +@@ -41,7 +41,8 @@ + /* set up by the platform code */ + void __iomem *twd_base; + +-static unsigned long twd_timer_rate; ++unsigned long twd_timer_rate; ++EXPORT_SYMBOL(twd_timer_rate); + + static void twd_set_mode(enum clock_event_mode mode, + struct clock_event_device *clk) +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/core.c +@@ -0,0 +1,629 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/cns3xxx.c ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 1999 - 2003 ARM Limited ++ * Copyright (C) 2000 Deep Blue Solutions Ltd ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "core.h" ++#include "rdma.h" ++ ++static struct map_desc cns3xxx_io_desc[] __initdata = { ++ { ++ .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_I2S_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_I2S_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_TIMER1_2_3_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_TC11MP_L220_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_L220_BASE), ++ .length = SZ_8K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SWITCH_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SWITCH_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SSP_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SSP_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_DMC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_DMC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SMC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SMC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_GPIOA_BASE_VIRT, 
++ .pfn = __phys_to_pfn(CNS3XXX_GPIOA_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_GPIOB_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_GPIOB_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_RTC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_RTC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_MISC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_MISC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PM_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PM_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_UART0_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART0_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_UART1_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART1_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_UART2_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART2_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_UART3_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART3_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_DMAC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_DMAC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_CRYPTO_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_CRYPTO_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_HCIE_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_HCIE_BASE), ++ .length = SZ_32K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_RAID_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_RAID_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_AXI_IXC_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_AXI_IXC_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_CLCD_BASE_VIRT, ++ .pfn = __phys_to_pfn( CNS3XXX_CLCD_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_USBOTG_BASE_VIRT, 
++ .pfn = __phys_to_pfn(CNS3XXX_USBOTG_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_USB_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_USB_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SATA2_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SATA2_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_CAMERA_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_CAMERA_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_I2S_TDM_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_I2S_TDM_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_2DG_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_2DG_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_USB_OHCI_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_USB_OHCI_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_MEM_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_MEM_BASE), ++ .length = SZ_16M, // 176MB ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_HOST_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_HOST_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_CFG0_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_CFG0_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_CFG1_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_CFG1_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_MSG_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_MSG_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_IO_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_IO_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_MEM_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_MEM_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_HOST_BASE_VIRT, ++ .pfn = 
__phys_to_pfn(CNS3XXX_PCIE1_HOST_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_CFG0_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_CFG0_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_CFG1_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_CFG1_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_MSG_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_MSG_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_IO_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_IO_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_L2C_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_L2C_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PPE_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PPE_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_EMBEDDED_SRAM_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_EMBEDDED_SRAM_BASE), ++ .length = SZ_8K, ++ .type = MT_DEVICE, ++ }, ++}; ++ ++void __init cns3xxx_map_io(void) ++{ ++ iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc)); ++} ++ ++/* used by entry-macro.S */ ++void __iomem *gic_cpu_base_addr; ++ ++void __init cns3xxx_init_irq(void) ++{ ++ /* ARM11 MPCore test chip GIC */ ++ gic_cpu_base_addr = (void __iomem *) CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT; ++ gic_dist_init(0, (void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, 29); ++ gic_cpu_init(0, gic_cpu_base_addr); ++ set_interrupt_pri(1, 0); // Set cache broadcast priority to the highest priority ++} ++ ++int gpio_to_irq(int gpio) ++{ ++ if (gpio > 63) ++ return -EINVAL; ++ ++ if (gpio < 32) ++ return IRQ_CNS3XXX_GPIOA; ++ else ++ return IRQ_CNS3XXX_GPIOB; ++} ++ ++int irq2gpio(int irq) ++{ ++ if (irq == IRQ_CNS3XXX_GPIOA) ++ return 0; ++ else if (irq == IRQ_CNS3XXX_GPIOB) ++ return 32; ++ else ++ return -EINVAL; ++} ++ ++static inline void gpio_line_config(u8 line, u32 direction) 
++{ ++ u32 reg; ++ if (direction) { ++ if (line < 32) { ++ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg |= (1 << line); ++ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } else { ++ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg |= (1 << (line - 32)); ++ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } ++ } else { ++ if (line < 32) { ++ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg &= ~(1 << line); ++ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } else { ++ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg &= ~(1 << (line - 32)); ++ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } ++ } ++} ++ ++static int cns3xxx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) ++{ ++ gpio_line_config(gpio, CNS3XXX_GPIO_IN); ++ return 0; ++} ++ ++static int cns3xxx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level) ++{ ++ gpio_line_set(gpio, level); ++ gpio_line_config(gpio, CNS3XXX_GPIO_OUT); ++ return 0; ++} ++ ++static int cns3xxx_gpio_get_value(struct gpio_chip *chip, unsigned gpio) ++{ ++ return gpio_get_value(gpio); ++} ++ ++static void cns3xxx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) ++{ ++ gpio_set_value(gpio, value); ++} ++ ++static struct gpio_chip cns3xxx_gpio_chip = { ++ .label = "CNS3XXX_GPIO_CHIP", ++ .direction_input = cns3xxx_gpio_direction_input, ++ .direction_output = cns3xxx_gpio_direction_output, ++ .get = cns3xxx_gpio_get_value, ++ .set = cns3xxx_gpio_set_value, ++ .base = 0, ++ .ngpio = 64, ++}; ++ ++/* Watchdog */ ++static struct resource cns3xxx_watchdog_resources[] = { ++ { ++ .start = CNS3XXX_TC11MP_TWD_BASE, ++ .end = CNS3XXX_TC11MP_TWD_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_LOCALWDOG, ++ .end = IRQ_LOCALWDOG, ++ .flags = IORESOURCE_IRQ, ++ } ++}; ++ ++static struct platform_device 
cns3xxx_watchdog_device = { ++ .name = "cns3xxx-wdt", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(cns3xxx_watchdog_resources), ++ .resource = cns3xxx_watchdog_resources, ++}; ++ ++static struct resource cns3xxx_gpio_resources[] = { ++ { ++ .name = "gpio", ++ .start = 0xFFFFFFFF, ++ .end = 0xFFFFFFFF, ++ .flags = 0, ++ }, ++}; ++ ++static struct platform_device cns3xxx_gpio = { ++ .name = "GPIODEV", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(cns3xxx_gpio_resources), ++ .resource = cns3xxx_gpio_resources, ++}; ++ ++void __init cns3xxx_sys_init(void) ++{ ++ l2cc_init((void __iomem *) CNS3XXX_L2C_BASE_VIRT); ++ ++ dmac_init(); ++ cns_rdma_init(); ++ ++ platform_device_register(&cns3xxx_gpio); ++ platform_device_register(&cns3xxx_watchdog_device); ++ gpiochip_add(&cns3xxx_gpio_chip); ++} ++ ++void __iomem *timer1_va_base; ++ ++static void timer_set_mode(enum clock_event_mode mode, ++ struct clock_event_device *clk) ++{ ++ unsigned long ctrl = readl(timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++ int reload; ++ int pclk = (cns3xxx_cpu_clock() >> 3); ++ ++ switch(mode) { ++ case CLOCK_EVT_MODE_PERIODIC: ++ /* pclk is cpu clock/8 */ ++ reload=pclk*1000000/HZ; ++ writel(reload, timer1_va_base + TIMER1_AUTO_RELOAD_OFFSET); ++ ctrl |= (1 << 0) | (1 << 2) | (1 << 9); ++ break; ++ case CLOCK_EVT_MODE_ONESHOT: ++ /* period set, and timer enabled in 'next_event' hook */ ++ writel(0, timer1_va_base + TIMER1_AUTO_RELOAD_OFFSET); ++ ctrl |= (1 << 2) | (1 << 9); ++ break; ++ case CLOCK_EVT_MODE_UNUSED: ++ case CLOCK_EVT_MODE_SHUTDOWN: ++ default: ++ ctrl = 0; ++ } ++ ++ writel(ctrl, timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++} ++ ++static int timer_set_next_event(unsigned long evt, ++ struct clock_event_device *unused) ++{ ++ unsigned long ctrl = readl(timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++ ++ writel(evt, timer1_va_base + TIMER1_COUNTER_OFFSET); ++ writel(ctrl | (1 << 0), timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++ ++ return 0; ++} ++ ++static struct clock_event_device 
timer1_clockevent = { ++ .name = "timer1", ++ .shift = 32, ++ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, ++ .set_mode = timer_set_mode, ++ .set_next_event = timer_set_next_event, ++ .rating = 300, ++ .cpumask = cpu_all_mask, ++}; ++ ++static void __init cns3xxx_clockevents_init(unsigned int timer_irq) ++{ ++ timer1_clockevent.irq = timer_irq; ++ timer1_clockevent.mult = ++ div_sc( (cns3xxx_cpu_clock() >> 3)*1000000, NSEC_PER_SEC, timer1_clockevent.shift); ++ timer1_clockevent.max_delta_ns = ++ clockevent_delta2ns(0xffffffff, &timer1_clockevent); ++ timer1_clockevent.min_delta_ns = ++ clockevent_delta2ns(0xf, &timer1_clockevent); ++ ++ clockevents_register_device(&timer1_clockevent); ++} ++ ++/* ++ * IRQ handler for the timer ++ */ ++static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id) ++{ ++ u32 val; ++ struct clock_event_device *evt = &timer1_clockevent; ++ ++ /* Clear the interrupt */ ++ val = readl(timer1_va_base + TIMER1_2_INTERRUPT_STATUS_OFFSET); ++ writel(val & ~(1 << 2), timer1_va_base + TIMER1_2_INTERRUPT_STATUS_OFFSET); ++ ++ evt->event_handler(evt); ++ ++ return IRQ_HANDLED; ++} ++ ++static struct irqaction cns3xxx_timer_irq = { ++ .name = "timer", ++ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, ++ .handler = cns3xxx_timer_interrupt, ++}; ++ ++static cycle_t cns3xxx_get_cycles(struct clocksource *cs) ++{ ++ u64 val; ++ ++ val = readl(timer1_va_base + TIMER_FREERUN_CONTROL_OFFSET); ++ val &= 0xffff; ++ ++ return ((val << 32) | readl(timer1_va_base + TIMER_FREERUN_OFFSET)); ++} ++ ++static struct clocksource clocksource_cns3xxx = { ++ .name = "freerun", ++ .rating = 200, ++ .read = cns3xxx_get_cycles, ++ .mask = CLOCKSOURCE_MASK(48), ++ .shift = 16, ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS, ++}; ++ ++ ++static void __init cns3xxx_clocksource_init(void) ++{ ++ /* Reset the FreeRunning counter */ ++ writel((1 << 16), timer1_va_base + TIMER_FREERUN_CONTROL_OFFSET); ++ ++ clocksource_cns3xxx.mult = ++ 
clocksource_khz2mult(100, clocksource_cns3xxx.shift); ++ clocksource_register(&clocksource_cns3xxx); ++} ++ ++/* ++ * Set up the clock source and clock events devices ++ */ ++void __init __cns3xxx_timer_init(unsigned int timer_irq) ++{ ++ unsigned long val, irq_mask; ++ ++ /* ++ * Initialise to a known state (all timers off) ++ */ ++ writel(0, timer1_va_base + TIMER1_2_CONTROL_OFFSET); /* disable timer1 and timer2 */ ++ writel(0, timer1_va_base + TIMER_FREERUN_CONTROL_OFFSET); /* stop free running timer3 */ ++ writel(0, timer1_va_base + TIMER1_MATCH_V1_OFFSET); ++ writel(0, timer1_va_base + TIMER1_MATCH_V2_OFFSET); ++ ++ val = (cns3xxx_cpu_clock() >> 3) * 1000000 / HZ; ++ writel(val, timer1_va_base + TIMER1_COUNTER_OFFSET); ++ ++ /* mask irq, non-mask timer1 overflow */ ++ irq_mask = readl(timer1_va_base + TIMER1_2_INTERRUPT_MASK_OFFSET); ++ irq_mask &= ~(1 << 2); ++ irq_mask |= 0x03; ++ writel(irq_mask, timer1_va_base + TIMER1_2_INTERRUPT_MASK_OFFSET); ++ /* down counter */ ++ val = readl(timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++ val |= (1 << 9); ++ writel(val, timer1_va_base + TIMER1_2_CONTROL_OFFSET); ++ ++ /* ++ * Make irqs happen for the system timer ++ */ ++ setup_irq(timer_irq, &cns3xxx_timer_irq); ++ ++ cns3xxx_clocksource_init(); ++ cns3xxx_clockevents_init(timer_irq); ++} ++ ++void __init cns3xxx_timer_init(void) ++{ ++ timer1_va_base = (void __iomem *) CNS3XXX_TIMER1_2_3_BASE_VIRT; ++ twd_base = (void __iomem *) CNS3XXX_TC11MP_TWD_BASE_VIRT; ++ __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0); ++} ++ ++struct sys_timer cns3xxx_timer = { ++ .init = cns3xxx_timer_init, ++}; ++ ++ ++void cns3xxx_power_off(void) ++{ ++ __u32 clkctrl; ++ ++ printk(KERN_INFO "powering system down...\n"); ++ ++ clkctrl = readl(CNS3XXX_PM_BASE_VIRT + PM_SYS_CLK_CTRL_OFFSET); ++ clkctrl &= 0xfffff1ff; ++ clkctrl |= (0x5 << 9); /* Hibernate */ ++ writel(clkctrl, CNS3XXX_PM_BASE_VIRT + PM_SYS_CLK_CTRL_OFFSET); ++} +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/core.h +@@ -0,0 +1,34 @@ 
++/* ++ * linux/arch/arm/mach-cns3xxx/core.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2004 ARM Limited ++ * Copyright (C) 2000 Deep Blue Solutions Ltd ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_CNS3XXX_H ++#define __ASM_ARCH_CNS3XXX_H ++ ++void __init cns3xxx_map_io(void); ++void cns3xxx_power_off(void); ++void __init cns3xxx_init_irq(void); ++ ++extern struct sys_timer cns3xxx_timer; ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/dmac.c +@@ -0,0 +1,1464 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#include ++ ++//#define DEBUG_GDMA ++ ++#define DMAC_MEM_MAP_VALUE(reg_offset) (*((uint32_t volatile *)(CNS3XXX_DMAC_BASE_VIRT + reg_offset))) ++ ++#define DMAC_INTEN DMAC_MEM_MAP_VALUE(0x020) ++#define DMAC_INTSTATUS DMAC_MEM_MAP_VALUE(0x028) ++#define DMAC_INTCLR DMAC_MEM_MAP_VALUE(0x02C) ++ ++/* DMAC Debug registers */ ++#define DMAC_DBGSTATUS DMAC_MEM_MAP_VALUE(0xD00) /* Debug Status Register */ ++#define DMAC_DBGCMD DMAC_MEM_MAP_VALUE(0xD04) /* Debug Command Register */ ++#define DMAC_DBGINST0 DMAC_MEM_MAP_VALUE(0xD08) /* Debug Instrucion-0 Register */ ++#define DMAC_DBGINST1 DMAC_MEM_MAP_VALUE(0xD0C) /* Debug Instrucion-1 Register */ ++ ++#define CHANNEL_AND_MANAGER 0x1ff ++#define CHANNEL_ONLY 0xff ++#define MANAGER_ONLY 0x100 ++ ++#define MAX_MICROCODE_SIZE 2048 ++ ++#if 0 ++#define ERROR_INTR 45 ++#define DMAC_IRQNO_BASE 46 ++#else ++#define ERROR_INTR 68 ++#define DMAC_IRQNO_BASE 69 ++#endif ++ ++#define MAX_INTR_EVENTS 32 ++ ++#define MIN_EVENT_NUM 8 //2 ++ ++/* Debug Status Register */ ++#define DMAC_DBG_BUSY_BIT (1<<0) ++#define DMAC_DBG_INSTR_0_SHIFT 16 ++#define DMAC_DBG_INSTR_2_SHIFT 0 ++#define DMAC_DBG_THREAD_BIT (1<<0) ++#define DMAC_DBG_CH_NUM_SHIFT 8 ++#define DMAC_DBG_CH_NUM_BIT_MASK 0x7 ++#define DMAC_CHMGR 8 ++ ++spinlock_t dma_mgr_lock; ++ ++typedef enum { ++// DMAC_INSTR_DMAADDH = 0, /* Add Halfword */ /*** No 
implement ***/ ++ DMAC_INSTR_DMAEND = 0, /* End */ ++ DMAC_INSTR_DMAFLUSHP, /* Flash and notify Peripheral */ ++ DMAC_INSTR_DMAGO, /* Go */ ++ DMAC_INSTR_DMALD, /* Load */ ++ DMAC_INSTR_DMALDP, /* Load aPeripheral */ ++ DMAC_INSTR_DMALP, /* Loop */ ++ DMAC_INSTR_DMALPEND, /* Loop End */ ++// DMAC_INSTR_DMALPFE, /* Loop Forever */ ++ DMAC_INSTR_DMAKILL, /* kill */ ++ DMAC_INSTR_DMAMOV, /* Move */ ++ DMAC_INSTR_DMANOP, /* No operation */ ++// DMAC_INSTR_DMARMB, /* Read Memory Barrier */ ++ DMAC_INSTR_DMASEV, /* Send Event */ ++ DMAC_INSTR_DMAST, /* Store */ ++ DMAC_INSTR_DMASTP, /* Store and notify Peripheral */ ++ DMAC_INSTR_DMASTZ, /* Store Zero */ ++ DMAC_INSTR_DMAWFE, /* Wait For Event */ ++ DMAC_INSTR_DMAWFP, /* Wait For Peripheral */ ++ DMAC_INSTR_DMAWMB /* Wait For Barrier */ ++} dmac_instr_t; ++ ++typedef struct { ++ const char *enc_buf; ++ int enc_buf_len; ++ int chan_or_mgr; /* 0xff for DMA manager and DMA channel, ++ 0x7f for DMA channel, ++ 0x80 for DMA manager */ ++} dmac_instr_encode_t; ++ ++typedef struct { ++ uint32_t sa:1; /* source address increment: 0 - FIXED / 1 - INCR */ ++ uint32_t ss:3; /* source burst size in bytes: mapping value TBD with designer */ ++ uint32_t sb:4; /* source burst length */ ++ uint32_t sp:3; /* source protection */ ++ uint32_t sc:3; /* source cache */ ++ uint32_t da:1; /* destination address increment: 0 - FIXED / 1 - INCR */ ++ uint32_t ds:3; /* destination burst size in bytes: mapping value TBD with designer */ ++ uint32_t db:4; /* destination burst length */ ++ uint32_t dp:3; /* destination protection */ ++ uint32_t dc:3; /* destination cache */ ++ uint32_t es:3; /* endian swap size, in bits */ ++ uint32_t padding:1; ++} dmac_ch_ctrl_t; ++ ++typedef struct { ++ union { ++ dmac_ch_ctrl_t ccr; ++ uint32_t val; ++ } i; ++} dmac_cmd_imm32_t; ++ ++typedef struct { ++ uint16_t bs:1; /* burst/single bit */ ++ uint16_t x:1; /* x bit */ ++ uint16_t ns:1; /* not secure bit */ ++ uint16_t lc:1; /* loop counter bit */ ++ uint16_t 
p:1; /* p bit */ ++ uint16_t nf:1; /* no-finite bit */ ++ uint16_t i:1; /* invalid bit */ ++ uint16_t padding:9; ++} dmac_cmd_bits_t; ++ ++typedef struct { ++ uint8_t periph; /* peripheral ID */ ++ uint8_t cn; /* Channel Number */ ++ uint8_t iter; /* iteration count */ ++ uint8_t backwards_jump; /* backwards jump length */ ++ uint8_t rd; /* destination register, */ ++ uint8_t event_num; /* event number */ ++ ++ union { ++ dmac_cmd_bits_t b; ++ uint16_t val; ++ } bits; ++ ++ dmac_cmd_imm32_t imm32; /* immediate 32bit value */ ++} dmac_instr_param_t; ++ ++typedef struct { ++ int in_use; /* Channel in use or not */ ++ int channel; /* Channel number */ ++ int microcode_size; /* Microcode size */ ++ uint8_t *microcode; /* TODO */ ++ dma_addr_t microcode_dma; ++ int (*intr_handler) (void *); ++ void *handler_args; ++ int notifications_used; /* 32 bits for every interrupt/event */ ++} dmac_channel_t; ++ ++/* TODO: Not protected as of now */ ++dmac_channel_t *dmac_channels[MAX_DMA_CHANNELS]; ++ ++int dmac_events[MAX_INTR_EVENTS]; ++ ++static int dmac_create_instr(int chan, dmac_instr_t instr, ++ dmac_instr_param_t * param); ++static int dmac_exec_ucode(int ucode_channel, int ch); ++void pl330_dump_regs(void); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAEND ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 0 0 0 ++ * Example: ++ * DMAEND ++ * 00 ++ ******************************************************************************/ ++const char dmac_code_DMAEND[] = { 0x00 }; ++ ++int DMAC_DMAEND(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAEND, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAEND); ++ ++/****************************************************************************** ++ * ++ * 
Instruction: DMAFLUSHP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 1 0 1 0 1 ++ * Example: ++ * DMAFLUSHP P0 ++ * 35 00 ++ ******************************************************************************/ ++const char dmac_code_DMAFLUSHP[] = { 0x35, 0x00 }; ++ ++int DMAC_DMAFLUSHP(int ch_num, int periph) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.periph = periph; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAFLUSHP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAFLUSHP); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAGO ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 0 1 0 0 0 ns 0 ++ * ++ * | 47 16 | ++ * < imm[31:0] > ++ * Example: ++ * DMAGO C0, 0x40000000 ++ * A0 00 00 00 00 40 ++ ******************************************************************************/ ++const char dmac_code_DMAGO[] = { 0xA0, 0x00, 0x00, 0x00, 0x00, 0x40 }; ++ ++int DMAC_DMAGO(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ dmac_channel_t *dma_ch = dmac_channels[ch_num]; ++ ++ if(!dma_ch->in_use) { ++ printk("DMAC_DMAGO an unused channel\n"); ++ return -1; ++ } ++ ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.bits.b.ns = 1; ++ param.cn = ch_num; ++ param.imm32.i.val = dma_ch->microcode_dma; ++#ifdef DEBUG_GDMA ++ printk("%s:%d: microcode Physical Address *(%x)==[%x]\n", __FUNCTION__, ++ __LINE__, param.imm32.i.val, ++ *((uint32_t *) phys_to_virt(dma_ch->microcode_dma))); ++#endif ++ instr_len = dmac_create_instr(DMAC_CHMGR, DMAC_INSTR_DMAGO, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ dmac_exec_ucode(DMAC_CHMGR, DMAC_CHMGR); // DMAC_CHMGR); ++ if (dmac_channels[DMAC_CHMGR]) ++ 
dmac_channels[DMAC_CHMGR]->microcode_size = 0; ++ else ++ printk("BUG HERE !! DEBUG .. \n"); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAGO); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALD ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 bs x ++ * Example: ++ * DMALD ++ * 04 ++ ******************************************************************************/ ++const char dmac_code_DMALD[] = { 0x04 }; ++ ++int DMAC_DMALD(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALD, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALD); ++ ++int DMAC_DMALDB(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ param.bits.b.x = 1; ++ param.bits.b.bs = 1; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALD, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALDB); ++ ++int DMAC_DMALDS(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ param.bits.b.x = 1; ++ param.bits.b.bs = 0; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALD, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALDS); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < iter[7:0] > 0 0 1 0 0 0 lc 0 ++ * Example: ++ * DMALP 8 ++ * 20 07 ++ 
******************************************************************************/ ++const char dmac_code_DMALP[] = { 0x20, 0x07 }; ++ ++int DMAC_DMALP(int ch_num, int loop_reg_idx, int iter) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.bits.b.lc = loop_reg_idx; ++ param.iter = (uint8_t) (iter - 1); ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALP); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALPEND ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < backwards_jump[7:0] > 0 0 1 nf 1 lc bs x ++ * Example: ++ * DMALPEND ++ * 38 04 ++ ******************************************************************************/ ++const char dmac_code_DMALPEND[] = { 0x38, 0x04 }; ++ ++int DMAC_DMALPEND(int ch_num, int loop_reg_idx, int jump, int lpfe) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ param.bits.b.lc = loop_reg_idx; ++ param.bits.b.nf = lpfe; ++ param.backwards_jump = jump; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALPEND, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALPEND); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAMOV ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 0 1 1 1 1 0 0 ++ * ++ * | 47 16 | ++ * < imm[31:0] > ++ * ++ * # CCR Description ++ * # [30:28] Endian swap size ++ * # [27:25] AWCACHE[3,1:0] value ++ * # [24:22] AWPROT value ++ * # [21:18] AWLEN value ++ * # [17:15] AWSIZE value ++ * # [14] AWBURST[0] value ++ * 0 - FIXED / 1 - INCR ++ * # [13:11] ARCACHE[2:0] 
value ++ * # [10:8] ARPROT value ++ * # [7:4] ARLEN value ++ * # [3:1] ARSIZE value ++ * # [0] ARBURST[0] value ++ * 0 - FIXED / 1 - INCR ++ * Example: ++ * DMAMOV CCR, SB1 SS32 DB1 DS32 ++ * BC 01 05 40 01 00 ++ ******************************************************************************/ ++const char dmac_code_DMAMOV[] = { 0xBC, 0x01, 0x05, 0x40, 0x01, 0x00 }; ++ ++/* ccr_sar_dar: 0 for SAR, 1, for CCR, 2 for DAR */ ++//typedef enum { SAR = 0, CCR = 1, DAR = 2} dmamov_arg_t; ++int DMAC_DMAMOV(int ch_num, dmamov_arg_t ccr_sar_dar, uint32_t value) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.rd = ccr_sar_dar; ++ param.imm32.i.val = value; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAMOV, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAMOV); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAST ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 1 0 bs x ++ * Example: ++ * DMAST ++ * 08 ++ ******************************************************************************/ ++const char dmac_code_DMAST[] = { 0x08 }; ++ ++int DMAC_DMAST(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAST, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAST); ++ ++const char dmac_code_DMAWMB[] = { 0x13 }; ++ ++int DMAC_DMAWMB(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAWMB, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed\n"); ++ return -1; ++ } ++ return 0; ++} ++ 
++EXPORT_SYMBOL(DMAC_DMAWMB); ++ ++const char dmac_code_DMANOP[] = { 0x18 }; ++ ++int DMAC_DMANOP(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMANOP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed\n"); ++ return -1; ++ } ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMANOP); ++ ++int DMAC_DMASTB(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.bits.b.x = 1; ++ param.bits.b.bs = 1; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAST, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMASTB); ++ ++int DMAC_DMASTS(int ch_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.bits.b.x = 1; ++ param.bits.b.bs = 0; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAST, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMASTS); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASTZ ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 1 1 0 0 ++ * Example: ++ * DMASTZ ++ * 08 ++ ******************************************************************************/ ++const char dmac_code_DMASTZ[] = { 0x0C }; ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAWFE ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 i 0 0 0 1 1 0 1 1 0 ++ * Example: ++ * DMAWFE E0 ++ * 36 00 ++ ******************************************************************************/ ++const char dmac_code_DMAWFE[] = { 0x36, 0x00 }; ++ ++int DMAC_WFE(int chan, int event_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 
0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++//#warning "to set bits" ++ param.event_num = event_num; ++ instr_len = dmac_create_instr(chan, DMAC_INSTR_DMAWFE, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_WFE); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAWFP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 1 0 0 bs p ++ * Example: ++ * DMAWFP P0, periph ++ * 31 00 ++ ******************************************************************************/ ++const char dmac_code_DMAWFP[] = { 0x31, 0x00 }; ++ ++int DMAC_DMAWFP(int ch_num, int periph_id, dmawfp_burst_type s) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ if (s == SINGLE) { ++ param.bits.b.bs = 0; ++ param.bits.b.p = 0; ++ } ++ if (s == BURST) { ++ param.bits.b.bs = 1; ++ param.bits.b.p = 0; ++ } ++ if (s == PERIPHERAL) { ++ param.bits.b.bs = 0; ++ param.bits.b.p = 1; ++ } ++ param.periph = periph_id; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAWFP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMAWFP); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAKILL ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 0 0 1 ++ * Example: ++ * DMAKILL ++ * 01 ++ ******************************************************************************/ ++const char dmac_code_DMAKILL[] = { 0x01 }; ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASEV ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 i 0 0 0 1 1 0 1 0 0 ++ * Example: ++ * DMASEV E0 ++ * 34 00 ++ 
******************************************************************************/ ++const char dmac_code_DMASEV[] = { 0x34, 0x00 }; ++ ++int DMAC_DMASEV(int ch_num, int event_num) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ dmac_channel_t *dma_ch = dmac_channels[ch_num]; ++ if ((event_num >= MIN_EVENT_NUM) ++ && !(dma_ch->notifications_used & (1 << event_num))) { ++ printk("DMAC_DMASEV failed event number request not done\n"); ++ return -1; ++ } else if ((event_num < MIN_EVENT_NUM) && (event_num != ch_num)) { ++ printk ++ ("%s:%d - Presently, we have this hard restriction that each channel can signal irq event == channel_no\n", ++ __FUNCTION__, __LINE__); ++ return -1; ++ } ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ param.event_num = event_num; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMASEV, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMASEV); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALDP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 0 0 1 bs 1 ++ * Example: ++ * DMALDPS P0 ++ * 25 00 ++ ******************************************************************************/ ++const char dmac_code_DMALDP[] = { 0x25, 0x00 }; ++ ++int DMAC_DMALDP(int ch_num, int periph_id, int burst) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ param.periph = periph_id; ++ param.bits.b.bs = burst; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMALDP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMALDP); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASTP ++ * Description: ++ * | 15 14 13 12 | 11 10 
9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 0 1 0 bs 1 ++ * Example: ++ * DMASTPS P0 ++ * 29 00 ++ ******************************************************************************/ ++const char dmac_code_DMASTP[] = { 0x29, 0x00 }; ++ ++int DMAC_DMASTP(int ch_num, int periph_id, int burst) ++{ ++ dmac_instr_param_t param; ++ int instr_len; ++ memset(¶m, 0, sizeof(dmac_instr_param_t)); ++ /* param.bits.b.x = param.bits.b.bs = 0; */ ++ param.periph = periph_id; ++ param.bits.b.bs = burst; ++ instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMASTP, ¶m); ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(DMAC_DMASTP); ++ ++dmac_instr_encode_t dmac_codes[] = { ++ {dmac_code_DMAEND, sizeof(dmac_code_DMAEND), CHANNEL_AND_MANAGER} ++ , ++ {dmac_code_DMAFLUSHP, sizeof(dmac_code_DMAFLUSHP), CHANNEL_ONLY} ++ , ++ {dmac_code_DMAGO, sizeof(dmac_code_DMAGO), MANAGER_ONLY} ++ , ++ {dmac_code_DMALD, sizeof(dmac_code_DMALD), CHANNEL_ONLY} ++ , ++ {dmac_code_DMALDP, sizeof(dmac_code_DMALDP), CHANNEL_ONLY} ++ , ++ {dmac_code_DMALP, sizeof(dmac_code_DMALP), CHANNEL_ONLY} ++ , ++ {dmac_code_DMALPEND, sizeof(dmac_code_DMALPEND), CHANNEL_ONLY} ++ , ++ {dmac_code_DMAKILL, sizeof(dmac_code_DMAKILL), CHANNEL_AND_MANAGER} ++ , ++ {dmac_code_DMAMOV, sizeof(dmac_code_DMAMOV), CHANNEL_ONLY} ++ , ++ {dmac_code_DMANOP, sizeof(dmac_code_DMANOP), CHANNEL_AND_MANAGER} ++ , ++ {dmac_code_DMASEV, sizeof(dmac_code_DMASEV), CHANNEL_AND_MANAGER} ++ , ++ {dmac_code_DMAST, sizeof(dmac_code_DMAST), CHANNEL_ONLY} ++ , ++ {dmac_code_DMASTP, sizeof(dmac_code_DMASTP), CHANNEL_ONLY} ++ , ++ {dmac_code_DMASTZ, sizeof(dmac_code_DMASTZ), CHANNEL_ONLY} ++ , ++ {dmac_code_DMAWFE, sizeof(dmac_code_DMAWFE), CHANNEL_AND_MANAGER} ++ , ++ {dmac_code_DMAWFP, sizeof(dmac_code_DMAWFP), CHANNEL_ONLY} ++ , ++ {dmac_code_DMAWMB, sizeof(dmac_code_DMAWMB), CHANNEL_ONLY} ++ , ++}; ++ ++static void Dmac_Cmd_Write32(uint8_t * buf, uint32_t val) ++{ 
++ buf[0] = (uint8_t) (val); ++ buf[1] = (uint8_t) (val >> 8); ++ buf[2] = (uint8_t) (val >> 16); ++ buf[3] = (uint8_t) (val >> 24); ++ ++ return; ++} ++ ++static int ++dmac_create_instr(int chan, dmac_instr_t instr, dmac_instr_param_t * param) ++{ ++ int len = 0; ++ dmac_channel_t *dma_ch = dmac_channels[chan]; ++ uint8_t *buf = NULL; ++#ifdef DEBUG_GDMA ++ printk("%s:%d: In with channel no %d\n", __FUNCTION__, __LINE__, chan); ++#endif ++ ++ if (!((0x1 << chan) & dmac_codes[instr].chan_or_mgr)) { ++ printk("Channel %d does not support this instruction %d\n", ++ chan, instr); ++ return -1; ++ } ++#ifdef DEBUG_GDMA ++ if (!dma_ch) ++ printk("%s:%d: Bug here !!\n", __FUNCTION__, __LINE__); ++#endif ++ ++ if (dma_ch->microcode == NULL) { ++ buf = dma_ch->microcode = ++ dma_alloc_coherent(NULL, MAX_MICROCODE_SIZE, ++ &dma_ch->microcode_dma, GFP_KERNEL); ++ printk ++ ("First time microcode alloc for channel %d done @phy:%x\n", ++ chan, dma_ch->microcode_dma); ++ dma_ch->microcode_size = 0; ++ } else { ++ if ((dmac_codes[instr].enc_buf_len + dma_ch->microcode_size) > ++ MAX_MICROCODE_SIZE) { ++ printk ++ ("We have a buffer overflow [%d]issue here ... 
BUG !!\n", ++ dma_ch->microcode_size); ++ return -1; ++ } ++ buf = dma_ch->microcode + dma_ch->microcode_size; ++ } ++#ifdef DEBUG_GDMA ++ printk("%s:%d: Microcode alloc for channel %d\n", __FUNCTION__, ++ __LINE__, chan); ++#endif ++ ++ if (buf == NULL) { ++ printk("%s: Unable to allocate memory for microocode space\n", ++ __FUNCTION__); ++ return -1; ++ } ++#ifdef DEBUG_GDMA ++ printk("%s:%d: allocated microcode buffer%p [@phy: %x]\n", __FUNCTION__, ++ __LINE__, buf, dma_ch->microcode_dma + dma_ch->microcode_size); ++#endif ++ /* TODO: buf_space checking */ ++ memcpy(buf, dmac_codes[instr].enc_buf, dmac_codes[instr].enc_buf_len); ++ len += dmac_codes[instr].enc_buf_len; ++ ++ /* TODO: Parameter checking */ ++ switch (instr) { ++ case DMAC_INSTR_DMAEND: ++ case DMAC_INSTR_DMASTZ: ++ case DMAC_INSTR_DMAKILL: ++ case DMAC_INSTR_DMAWMB: ++ case DMAC_INSTR_DMANOP: ++ /* no parameter needed */ ++ break; ++ ++ case DMAC_INSTR_DMAFLUSHP: ++ /* Fill additional parameters */ ++ buf[1] |= (param->periph) << 3; // shift to bit 11 ++ break; ++ ++ case DMAC_INSTR_DMAGO: ++ // Fill additional parameters ++ if (param->bits.b.ns) ++ buf[0] |= 0x2; ++ else ++ buf[0] &= ~0x2; ++ buf[1] = param->cn & 0x7; ++//#warning "rewrite this" ++ Dmac_Cmd_Write32(&buf[2], param->imm32.i.val); ++ //memcpy (&buf[2],&(param->imm32.i.val),4); ++ break; ++ ++ case DMAC_INSTR_DMALD: ++ case DMAC_INSTR_DMAST: ++ // Fill additional parameters ++ buf[0] &= 0xFC; ++ if (param->bits.b.x) ++ buf[0] |= 0x1; ++ else ++ buf[0] &= ~0x1; ++ if (param->bits.b.bs) ++ buf[0] |= 0x2; ++ else ++ buf[0] &= ~0x2; ++ break; ++ ++ case DMAC_INSTR_DMALP: ++ buf[0] &= (~0x2); ++ if (param->bits.b.lc) ++ buf[0] |= 0x2; ++ buf[1] = param->iter; ++ break; ++ ++ case DMAC_INSTR_DMALPEND: ++ // Fill additional parameters ++ buf[0] = 0x28; ++ if (param->bits.b.x) ++ buf[0] |= 0x1; ++ if (param->bits.b.bs) ++ buf[0] |= 0x2; ++ if (param->bits.b.lc) ++ buf[0] |= 0x4; ++ if (param->bits.b.nf) ++ buf[0] |= 0x10; ++ buf[1] = 
param->backwards_jump; ++ break; ++ ++ case DMAC_INSTR_DMAMOV: ++ // Fill additional parameters ++ buf[1] = (param->rd) & 0x7; ++//#warning "rewrite this" ++ Dmac_Cmd_Write32(&buf[2], param->imm32.i.val); ++ //memcpy (&buf[2],&(param->imm32.i.val),4); ++ break; ++ ++ case DMAC_INSTR_DMAWFE: ++ buf[1] = 0x0; ++ if (param->bits.b.i) ++ buf[1] |= 0x2; ++ buf[1] |= (param->event_num) << 3; // shift to bit 11 ++ break; ++ ++ case DMAC_INSTR_DMASEV: ++ buf[1] |= (param->event_num) << 3; // shift to bit 11 ++ break; ++ ++ case DMAC_INSTR_DMAWFP: ++ if (param->bits.b.p) ++ buf[0] |= 0x1; ++ else ++ buf[0] &= ~0x1; ++ if (param->bits.b.bs) ++ buf[0] |= 0x2; ++ else ++ buf[0] &= ~0x2; ++ buf[1] |= (param->periph) << 3; // shift to bit 11 ++ break; ++ ++ case DMAC_INSTR_DMALDP: ++ case DMAC_INSTR_DMASTP: ++ // Fill additional parameters ++ if (param->bits.b.bs) ++ buf[0] |= 0x2; ++ else ++ buf[0] &= ~0x2; ++ buf[1] |= (param->periph) << 3; // shift to bit 11 ++ break; ++ ++ default: ++ printk("%s: unknown instr (%d)\r\n", __FUNCTION__, instr); ++ break; ++ } ++ dma_ch->microcode_size += len; ++#ifdef DEBUG_GDMA ++ printk("%s:%d: out with length %d\n", __FUNCTION__, __LINE__, ++ dma_ch->microcode_size); ++ { ++ int foo = 0; ++ uint8_t *foop = dma_ch->microcode; ++ printk("Dumping the buffer -- "); ++ for (foo = 0; foo < dma_ch->microcode_size; foo++) ++ printk("%x ", *(foop + foo)); ++ printk(" -- done.\n"); ++ } ++#endif ++ return len; ++} ++ ++static int dmac_exec_ucode(int ucode_channel, int ch) ++{ ++ uint8_t i, dbg_instr_0_shift_base, dbg_instr_2_shift_base, dbg_cmd_len, ++ *dbg_cmd_buf; ++ uint32_t dbg1_val, dbg2_val; ++ dmac_channel_t *dma_ch = dmac_channels[ucode_channel]; ++ ++ if (!dma_ch->microcode_size) { ++ printk("%s: No instructions have been created\n", __FUNCTION__); ++ return -1; ++ } ++ ++ dbg_cmd_buf = dma_ch->microcode; ++ dbg_cmd_len = dma_ch->microcode_size; ++#ifdef DEBUG_GDMA ++ { ++ int tmp; ++ uint8_t *tmpp = dbg_cmd_buf; ++ printk ++ ("Executing the 
code for channel %d, with instrn len %d\n", ++ ch, dma_ch->microcode_size); ++ printk("Dumping microcode : "); ++ for (tmp = 0; tmp < dbg_cmd_len; tmp++) ++ printk("%x ", *tmpp++); ++ printk("\n"); ++ } ++#endif ++ ++ spin_lock(&dma_mgr_lock); ++ ++ /* 3. Poll the Debug Status Register */ ++ while (DMAC_DBGSTATUS & DMAC_DBG_BUSY_BIT) ; ++ ++ /* 4. Write to the Debug Instrution-X Register */ ++ dbg1_val = 0; ++ dbg2_val = 0; ++ ++ dbg_instr_0_shift_base = DMAC_DBG_INSTR_0_SHIFT; ++ dbg_instr_2_shift_base = DMAC_DBG_INSTR_2_SHIFT; ++ for (i = 0; i < dbg_cmd_len; i++) { ++ uint8_t tmp_val = dbg_cmd_buf[i]; ++ switch (i) { ++ case 0: ++ case 1: ++ dbg1_val |= (tmp_val << dbg_instr_0_shift_base); ++ dbg_instr_0_shift_base += 8; ++ break; ++ case 2: ++ case 3: ++ case 4: ++ case 5: ++ tmp_val = dbg_cmd_buf[i]; ++ dbg2_val |= (tmp_val << dbg_instr_2_shift_base); ++ dbg_instr_2_shift_base += 8; ++ break; ++ default: ++ printk("BUG here ... DEBUG\n"); ++ break; ++ } ++ } ++ ++ // Fill channel field ++ if (ch == DMAC_CHMGR) { ++ dbg1_val &= (~DMAC_DBG_THREAD_BIT); ++ } else { ++ dbg1_val |= DMAC_DBG_THREAD_BIT; ++ dbg1_val |= ++ ((ch & DMAC_DBG_CH_NUM_BIT_MASK) << DMAC_DBG_CH_NUM_SHIFT); ++ } ++ ++#ifdef DEBUG_GDMA ++ { ++ printk("dbg1_val: %x, dbg2_val: %x\n", dbg1_val, dbg2_val); ++ } ++#endif ++ ++ DMAC_DBGINST0 = dbg1_val; ++ DMAC_DBGINST1 = dbg2_val; ++ ++ /* 5. Writing zero to the Debug Command Register */ ++ DMAC_DBGCMD = 0x0; ++ ++ spin_unlock(&dma_mgr_lock); ++ return 0; ++} ++ ++#define MAX_SINGLE_INSTR_LEN 8 /* TODO */ ++ ++static int dmac_channel_state_init(int ch_num) ++{ ++ int instr_len = dmac_create_instr(ch_num, DMAC_INSTR_DMAKILL, NULL); ++ ++ if (instr_len < 0) { ++ printk("dmac_create_instr failed \n"); ++ return -1; ++ } ++ ++ dmac_exec_ucode(ch_num, ch_num); ++ ++ if (dmac_channels[ch_num]) ++ dmac_channels[ch_num]->microcode_size = 0; ++ else ++ printk("BUG HERE !! DEBUG .. 
\n"); ++ ++ return 0; ++} ++ ++static irqreturn_t dmac_irq_handler(int irq, void *dev_id) ++{ ++ uint32_t irq_status = 0; ++ uint8_t event_status = 0, channel_no = 0; ++ dmac_channel_t *chan = NULL; ++ ++ irq_status = DMAC_INTSTATUS; /* TODO: Get Interrupt status */ ++#ifdef DEBUG_GDMA ++ printk("Dumping the interrupt status register %x\n", irq_status); ++#endif ++ ++ if (!irq_status) { ++#ifdef DEBUG_GDMA ++ printk("%s: Probably a DMAC Fault !!%x\n", __FUNCTION__, ++ irq_status); ++ pl330_dump_regs(); ++#endif ++ return IRQ_NONE; ++ } ++ ++// if (irq_status >= MIN_EVENT_NUM) { ++// printk(KERN_CRIT ++// "Event interrupt handler..(%d) Not implemented\n", ++// irq_status); ++// return IRQ_NONE; ++// } ++ ++ event_status = irq_status & 0xff; ++ /* Clear Interrupt */ ++ DMAC_INTCLR |= (irq_status & 0xff); ++ ++ while (event_status) { ++ if (event_status & 0x1) { ++ chan = dmac_channels[channel_no]; ++ if (chan->intr_handler && chan->in_use) ++ chan->intr_handler(chan->handler_args); ++ } ++ event_status >>= 1; ++ channel_no++; ++ } ++ return IRQ_HANDLED; ++} ++ ++static void cns3xxx_dmac_hw_init(void) ++{ ++#ifdef CONFIG_CNS3XXX_PM_API ++ /* enable GDMA clock*/ ++ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(GDMA)); ++ /* check clok status and power status */ ++ #if 0 ++ PM_PWR_STA_REG & (0x1 << PM_PWR_STA_REG_REG_OFFSET_GDMA) ++ PM_CACTIVE_STA_REG & (0x1 << PM_CACTIVE_STA_REG_OFFSET_GDMA) ++ #endif ++ /* do software reset*/ ++ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(GDMA)); ++#else ++#error "CNS3XXX PM API support should be enabled in Linux kernel" ++#endif ++} ++ ++/* ++ * dmac_init ++ */ ++int __init dmac_init(void) ++{ ++ int i, irqno = DMAC_IRQNO_BASE; ++ ++ printk(KERN_INFO "Initializing CNS3XXX DMA controller \n"); ++ ++ cns3xxx_dmac_hw_init(); ++ ++ memset(dmac_channels, 0, sizeof(dmac_channel_t *) * MAX_DMA_CHANNELS); ++ ++ spin_lock_init(&dma_mgr_lock); ++ ++ for (i = 0; i < MAX_DMA_CHANNELS; i++) { ++ dmac_channels[i] = kmalloc(sizeof(dmac_channel_t), 
GFP_KERNEL); ++ ++ if (dmac_channels[i] == NULL) { ++ printk("Unable to allocate memory for channel %d \n", ++ i); ++ return -ENOMEM; ++ } ++ ++ memset(dmac_channels[i], 0, sizeof(dmac_channel_t)); ++ } ++ ++ /* Moves all the DMA channels to the Stopped state */ ++ for (i = 0; i < MAX_DMA_CHANNELS; i++) ++ dmac_channel_state_init(i); ++ ++ for (i = 0; i < MAX_INTR_EVENTS; i++) ++ dmac_events[i] = -1; ++ ++ /* Clear spurious interrupts */ ++ DMAC_INTCLR = 0xffffffff; ++ DMAC_INTEN = 0xff; //Enable 8 interrupt 0x03; /* Enabling interrupts IRQ[0], IRQ[1] */ ++ ++ /* TODO: error interrupt Right now using the same irq handler, ++ * and reporting error inside the handler ++ */ ++ if (request_irq(ERROR_INTR, dmac_irq_handler, 0, "DMAC-ERR", NULL)) { ++ printk(KERN_CRIT "failed to request DMAC-ERR interrupt.\n"); ++ return -ENOENT; ++ } ++ ++ do { ++ if (request_irq(irqno, dmac_irq_handler, 0, "DMAC", NULL)) { ++ printk(KERN_CRIT "failed to request DMAC interrupt.\n"); ++ return -ENOENT; ++ } ++ } while (++irqno < (DMAC_IRQNO_BASE + MIN_EVENT_NUM)); ++ ++ return 0; ++} ++ ++/* ++ * dmac_get_channel ++ */ ++int dmac_get_channel(int (*handler) (void *), void *handler_args) ++{ ++ int i; ++ ++ for (i = 0; i < MAX_DMA_CHANNELS; i++) ++ if (dmac_channels[i]->in_use == 0) { ++ dmac_channel_t *dmac_ch = dmac_channels[i]; ++ ++ dmac_ch->microcode_size = 0; ++ dmac_ch->in_use = 1; ++ dmac_ch->intr_handler = handler; ++ dmac_ch->handler_args = handler_args; ++ ++ /* TODO enable interrupts for that channel */ ++// dmac_channel_state_init(i); ++ return i; ++ } ++ ++ return -1; ++} ++ ++int dmac_get_channel_ex(int channel, int (*handler) (void *), void *handler_args) ++{ ++ if((channel >= 0) && (channel < MAX_DMA_CHANNELS) && (dmac_channels[channel]->in_use == 0)) { ++ dmac_channel_t *dmac_ch = dmac_channels[channel]; ++ ++ dmac_ch->microcode_size = 0; ++ dmac_ch->in_use = 1; ++ dmac_ch->intr_handler = handler; ++ dmac_ch->handler_args = handler_args; ++ ++ /* TODO enable interrupts 
for that channel */ ++// dmac_channel_state_init(channel); ++ return channel; ++ } ++ ++ return -1; ++} ++ ++EXPORT_SYMBOL(dmac_get_channel); ++EXPORT_SYMBOL(dmac_get_channel_ex); ++ ++/* ++ * dmac_release_channel ++ */ ++int dmac_release_channel(int chan) ++{ ++ dmac_channel_t *dma_ch; ++ ++ if (chan < 0 || chan > 7) ++ return -1; ++ ++ dma_ch = dmac_channels[chan]; ++ if (!dma_ch->in_use) ++ return -1; ++ ++ dma_ch->in_use = 0; ++ dma_ch->microcode_size = 0; ++ dma_ch->intr_handler = 0; ++ dma_ch->handler_args = 0; ++ ++ /* TODO enable interrupts for that channel */ ++ dmac_channel_state_init(chan); ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(dmac_release_channel); ++ ++/* ++ * ++ */ ++int dmac_get_event(int chan, int event_num) ++{ ++ if ((event_num < MIN_EVENT_NUM) || (event_num > MAX_INTR_EVENTS)) { ++ return -1; ++ } ++ ++ if (dmac_events[event_num] == -1) { ++ dmac_channel_t *dmac_ch = dmac_channels[chan]; ++ dmac_events[event_num] = chan; ++ dmac_ch->notifications_used |= (1 << event_num); ++ return 0; ++ } ++ return -1; ++} ++ ++EXPORT_SYMBOL(dmac_get_event); ++ ++/* ++ * ++ */ ++int dmac_release_event(int chan, int event_num) ++{ ++ if (dmac_events[event_num] != chan) ++ return -1; ++ ++ dmac_events[event_num] = -1; ++ dmac_channels[chan]->notifications_used ^= (1 << event_num); ++ return 0; ++} ++ ++EXPORT_SYMBOL(dmac_release_event); ++ ++static int get_bpb_val(int bpb) ++{ ++ int i = bpb; ++ int retval = -1; ++ while (i) { ++ retval += 0x1; ++ i /= 2; ++ } ++ return retval; ++} ++ ++/* @src_inc - src address auto increment ++ * @s_bpb - src bytes per burst ++ * @s_dt - src num of data transfers ++ * @dst_inc - dst address auto increment ++ * @d_bpb - dst bytes per burst ++ * @d_dt - dst data transfers ++ * @swap - swapping bytes ++ */ ++uint32_t dmac_create_ctrlval(int src_inc, int s_bpb, int s_dt, int dst_inc, ++ int d_bpb, int d_dt, int swap) ++{ ++ if (! 
++ ((s_bpb == 1) || (s_bpb == 2) || (s_bpb == 4) || (s_bpb == 8) ++ || (s_bpb == 16) ++ || (s_bpb == 32) || (s_bpb == 64) || (s_bpb == 128))) { ++ printk ++ ("INVALID s_bpb parameter ... setting default and proceeding\n"); ++ s_bpb = 4; ++ } ++ if (! ++ ((d_bpb == 1) || (d_bpb == 2) || (d_bpb == 4) || (d_bpb == 8) ++ || (d_bpb == 16) ++ || (d_bpb == 32) || (d_bpb == 64) || (d_bpb == 128))) { ++ printk ++ ("INVALID d_bpb parameter ... setting default and proceeding\n"); ++ d_bpb = 4; ++ } ++ ++ if ((s_dt < 1) || (s_dt > 16)) { ++ printk ++ ("INVALID s_dt parameter ... setting default and proceeding\n"); ++ s_dt = 1; ++ } ++ if ((d_dt < 1) || (d_dt > 16)) { ++ printk ++ ("INVALID d_dt parameter ... setting default and proceeding\n"); ++ d_dt = 1; ++ } ++ return (((src_inc & 0x1) << 0) | ++ ((get_bpb_val(s_bpb) & 0x7) << 1) | ++ ((s_dt - 1) << 4) | ++ (0x2 << 8) | ++ (0x0 << 11) | ++ ((dst_inc & 0x1) << 14) | ++ ((get_bpb_val(d_bpb) & 0x7) << 15) | ++ ((d_dt - 1) << 18) | (0x2 << 22) | (0x0 << 25) | (swap << 28) ++ ); ++} ++ ++EXPORT_SYMBOL(dmac_create_ctrlval); ++ ++void pl330_dump_regs(void) ++{ ++ printk("Read Periph Id 0 for GDMAC is %x\n", DMAC_MEM_MAP_VALUE(0xFE0)); ++ printk("DS Register: %x\n", DMAC_MEM_MAP_VALUE(0x0)); ++ printk("Conf Reg 0 : %x\n", DMAC_MEM_MAP_VALUE(0xE00)); ++ printk("Conf Reg 1 : %x\n", DMAC_MEM_MAP_VALUE(0xE04)); ++ printk("Conf Reg 2 : %x\n", DMAC_MEM_MAP_VALUE(0xE08)); ++ printk("Conf Reg 3 : %x\n", DMAC_MEM_MAP_VALUE(0xE0C)); ++ printk("Conf Reg 4 : %x\n", DMAC_MEM_MAP_VALUE(0xE10)); ++ printk("Conf Reg d : %x\n", DMAC_MEM_MAP_VALUE(0xE14)); ++ ++ printk("Dumping the status registers \n"); ++ printk("INTEN Register: %x\n", DMAC_MEM_MAP_VALUE(0x20)); ++ printk("ES Register: %x\n", DMAC_MEM_MAP_VALUE(0x24)); ++ printk("INTSTAT Register: %x\n", DMAC_MEM_MAP_VALUE(0x28)); ++ printk("FSDM Register: %x\n", DMAC_MEM_MAP_VALUE(0x30)); ++ printk("FSC Register: %x\n", DMAC_MEM_MAP_VALUE(0x34)); ++ printk("FTM Register: %x\n", 
DMAC_MEM_MAP_VALUE(0x38)); ++ printk("FTC0 Register: %x\n", DMAC_MEM_MAP_VALUE(0x40)); ++ printk("FTC1 Register: %x\n", DMAC_MEM_MAP_VALUE(0x44)); ++ printk("CS0 Register: %x\n", DMAC_MEM_MAP_VALUE(0x100)); ++ printk("CPC0 Register: %x\n", DMAC_MEM_MAP_VALUE(0x104)); ++ printk("CS1 Register: %x\n", DMAC_MEM_MAP_VALUE(0x108)); ++ printk("CPC1 Register: %x\n", DMAC_MEM_MAP_VALUE(0x10C)); ++ printk("SA0 Register: %x\n", DMAC_MEM_MAP_VALUE(0x400)); ++ printk("SA1 Register: %x\n", DMAC_MEM_MAP_VALUE(0x420)); ++ printk("DA0 Register: %x\n", DMAC_MEM_MAP_VALUE(0x404)); ++ printk("DA1 Register: %x\n", DMAC_MEM_MAP_VALUE(0x424)); ++ return; ++} ++ ++EXPORT_SYMBOL(pl330_dump_regs); ++ ++/* ++ * ++ */ ++uint32_t DMAC_READ_CHREGS(int chan, chregs_t reg) ++{ ++ int step = 0, base = 0; ++ ++ switch (reg) { ++ case PL330_FTC: ++ base = 0x40; ++ step = chan * 0x4; ++ break; ++ case PL330_CS: ++ base = 0x100; ++ step = chan * 0x8; ++ break; ++ case PL330_CPC: ++ base = 0x104; ++ step = chan * 0x8; ++ break; ++ case PL330_SA: ++ base = 0x400; ++ step = chan * 0x20; ++ break; ++ case PL330_DA: ++ base = 0x404; ++ step = chan * 0x20; ++ break; ++ case PL330_CC: ++ base = 0x408; ++ step = chan * 0x20; ++ break; ++ case PL330_LC0: ++ base = 0x40C; ++ step = chan * 0x20; ++ break; ++ case PL330_LC1: ++ base = 0x410; ++ step = chan * 0x20; ++ break; ++ default: ++ printk("Wrong argument to function %s\n", __FUNCTION__); ++ } ++ return DMAC_MEM_MAP_VALUE(base + step); ++} ++ ++EXPORT_SYMBOL(DMAC_READ_CHREGS); +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/headsmp.S +@@ -0,0 +1,54 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/headsmp.S ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (c) 2003 ARM Limited ++ * All Rights Reserved ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. 
++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++#include ++#include ++ ++ __INIT ++ ++/* ++ * CNS3XXX specific entry point for secondary CPUs. This provides ++ * a "holding pen" into which all secondary cores are held until we're ++ * ready for them to initialise. ++ */ ++ENTRY(cns3xxx_secondary_startup) ++ mrc p15, 0, r0, c0, c0, 5 ++ and r0, r0, #15 ++ adr r4, 1f ++ ldmia r4, {r5, r6} ++ sub r4, r4, r5 ++ add r6, r6, r4 ++pen: ldr r7, [r6] ++ cmp r7, r0 ++ bne pen ++ ++ /* ++ * we've been released from the holding pen: secondary_stack ++ * should now contain the SVC stack for this core ++ */ ++ b secondary_startup ++ ++1: .long . ++ .long pen_release +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/hotplug.c +@@ -0,0 +1,155 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/hotplug.c ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. 
See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++#include ++#include ++#include ++#include ++ ++#include ++ ++extern volatile int pen_release; ++ ++static DECLARE_COMPLETION(cpu_killed); ++ ++static inline void cpu_enter_lowpower(void) ++{ ++ unsigned int v; ++ ++ flush_cache_all(); ++ asm volatile( ++ " mcr p15, 0, %1, c7, c5, 0\n" ++ " mcr p15, 0, %1, c7, c10, 4\n" ++ /* ++ * Turn off coherency ++ */ ++ " mrc p15, 0, %0, c1, c0, 1\n" ++ " bic %0, %0, #0x20\n" ++ " mcr p15, 0, %0, c1, c0, 1\n" ++ " mrc p15, 0, %0, c1, c0, 0\n" ++ " bic %0, %0, #0x04\n" ++ " mcr p15, 0, %0, c1, c0, 0\n" ++ : "=&r" (v) ++ : "r" (0) ++ : "cc"); ++} ++ ++static inline void cpu_leave_lowpower(void) ++{ ++ unsigned int v; ++ ++ asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" ++ " orr %0, %0, #0x04\n" ++ " mcr p15, 0, %0, c1, c0, 0\n" ++ " mrc p15, 0, %0, c1, c0, 1\n" ++ " orr %0, %0, #0x20\n" ++ " mcr p15, 0, %0, c1, c0, 1\n" ++ : "=&r" (v) ++ : ++ : "cc"); ++} ++ ++static inline void platform_do_lowpower(unsigned int cpu) ++{ ++ /* ++ * there is no power-control hardware on this platform, so all ++ * we can do is put the core into WFI; this is safe as the calling ++ * code will have already disabled interrupts ++ */ ++ for (;;) { ++ /* ++ * here's the WFI ++ */ ++ asm(".word 0xe320f003\n" ++ : ++ : ++ : "memory", "cc"); ++ ++ if (pen_release == cpu) { ++ /* ++ * OK, proper wakeup, we're done ++ */ ++ break; ++ } ++ ++ /* ++ * getting here, means that we have come out of WFI without ++ * having been woken up - this shouldn't happen ++ * ++ * The trouble is, letting people know about this is not 
really ++ * possible, since we are currently running incoherently, and ++ * therefore cannot safely call printk() or anything else ++ */ ++#ifdef DEBUG ++ printk("CPU%u: spurious wakeup call\n", cpu); ++#endif ++ } ++} ++ ++int platform_cpu_kill(unsigned int cpu) ++{ ++ return wait_for_completion_timeout(&cpu_killed, 5000); ++} ++ ++/* ++ * platform-specific code to shutdown a CPU ++ * ++ * Called with IRQs disabled ++ */ ++void platform_cpu_die(unsigned int cpu) ++{ ++#ifdef DEBUG ++ unsigned int this_cpu = hard_smp_processor_id(); ++ ++ if (cpu != this_cpu) { ++ printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n", ++ this_cpu, cpu); ++ BUG(); ++ } ++#endif ++ ++ printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); ++ complete(&cpu_killed); ++ ++ /* ++ * we're ready for shutdown now, so do it ++ */ ++ cpu_enter_lowpower(); ++ platform_do_lowpower(cpu); ++ ++ /* ++ * bring this CPU back into the world of cache ++ * coherency, and then restore interrupts ++ */ ++ cpu_leave_lowpower(); ++} ++ ++int mach_cpu_disable(unsigned int cpu) ++{ ++ /* ++ * we don't allow CPU 0 to be shutdown (it is still too special ++ * e.g. clock tick interrupts) ++ */ ++ return cpu == 0 ? -EPERM : 0; ++} +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/board.h +@@ -0,0 +1,386 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/board.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_BOARD_CNS3XXXH ++#define __ASM_ARCH_BOARD_CNS3XXXH ++ ++/* ++ * Cavium Networks CNS3XXX Linux Memory Map: ++ * ++ * Phy Size Virt Description ++ * ========================================================================= ++ * ++ * 0x00000000 0x10000000(max) PAGE_OFFSET Alien RAM (??) ++ * ++ * 0x78000000 0x00400000 0xFFF09000 UART0 ++ * ++ */ ++ ++/* ++ * Peripheral addresses ++ */ ++#define CNS3XXX_FLASH0_BASE 0x10000000 /* Flash/SRAM Memory Bank 0 */ ++#define CNS3XXX_FLASH0_SIZE SZ_128M ++ ++#define CNS3XXX_FLASH1_BASE 0x11000000 /* Flash/SRAM Memory Bank 1 */ ++#define CNS3XXX_FLASH1_SIZE SZ_16M ++#define CNS3XXX_FLASH2_BASE 0x12000000 /* Flash/SRAM Memory Bank 2 */ ++#define CNS3XXX_FLASH2_SIZE SZ_16M ++#define CNS3XXX_FLASH3_BASE 0x13000000 /* Flash/SRAM Memory Bank 3 */ ++#define CNS3XXX_FLASH3_SIZE SZ_16M ++ ++#define CNS3XXX_DDR2SDRAM_BASE 0x20000000 /* DDR2 SDRAM Memory */ ++ ++#define CNS3XXX_SPI_FLASH_BASE 0x60000000 /* SPI Serial Flash Memory */ ++ ++#define CNS3XXX_SWITCH_BASE 0x70000000 /* Switch and HNAT Control */ ++#define CNS3XXX_SWITCH_BASE_VIRT 0xFFF00000 ++ ++#define CNS3XXX_PPE_BASE 0x70001000 /* HANT */ ++#define CNS3XXX_PPE_BASE_VIRT 0xFFF50000 ++ ++#define CNS3XXX_EMBEDDED_SRAM_BASE 0x70002000 /* HANT Embedded SRAM */ ++#define CNS3XXX_EMBEDDED_SRAM_BASE_VIRT 0xFFF60000 ++ ++#define CNS3XXX_SSP_BASE 0x71000000 /* Synchronous Serial Port - SPI/PCM/I2C */ ++#define CNS3XXX_SSP_BASE_VIRT 0xFFF01000 ++ ++#define CNS3XXX_DMC_BASE 0x72000000 /* DMC Control (DDR2 SDRAM) */ ++#define CNS3XXX_DMC_BASE_VIRT 0xFFF02000 ++ ++#define 
CNS3XXX_SMC_BASE 0x73000000 /* SMC Control */ ++#define CNS3XXX_SMC_BASE_VIRT 0xFFF03000 ++ ++#define SMC_MEMC_STATUS_OFFSET 0x000 ++#define SMC_MEMIF_CFG_OFFSET 0x004 ++#define SMC_MEMC_CFG_SET_OFFSET 0x008 ++#define SMC_MEMC_CFG_CLR_OFFSET 0x00C ++#define SMC_DIRECT_CMD_OFFSET 0x010 ++#define SMC_SET_CYCLES_OFFSET 0x014 ++#define SMC_SET_OPMODE_OFFSET 0x018 ++#define SMC_REFRESH_PERIOD_0_OFFSET 0x020 ++#define SMC_REFRESH_PERIOD_1_OFFSET 0x024 ++#define SMC_SRAM_CYCLES0_0_OFFSET 0x100 ++#define SMC_NAND_CYCLES0_0_OFFSET 0x100 ++#define SMC_OPMODE0_0_OFFSET 0x104 ++#define SMC_SRAM_CYCLES0_1_OFFSET 0x120 ++#define SMC_NAND_CYCLES0_1_OFFSET 0x120 ++#define SMC_OPMODE0_1_OFFSET 0x124 ++#define SMC_USER_STATUS_OFFSET 0x200 ++#define SMC_USER_CONFIG_OFFSET 0x204 ++#define SMC_ECC_STATUS_OFFSET 0x300 ++#define SMC_ECC_MEMCFG_OFFSET 0x304 ++#define SMC_ECC_MEMCOMMAND1_OFFSET 0x308 ++#define SMC_ECC_MEMCOMMAND2_OFFSET 0x30C ++#define SMC_ECC_ADDR0_OFFSET 0x310 ++#define SMC_ECC_ADDR1_OFFSET 0x314 ++#define SMC_ECC_VALUE0_OFFSET 0x318 ++#define SMC_ECC_VALUE1_OFFSET 0x31C ++#define SMC_ECC_VALUE2_OFFSET 0x320 ++#define SMC_ECC_VALUE3_OFFSET 0x324 ++#define SMC_PERIPH_ID_0_OFFSET 0xFE0 ++#define SMC_PERIPH_ID_1_OFFSET 0xFE4 ++#define SMC_PERIPH_ID_2_OFFSET 0xFE8 ++#define SMC_PERIPH_ID_3_OFFSET 0xFEC ++#define SMC_PCELL_ID_0_OFFSET 0xFF0 ++#define SMC_PCELL_ID_1_OFFSET 0xFF4 ++#define SMC_PCELL_ID_2_OFFSET 0xFF8 ++#define SMC_PCELL_ID_3_OFFSET 0xFFC ++ ++#define CNS3XXX_GPIOA_BASE 0x74000000 /* GPIO port A */ ++#define CNS3XXX_GPIOA_BASE_VIRT 0xFFF04000 ++ ++#define CNS3XXX_GPIOB_BASE 0x74800000 /* GPIO port B */ ++#define CNS3XXX_GPIOB_BASE_VIRT 0xFFF05000 ++ ++#define CNS3XXX_RTC_BASE 0x75000000 /* Real Time Clock */ ++#define CNS3XXX_RTC_BASE_VIRT 0xFFF06000 ++ ++#define RTC_SEC_OFFSET 0x00 ++#define RTC_MIN_OFFSET 0x04 ++#define RTC_HOUR_OFFSET 0x08 ++#define RTC_DAY_OFFSET 0x0C ++#define RTC_SEC_ALM_OFFSET 0x10 ++#define RTC_MIN_ALM_OFFSET 0x14 ++#define 
RTC_HOUR_ALM_OFFSET 0x18 ++#define RTC_REC_OFFSET 0x1C ++#define RTC_CTRL_OFFSET 0x20 ++#define RTC_INTR_STS_OFFSET 0x34 ++ ++#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ ++#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ ++ ++#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ ++#define CNS3XXX_PM_BASE_VIRT 0xFFF08000 ++ ++#define PM_CLK_GATE_OFFSET 0x00 ++#define PM_SOFT_RST_OFFSET 0x04 ++#define PM_HS_CFG_OFFSET 0x08 ++#define PM_CACTIVE_STA_OFFSET 0x0C ++#define PM_PWR_STA_OFFSET 0x10 ++#define PM_SYS_CLK_CTRL_OFFSET 0x14 ++#define PM_PLL_LCD_I2S_CTRL_OFFSET 0x18 ++#define PM_PLL_HM_PD_OFFSET 0x1C ++ ++#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ ++#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 ++ ++#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ ++#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 ++ ++#define CNS3XXX_UART2_BASE 0x78800000 /* UART 2 */ ++#define CNS3XXX_UART2_BASE_VIRT 0xFFF0B000 ++ ++#define CNS3XXX_UART3_BASE 0x78C00000 /* UART 3 */ ++#define CNS3XXX_UART3_BASE_VIRT 0xFFF0C000 ++ ++#define CNS3XXX_DMAC_BASE 0x79000000 /* Generic DMA Control */ ++#define CNS3XXX_DMAC_BASE_VIRT 0xFFF0D000 ++ ++#define CNS3XXX_CORESIGHT_BASE 0x7A000000 /* CoreSight */ ++#define CNS3XXX_CORESIGHT_BASE_VIRT 0xFFF0E000 ++ ++#define CNS3XXX_CRYPTO_BASE 0x7B000000 /* Crypto */ ++#define CNS3XXX_CRYPTO_BASE_VIRT 0xFFF0F000 ++ ++#define CNS3XXX_I2S_BASE 0x7C000000 /* I2S */ ++#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 ++ ++#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ ++#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 ++ ++#define TIMER1_COUNTER_OFFSET 0x00 ++#define TIMER1_AUTO_RELOAD_OFFSET 0x04 ++#define TIMER1_MATCH_V1_OFFSET 0x08 ++#define TIMER1_MATCH_V2_OFFSET 0x0C ++ ++#define TIMER2_COUNTER_OFFSET 0x10 ++#define TIMER2_AUTO_RELOAD_OFFSET 0x14 ++#define TIMER2_MATCH_V1_OFFSET 0x18 ++#define TIMER2_MATCH_V2_OFFSET 0x1C ++ ++#define TIMER1_2_CONTROL_OFFSET 0x30 ++#define TIMER1_2_INTERRUPT_STATUS_OFFSET 0x34 ++#define 
TIMER1_2_INTERRUPT_MASK_OFFSET 0x38 ++ ++#define TIMER_FREERUN_OFFSET 0x40 ++#define TIMER_FREERUN_CONTROL_OFFSET 0x44 ++ ++#define CNS3XXX_HCIE_BASE 0x7D000000 /* HCIE Control */ ++#if 0 ++#define CNS3XXX_HCIE_BASE_VIRT 0xFFF11000 ++#else ++#define CNS3XXX_HCIE_BASE_VIRT 0xFFF30000 ++#endif ++ ++#define CNS3XXX_RAID_BASE 0x7E000000 /* RAID Control */ ++#define CNS3XXX_RAID_BASE_VIRT 0xFFF12000 ++ ++#define CNS3XXX_AXI_IXC_BASE 0x7F000000 /* AXI IXC */ ++#define CNS3XXX_AXI_IXC_BASE_VIRT 0xFFF13000 ++ ++#define CNS3XXX_CLCD_BASE 0x80000000 /* LCD Control */ ++#define CNS3XXX_CLCD_BASE_VIRT 0xFFF14000 ++ ++#define CNS3XXX_USBOTG_BASE 0x81000000 /* USB OTG Control */ ++#define CNS3XXX_USBOTG_BASE_VIRT 0xFFF15000 ++ ++#define CNS3XXX_USB_BASE 0x82000000 /* USB Host Control */ ++#define CNS3XXX_USB_BASE_VIRT 0xFFF16000 ++ ++#define CNS3XXX_SATA2_BASE 0x83000000 /* SATA */ ++#define CNS3XXX_SATA2_SIZE SZ_16M ++#define CNS3XXX_SATA2_BASE_VIRT 0xFFF17000 ++ ++#define CNS3XXX_CAMERA_BASE 0x84000000 /* Camera Interface */ ++#define CNS3XXX_CAMERA_BASE_VIRT 0xFFF18000 ++ ++#define CNS3XXX_SDIO_BASE 0x85000000 /* SDIO */ ++#define CNS3XXX_SDIO_BASE_VIRT 0xFFF19000 ++ ++#define CNS3XXX_I2S_TDM_BASE 0x86000000 /* I2S TDM */ ++#define CNS3XXX_I2S_TDM_BASE_VIRT 0xFFF1A000 ++ ++#define CNS3XXX_2DG_BASE 0x87000000 /* 2D Graphic Control */ ++#define CNS3XXX_2DG_BASE_VIRT 0xFFF1B000 ++ ++#define CNS3XXX_USB_OHCI_BASE 0x88000000 /* USB OHCI */ ++#define CNS3XXX_USB_OHCI_BASE_VIRT 0xFFF1C000 ++ ++#define CNS3XXX_L2C_BASE 0x92000000 /* L2 Cache Control */ ++#define CNS3XXX_L2C_BASE_VIRT 0xFFF27000 ++ ++#define CNS3XXX_PCIE0_MEM_BASE 0xA0000000 /* PCIe Port 0 IO/Memory Space */ ++#define CNS3XXX_PCIE0_MEM_BASE_VIRT 0xE0000000 ++ ++#define CNS3XXX_PCIE0_HOST_BASE 0xAB000000 /* PCIe Port 0 RC Base */ ++#define CNS3XXX_PCIE0_HOST_BASE_VIRT 0xE1000000 ++ ++#define CNS3XXX_PCIE0_IO_BASE 0xAC000000 /* PCIe Port 0 */ ++#define CNS3XXX_PCIE0_IO_BASE_VIRT 0xE2000000 ++ ++#define 
CNS3XXX_PCIE0_CFG0_BASE 0xAD000000 /* PCIe Port 0 CFG Type 0 */ ++#define CNS3XXX_PCIE0_CFG0_BASE_VIRT 0xE3000000 ++ ++#define CNS3XXX_PCIE0_CFG1_BASE 0xAE000000 /* PCIe Port 0 CFG Type 1 */ ++#define CNS3XXX_PCIE0_CFG1_BASE_VIRT 0xE4000000 ++ ++#define CNS3XXX_PCIE0_MSG_BASE 0xAF000000 /* PCIe Port 0 Message Space */ ++#define CNS3XXX_PCIE0_MSG_BASE_VIRT 0xE5000000 ++ ++#define CNS3XXX_PCIE1_MEM_BASE 0xB0000000 /* PCIe Port 1 IO/Memory Space */ ++#define CNS3XXX_PCIE1_MEM_BASE_VIRT 0xE8000000 ++ ++#define CNS3XXX_PCIE1_HOST_BASE 0xBB000000 /* PCIe Port 1 RC Base */ ++#define CNS3XXX_PCIE1_HOST_BASE_VIRT 0xE9000000 ++ ++#define CNS3XXX_PCIE1_IO_BASE 0xBC000000 /* PCIe Port 1 */ ++#define CNS3XXX_PCIE1_IO_BASE_VIRT 0xEA000000 ++ ++#define CNS3XXX_PCIE1_CFG0_BASE 0xBD000000 /* PCIe Port 1 CFG Type 0 */ ++#define CNS3XXX_PCIE1_CFG0_BASE_VIRT 0xEB000000 ++ ++#define CNS3XXX_PCIE1_CFG1_BASE 0xBE000000 /* PCIe Port 1 CFG Type 1 */ ++#define CNS3XXX_PCIE1_CFG1_BASE_VIRT 0xEC000000 ++ ++#define CNS3XXX_PCIE1_MSG_BASE 0xBF000000 /* PCIe Port 1 Message Space */ ++#define CNS3XXX_PCIE1_MSG_BASE_VIRT 0xED000000 ++ ++/* ++ * Testchip peripheral and fpga gic regions ++ */ ++//#define CNS3XXX_TC11MP_SCU_BASE 0x1F000000 /* IRQ, Test chip */ ++#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ ++#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 ++ ++//#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x1F000100 /* Test chip interrupt controller CPU interface */ ++#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ ++#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 ++ ++//#define CNS3XXX_TC11MP_TWD_BASE 0x1F000600 ++#define CNS3XXX_TC11MP_TWD_BASE 0x90000600 ++#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 ++ ++//#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x1F001000 /* Test chip interrupt controller distributor */ ++#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ ++#define 
CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 ++ ++//#define CNS3XXX_TC11MP_L220_BASE 0x1F002000 /* L220 registers */ ++#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ ++#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 ++ ++/* ++ * Irqs ++ */ ++#define IRQ_TC11MP_GIC_START 32 ++ ++/* ++ * ARM11 MPCore test chip interrupt sources (primary GIC on the test chip) ++ */ ++#define IRQ_CNS3XXX_PMU (IRQ_TC11MP_GIC_START + 0) ++#define IRQ_CNS3XXX_SDIO (IRQ_TC11MP_GIC_START + 1) ++#define IRQ_CNS3XXX_L2CC (IRQ_TC11MP_GIC_START + 2) ++#define IRQ_CNS3XXX_RTC (IRQ_TC11MP_GIC_START + 3) ++#define IRQ_CNS3XXX_I2S (IRQ_TC11MP_GIC_START + 4) ++#define IRQ_CNS3XXX_PCM (IRQ_TC11MP_GIC_START + 5) ++#define IRQ_CNS3XXX_SPI (IRQ_TC11MP_GIC_START + 6) ++#define IRQ_CNS3XXX_I2C (IRQ_TC11MP_GIC_START + 7) ++#define IRQ_CNS3XXX_CIM (IRQ_TC11MP_GIC_START + 8) ++#define IRQ_CNS3XXX_GPU (IRQ_TC11MP_GIC_START + 9) ++#define IRQ_CNS3XXX_LCD (IRQ_TC11MP_GIC_START + 10) ++#define IRQ_CNS3XXX_GPIOA (IRQ_TC11MP_GIC_START + 11) ++#define IRQ_CNS3XXX_GPIOB (IRQ_TC11MP_GIC_START + 12) ++#define IRQ_CNS3XXX_UART0 (IRQ_TC11MP_GIC_START + 13) ++#define IRQ_CNS3XXX_UART1 (IRQ_TC11MP_GIC_START + 14) ++#define IRQ_CNS3XXX_UART2 (IRQ_TC11MP_GIC_START + 15) ++#define IRQ_CNS3XXX_ARM11 (IRQ_TC11MP_GIC_START + 16) ++ ++#define IRQ_CNS3XXX_SW_STATUS (IRQ_TC11MP_GIC_START + 17) ++#define IRQ_CNS3XXX_SW_R0TXC (IRQ_TC11MP_GIC_START + 18) ++#define IRQ_CNS3XXX_SW_R0RXC (IRQ_TC11MP_GIC_START + 19) ++#define IRQ_CNS3XXX_SW_R0QE (IRQ_TC11MP_GIC_START + 20) ++#define IRQ_CNS3XXX_SW_R0QF (IRQ_TC11MP_GIC_START + 21) ++#define IRQ_CNS3XXX_SW_R1TXC (IRQ_TC11MP_GIC_START + 22) ++#define IRQ_CNS3XXX_SW_R1RXC (IRQ_TC11MP_GIC_START + 23) ++#define IRQ_CNS3XXX_SW_R1QE (IRQ_TC11MP_GIC_START + 24) ++#define IRQ_CNS3XXX_SW_R1QF (IRQ_TC11MP_GIC_START + 25) ++#define IRQ_CNS3XXX_SW_PPE (IRQ_TC11MP_GIC_START + 26) ++ ++#define IRQ_CNS3XXX_CRYPTO (IRQ_TC11MP_GIC_START + 27) ++#define IRQ_CNS3XXX_HCIE 
(IRQ_TC11MP_GIC_START + 28) ++#define IRQ_CNS3XXX_PCIE0_DEVICE (IRQ_TC11MP_GIC_START + 29) ++#define IRQ_CNS3XXX_PCIE1_DEVICE (IRQ_TC11MP_GIC_START + 30) ++#define IRQ_CNS3XXX_USB_OTG (IRQ_TC11MP_GIC_START + 31) ++#define IRQ_CNS3XXX_USB_EHCI (IRQ_TC11MP_GIC_START + 32) ++#define IRQ_CNS3XXX_SATA (IRQ_TC11MP_GIC_START + 33) ++#define IRQ_CNS3XXX_RAID (IRQ_TC11MP_GIC_START + 34) ++#define IRQ_CNS3XXX_SMC (IRQ_TC11MP_GIC_START + 35) ++ ++#define IRQ_CNS3XXX_DMAC_ABORT (IRQ_TC11MP_GIC_START + 36) ++#define IRQ_CNS3XXX_DMAC0 (IRQ_TC11MP_GIC_START + 37) ++#define IRQ_CNS3XXX_DMAC1 (IRQ_TC11MP_GIC_START + 38) ++#define IRQ_CNS3XXX_DMAC2 (IRQ_TC11MP_GIC_START + 39) ++#define IRQ_CNS3XXX_DMAC3 (IRQ_TC11MP_GIC_START + 40) ++#define IRQ_CNS3XXX_DMAC4 (IRQ_TC11MP_GIC_START + 41) ++#define IRQ_CNS3XXX_DMAC5 (IRQ_TC11MP_GIC_START + 42) ++#define IRQ_CNS3XXX_DMAC6 (IRQ_TC11MP_GIC_START + 43) ++#define IRQ_CNS3XXX_DMAC7 (IRQ_TC11MP_GIC_START + 44) ++#define IRQ_CNS3XXX_DMAC8 (IRQ_TC11MP_GIC_START + 45) ++#define IRQ_CNS3XXX_DMAC9 (IRQ_TC11MP_GIC_START + 46) ++#define IRQ_CNS3XXX_DMAC10 (IRQ_TC11MP_GIC_START + 47) ++#define IRQ_CNS3XXX_DMAC11 (IRQ_TC11MP_GIC_START + 48) ++#define IRQ_CNS3XXX_DMAC12 (IRQ_TC11MP_GIC_START + 49) ++#define IRQ_CNS3XXX_DMAC13 (IRQ_TC11MP_GIC_START + 50) ++#define IRQ_CNS3XXX_DMAC14 (IRQ_TC11MP_GIC_START + 51) ++#define IRQ_CNS3XXX_DMAC15 (IRQ_TC11MP_GIC_START + 52) ++#define IRQ_CNS3XXX_DMAC16 (IRQ_TC11MP_GIC_START + 53) ++#define IRQ_CNS3XXX_DMAC17 (IRQ_TC11MP_GIC_START + 54) ++ ++#define IRQ_CNS3XXX_PCIE0_RC (IRQ_TC11MP_GIC_START + 55) ++#define IRQ_CNS3XXX_PCIE1_RC (IRQ_TC11MP_GIC_START + 56) ++#define IRQ_CNS3XXX_TIMER0 (IRQ_TC11MP_GIC_START + 57) ++#define IRQ_CNS3XXX_TIMER1 (IRQ_TC11MP_GIC_START + 58) ++#define IRQ_CNS3XXX_USB_OHCI (IRQ_TC11MP_GIC_START + 59) ++#define IRQ_CNS3XXX_TIMER2 (IRQ_TC11MP_GIC_START + 60) ++#define IRQ_CNS3XXX_EXTERNAL_PIN0 (IRQ_TC11MP_GIC_START + 61) ++#define IRQ_CNS3XXX_EXTERNAL_PIN1 (IRQ_TC11MP_GIC_START + 62) 
++#define IRQ_CNS3XXX_EXTERNAL_PIN2 (IRQ_TC11MP_GIC_START + 63) ++ ++#define NR_GIC_CNS3XXX 1 ++ ++/* ++ * Only define NR_IRQS if less than NR_IRQS_CNS3XXX ++ */ ++#define NR_IRQS_CNS3XXX (IRQ_TC11MP_GIC_START + 64) ++ ++#if !defined(NR_IRQS) || (NR_IRQS < NR_IRQS_CNS3XXX) ++#undef NR_IRQS ++#define NR_IRQS NR_IRQS_CNS3XXX ++#endif ++ ++#if !defined(MAX_GIC_NR) || (MAX_GIC_NR < NR_GIC_CNS3XXX) ++#undef MAX_GIC_NR ++#define MAX_GIC_NR NR_GIC_CNS3XXX ++#endif ++ ++#endif /* __ASM_ARCH_BOARD_CNS3XXX_H */ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/camera.h +@@ -0,0 +1,97 @@ ++/* ++ camera.h - CNS3XXX camera driver header file ++ ++ Copyright (C) 2003, Intel Corporation ++ Copyright (C) 2008, Guennadi Liakhovetski ++ ++ This program is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 2 of the License, or ++ (at your option) any later version. ++ ++ This program is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with this program; if not, write to the Free Software ++ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
++*/ ++ ++#ifndef __ASM_ARCH_CAMERA_H_ ++#define __ASM_ARCH_CAMERA_H_ ++ ++#define CNS3XXX_CAMERA_MASTER 0x01 ++#define CNS3XXX_CAMERA_DATAWIDTH_4 0x02 ++#define CNS3XXX_CAMERA_DATAWIDTH_5 0x04 ++#define CNS3XXX_CAMERA_DATAWIDTH_8 0x08 ++#define CNS3XXX_CAMERA_DATAWIDTH_9 0x10 ++#define CNS3XXX_CAMERA_DATAWIDTH_10 0x20 ++#define CNS3XXX_CAMERA_PCLK_EN 0x40 ++#define CNS3XXX_CAMERA_MCLK_EN 0x80 ++#define CNS3XXX_CAMERA_PCP 0x100 ++#define CNS3XXX_CAMERA_HSP 0x200 ++#define CNS3XXX_CAMERA_VSP 0x400 ++ ++/* Camera Interface */ ++#define CIM_GLOBAL_REG 0x00 /* CIM control*/ ++#define CIM_TIMING_V_REG 0x04 /* Vertical capture range setting */ ++#define CIM_TIMING_H_REG 0x08 /* Horizontal capture range setting */ ++#define CIM_CCIR656_0_REG 0x0C /* CCIR656 detect and control setting*/ ++#define CIM_CCIR656_1_REG 0x10 /* CCIR656 self test setting */ ++#define CIM_SERIAL_SRC_REG 0x14 /* Serial pix capture module control settings */ ++#define CIM_INT_MASK_REG 0x28 /* CIM interrupt mask register. */ ++#define CIM_INT_STATUS_REG 0x2C /* CIM interrupt status register. */ ++#define CIM_INT_CLEAR_REG 0x30 /* CIM interrupt clear register. */ ++#define CIM_DATAPATH_CTL_REG 0x34 /* CIM data path options and control settings */ ++#define CIM_VIDEO_PORT_REG 0x100 /* CIM's video port */ ++#define CIM_CORRECTION_R_REG 0x200 /* Internal programmable table for R component. */ ++#define CIM_CORRECTION_G_REG 0x600 /* Internal programmable table for G component. */ ++#define CIM_CORRECTION_B_REG 0xA00 /* Internal programmable table for B component. 
*/ ++ ++#define SRC_DATA_FMT_CCIR656 0x00 ++#define SRC_DATA_FMT_YCBCR_A 0x01 ++#define SRC_DATA_FMT_YCBCR_B 0x02 ++#define SRC_DATA_FMT_RGB565 0x03 ++#define SRC_DATA_FMT_RGB555 0x04 ++#define SRC_DATA_FMT_BAYER_82 0x05 ++#define SRC_DATA_FMT_BAYER_10 0x06 ++ ++#define DST_DATA_FMT_RGB888 0x00 ++#define DST_DATA_FMT_RGB565 0x01 ++#define DST_DATA_FMT_RGB1555 0x02 ++#define DST_DATA_FMT_RGB444 0x03 ++ ++#define CISR_LAST_LINE (1 << 0) /* Last line */ ++#define CISR_FIRST_LINE (1 << 1) /* First line */ ++#define CISR_LINE_END (1 << 2) /* Line end */ ++#define CISR_LINE_START (1 << 3) /* Line start */ ++#define CISR_FIELD_CHG (1 << 4) /* Field Change */ ++#define CISR_FIFO_OVERRUN (1 << 5) /* FIFO overrun */ ++ ++ ++#define CIMR_LAST_LINE_M (1 << 0) /* Last line mask*/ ++#define CIMR_FIRST_LINE_M (1 << 1) /* First line mask*/ ++#define CIMR_LINE_END_M (1 << 2) /* Line end mask*/ ++#define CIMR_LINE_START_M (1 << 3) /* Line start mask*/ ++#define CIMR_FIELD_CHG_M (1 << 4) /* Field Change mask*/ ++#define CIMR_FIFO_OVERRUN_M (1 << 5) /* FIFO overrun mask*/ ++ ++ ++struct cns3xxx_camera_platform_data { ++#if 0 ++ int (*init)(struct device *); ++ int (*power)(struct device *, int); ++ int (*reset)(struct device *, int); ++#endif ++ ++ unsigned long flags; ++ unsigned long mclk_10khz; ++ unsigned long lcd_base; ++ unsigned long misc_base; ++}; ++ ++//extern void cns3xxx_set_camera_info(struct pxacamera_platform_data *); ++ ++#endif /* __ASM_ARCH_CAMERA_H_ */ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/clkdev.h +@@ -0,0 +1,7 @@ ++#ifndef __ASM_MACH_CLKDEV_H ++#define __ASM_MACH_CLKDEV_H ++ ++#define __clk_get(clk) ({ 1; }) ++#define __clk_put(clk) do { } while (0) ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S +@@ -0,0 +1,35 @@ ++/* arch/arm/mach-cns3xxx/include/mach/debug-macro.S ++ * ++ * Debugging macro include header ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 1994-1999 Russell King ++ * Moved from 
linux/arch/arm/kernel/debug.S by Ben Dooks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++ .macro addruart,rx ++ mrc p15, 0, \rx, c1, c0 ++ tst \rx, #1 @ MMU enabled? ++ moveq \rx, #0x10000000 ++ movne \rx, #0xf0000000 @ virtual base ++ orr \rx, \rx, #0x00009000 ++ .endm ++ ++#include +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/dmac.h +@@ -0,0 +1,295 @@ ++/******************************************************************************* ++ * ++ * arch/arm/mach-cns3xxx/dmac.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#ifndef _CNS3XXX_DMAC_H_ ++#define _CNS3XXX_DMAC_H_ ++ ++#define MAX_DMA_CHANNELS 9 ++#define DMAC_PCM1_PERIPH_ID_0 4 ++#define DMAC_SPI_PERIPH_ID 8 ++#define DMAC_PCM_PERIPH_ID_0 9 ++#define CNS3XXX_DMAC_I2STX_PID 12 ++#define CNS3XXX_DMAC_I2SRX_PID 13 ++ ++/* APIs */ ++int __init dmac_init(void); ++extern int dmac_get_channel (int (*handler)(void*), void *handler_args); ++extern int dmac_get_channel_ex(int channel, int (*handler) (void *), void *handler_args); ++extern int dmac_release_channel(int chan); ++ ++extern int dmac_get_event (int chan, int ev); ++extern int dmac_release_event (int chan, int ev); ++ ++extern uint32_t dmac_create_ctrlval (int src_inc, int s_bpb, int s_dt, int dst_inc, int d_bpb, int d_dt, int swap); ++/* enum - reg ? 
0 => PL330_FTC, 1 => PL330_CS, 2 => PL330_CPC, 3 => PL330_SA, ++ * 4 => PL330_DA, 5=>PL330_CC, 6 => PL330_LC0, 7 => PL330_LC1 ++ */ ++typedef enum { PL330_FTC =0, ++ PL330_CS, ++ PL330_CPC, ++ PL330_SA, ++ PL330_DA, ++ PL330_CC, ++ PL330_LC0, ++ PL330_LC1 ++} chregs_t; ++ ++extern uint32_t DMAC_READ_CHREGS (int chan, chregs_t reg); ++ ++/* Instruction Set */ ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAEND ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 0 0 0 ++ * Example: ++ * DMAEND ++ * 00 ++ ******************************************************************************/ ++int DMAC_DMAEND(int ch_num); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAFLUSHP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 1 0 1 0 1 ++ * Example: ++ * DMAFLUSHP P0 ++ * 35 00 ++ ******************************************************************************/ ++#define DMAFLUSHP_INSTR_SIZE 2 ++int DMAC_DMAFLUSHP(int ch_num, int periph); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAGO ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 0 1 0 0 0 ns 0 ++ * ++ * | 47 16 | ++ * < imm[31:0] > ++ * Example: ++ * DMAGO C0, 0x40000000 ++ * A0 00 00 00 00 40 ++ ******************************************************************************/ ++int DMAC_DMAGO(int ch_num); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALD ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 bs x ++ * Example: ++ * DMALD ++ * 04 ++ ******************************************************************************/ ++#define DMALD_INSTR_SIZE 1 ++#define DMALDB_INSTR_SIZE 1 ++#define DMALDS_INSTR_SIZE 1 ++int DMAC_DMALD(int ch_num); ++ ++int DMAC_DMALDB(int ch_num); ++ 
++int DMAC_DMALDS(int ch_num); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < iter[7:0] > 0 0 1 0 0 0 lc 0 ++ * Example: ++ * DMALP 8 ++ * 20 07 ++ ******************************************************************************/ ++#define DMALP_INSTR_SIZE 2 ++int DMAC_DMALP(int ch_num, int loop_reg_idx, int iter); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALPEND ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < backwards_jump[7:0] > 0 0 1 nf 1 lc bs x ++ * Example: ++ * DMALPEND ++ * 38 04 ++ ******************************************************************************/ ++#define DMALPEND_INSTR_SIZE 2 ++int DMAC_DMALPEND(int ch_num, int loop_reg_idx, int jump, int lpfe); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAMOV ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 1 0 1 1 1 1 0 0 ++ * ++ * | 47 16 | ++ * < imm[31:0] > ++ * ++ * # CCR Description ++ * # [30:28] Endian swap size ++ * # [27:25] AWCACHE[3,1:0] value ++ * # [24:22] AWPROT value ++ * # [21:18] AWLEN value ++ * # [17:15] AWSIZE value ++ * # [14] AWBURST[0] value ++ * 0 - FIXED / 1 - INCR ++ * # [13:11] ARCACHE[2:0] value ++ * # [10:8] ARPROT value ++ * # [7:4] ARLEN value ++ * # [3:1] ARSIZE value ++ * # [0] ARBURST[0] value ++ * 0 - FIXED / 1 - INCR ++ * Example: ++ * DMAMOV CCR, SB1 SS32 DB1 DS32 ++ * BC 01 05 40 01 00 ++ ******************************************************************************/ ++ ++#define DMAMOV_INSTR_SIZE 6 ++/* ccr_sar_dar: 0 for SAR, 1, for CCR, 2 for DAR */ ++typedef enum { SAR = 0, CCR = 1, DAR = 2 } dmamov_arg_t; ++int DMAC_DMAMOV(int ch_num, dmamov_arg_t ccr_sar_dar, uint32_t value); ++ ++#define DMAWMB_INSTR_SIZE 1 ++int 
DMAC_DMAWMB (int ch_num); ++ ++#define DMANOP_INSTR_SIZE 1 ++int DMAC_DMANOP (int ch_num); ++/****************************************************************************** ++ * ++ * Instruction: DMAST ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 1 0 bs x ++ * Example: ++ * DMAST ++ * 08 ++ ******************************************************************************/ ++#define DMAST_INSTR_SIZE 1 /* 1 Byte */ ++int DMAC_DMAST(int ch_num); ++ ++#define DMASTB_INSTR_SIZE 1 /* 1 Byte */ ++int DMAC_DMASTB(int ch_num); ++ ++#define DMASTS_INSTR_SIZE 1 /* 1 Byte */ ++int DMAC_DMASTS(int ch_num); ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASTZ ++ * Description: ++ * | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 1 1 0 0 ++ * Example: ++ * DMASTZ ++ * 08 ++ ******************************************************************************/ ++/* Not done */ ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAWFE ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 i 0 0 0 1 1 0 1 1 0 ++ * Example: ++ * DMAWFE E0 ++ * 36 00 ++ ******************************************************************************/ ++int DMAC_WFE(int ch_num, int event); ++#define DMAWFE_INSTR_SIZE 2 ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAWFP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 1 0 0 bs p ++ * Example: ++ * DMAWFP P0, periph ++ * 31 00 ++ ******************************************************************************/ ++typedef enum { SINGLE = 0, BURST = 1, PERIPHERAL = 2} dmawfp_burst_type; ++int DMAC_DMAWFP(int ch_num, int periph_id,dmawfp_burst_type b); ++#define DMAWFP_INSTR_SIZE 2 ++ ++/****************************************************************************** ++ * ++ * Instruction: DMAKILL ++ * Description: ++ 
* | 7 6 5 4 | 3 2 1 0 | ++ * 0 0 0 0 0 0 0 1 ++ * Example: ++ * DMAKILL ++ * 01 ++ ******************************************************************************/ ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASEV ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * 0 i 0 0 0 1 1 0 1 0 0 ++ * Example: ++ * DMASEV E0 ++ * 34 00 ++ ******************************************************************************/ ++int DMAC_DMASEV(int ch_num, int event_num); ++#define DMASEV_INSTR_SIZE 2 ++ ++/****************************************************************************** ++ * ++ * Instruction: DMALDP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 0 0 1 bs 1 ++ * Example: ++ * DMALDPS P0 ++ * 25 00 ++ ******************************************************************************/ ++int DMAC_DMALDP(int ch_num, int periph_id, int burst); ++#define DMALDP_INSTR_SIZE 2 ++ ++/****************************************************************************** ++ * ++ * Instruction: DMASTP ++ * Description: ++ * | 15 14 13 12 | 11 10 9 8 | 7 6 5 4 | 3 2 1 0 | ++ * < periph[4:0] > 0 0 0 0 0 1 0 1 0 bs 1 ++ * Example: ++ * DMASTPS P0 ++ * 29 00 ++ ******************************************************************************/ ++int DMAC_DMASTP(int ch_num, int periph_id, int burst); ++#define DMASTP_INSTR_SIZE 2 ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/entry-macro.S +@@ -0,0 +1,105 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/entry-macro.S ++ * ++ * Low-level IRQ helper macros for Cavium Networks platforms ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. 
++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++#include ++#include ++ ++ .macro disable_fiq ++ .endm ++ ++ .macro get_irqnr_preamble, base, tmp ++ ldr \base, =gic_cpu_base_addr ++ ldr \base, [\base] ++ .endm ++ ++ .macro arch_ret_to_user, tmp1, tmp2 ++ .endm ++ ++ /* ++ * The interrupt numbering scheme is defined in the ++ * interrupt controller spec. To wit: ++ * ++ * Interrupts 0-15 are IPI ++ * 16-28 are reserved ++ * 29-31 are local. We allow 30 to be used for the watchdog. ++ * 32-1020 are global ++ * 1021-1022 are reserved ++ * 1023 is "spurious" (no interrupt) ++ * ++ * For now, we ignore all local interrupts so only return an interrupt if it's ++ * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs. ++ * ++ * A simple read from the controller will tell us the number of the highest ++ * priority enabled interrupt. We then just need to check whether it is in the ++ * valid range for an IRQ (30-1020 inclusive). 
++ */ ++ ++ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp ++ ++ ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */ ++ ++ ldr \tmp, =1021 ++ ++ bic \irqnr, \irqstat, #0x1c00 ++ ++ cmp \irqnr, #29 ++ cmpcc \irqnr, \irqnr ++ cmpne \irqnr, \tmp ++ cmpcs \irqnr, \irqnr ++ ++ .endm ++ ++ /* We assume that irqstat (the raw value of the IRQ acknowledge ++ * register) is preserved from the macro above. ++ * If there is an IPI, we immediately signal end of interrupt on the ++ * controller, since this requires the original irqstat value which ++ * we won't easily be able to recreate later. ++ */ ++ ++ .macro test_for_ipi, irqnr, irqstat, base, tmp ++ bic \irqnr, \irqstat, #0x1c00 ++ cmp \irqnr, #16 ++ strcc \irqstat, [\base, #GIC_CPU_EOI] ++ cmpcs \irqnr, \irqnr ++ .endm ++ ++ /* As above, this assumes that irqstat and base are preserved.. */ ++ ++ .macro test_for_ltirq, irqnr, irqstat, base, tmp ++ bic \irqnr, \irqstat, #0x1c00 ++ mov \tmp, #0 ++ cmp \irqnr, #29 ++ moveq \tmp, #1 ++ streq \irqstat, [\base, #GIC_CPU_EOI] ++ cmp \tmp, #0 ++ .endm ++ ++ .macro test_for_cache_ipi, irqnr, irqstat, base, tmp ++ bic \irqnr, \irqstat, #0x1c00 ++ mov \tmp, #0 ++ cmp \irqnr, #1 ++ moveq \tmp, #1 ++ streq \irqstat, [\base, #GIC_CPU_EOI] ++ cmp \tmp, #0 ++ .endm +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/gpio.h +@@ -0,0 +1,94 @@ ++/* ++ * arch/arm/mach-ixp4xx/include/mach/gpio.h ++ * ++ * IXP4XX GPIO wrappers for arch-neutral GPIO calls ++ * ++ * Written by Milan Svoboda ++ * Based on PXA implementation by Philipp Zabel ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#ifndef __ASM_ARCH_IXP4XX_GPIO_H ++#define __ASM_ARCH_IXP4XX_GPIO_H ++ ++#include ++#include ++#include /* cansleep wrappers */ ++ ++#define NR_BUILTIN_GPIO 64 ++ ++#define CNS3XXX_GPIO_IN 0x0 ++#define CNS3XXX_GPIO_OUT 0x1 ++ ++#define CNS3XXX_GPIO_LO 0 ++#define CNS3XXX_GPIO_HI 1 ++ ++#define CNS3XXX_GPIO_OUTPUT 0x00 ++#define CNS3XXX_GPIO_INPUT 0x04 ++#define CNS3XXX_GPIO_DIR 0x08 ++#define CNS3XXX_GPIO_SET 0x10 ++#define CNS3XXX_GPIO_CLEAR 0x14 ++ ++static inline void gpio_line_get(u8 line, int *value) ++{ ++ if (line < 32) ++ *value = ((__raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> line) & 0x1); ++ else ++ *value = ((__raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> (line - 32)) & 0x1); ++} ++ ++static inline void gpio_line_set(u8 line, int value) ++{ ++ if (line < 32) { ++ if (value) ++ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_SET); ++ else ++ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_CLEAR); ++ } else { ++ if (value) ++ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_SET); ++ else ++ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_CLEAR); ++ } ++} ++ ++static inline int gpio_get_value(unsigned gpio) ++{ ++ if (gpio < NR_BUILTIN_GPIO) ++ { ++ int value; ++ gpio_line_get(gpio, &value); ++ return value; ++ } ++ else ++ return __gpio_get_value(gpio); ++} ++ ++static inline void gpio_set_value(unsigned gpio, int value) ++{ ++ if (gpio < NR_BUILTIN_GPIO) ++ gpio_line_set(gpio, 
value); ++ else ++ __gpio_set_value(gpio, value); ++} ++ ++#define gpio_cansleep __gpio_cansleep ++ ++extern int gpio_to_irq(int gpio); ++extern int irq_to_gpio(int gpio); ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/hardware.h +@@ -0,0 +1,40 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/hardware.h ++ * ++ * This file contains the hardware definitions of the Cavium Networks boards. ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited. ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_HARDWARE_H ++#define __ASM_ARCH_HARDWARE_H ++ ++/* macro to get at IO space when running virtually */ ++#define PCIBIOS_MIN_IO 0x00000000 ++#define PCIBIOS_MIN_MEM 0x00000000 ++ ++#define pcibios_assign_all_busses() 0 ++ ++#include "board.h" ++ ++#include "platform.h" ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/io.h +@@ -0,0 +1,41 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/io.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++#ifndef __ASM_ARM_ARCH_IO_H ++#define __ASM_ARM_ARCH_IO_H ++ ++#include "board.h" ++ ++#define IO_SPACE_LIMIT 0xffffffff ++ ++#if 1 ++static inline void __iomem *__io(unsigned long addr) ++{ ++ return (void __iomem *)((addr - CNS3XXX_PCIE0_IO_BASE) ++ + CNS3XXX_PCIE0_IO_BASE_VIRT); ++} ++#endif ++#define __io(a) __io(a) ++#define __mem_pci(a) (a) ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/irqs.h +@@ -0,0 +1,45 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/irqs.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * Copyright (C) 2000 Deep Blue Solutions Ltd. ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_IRQS_H ++#define __ASM_ARCH_IRQS_H ++ ++#include ++ ++#define IRQ_LOCALTIMER 29 ++#define IRQ_LOCALWDOG 30 ++ ++#define IRQ_GIC_START 32 ++#define IRQ_CLCD 44 ++ ++#ifdef CONFIG_CNS_RAID ++#define IRQ_CNS_RAID (43) ++#endif /* CONFIG_CNS_RAID */ ++ ++#ifndef NR_IRQS ++#error "NR_IRQS not defined by the board-specific files" ++#endif ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/lm.h +@@ -0,0 +1,32 @@ ++#include ++ ++struct lm_device { ++ struct device dev; ++ struct resource resource; ++ unsigned int irq; ++ unsigned int id; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ void *lm_drvdata; ++#endif ++}; ++ ++struct lm_driver { ++ struct device_driver drv; ++ int (*probe)(struct lm_device *); ++ void (*remove)(struct lm_device *); ++ int (*suspend)(struct lm_device *, pm_message_t); ++ int (*resume)(struct lm_device *); ++}; ++ ++int lm_driver_register(struct lm_driver *drv); ++void lm_driver_unregister(struct lm_driver *drv); ++ ++int lm_device_register(struct lm_device *dev); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++# define lm_get_drvdata(lm) ((lm)->lm_drvdata) ++# define lm_set_drvdata(lm,d) do { (lm)->lm_drvdata = (d); } while (0) ++#else ++# define lm_get_drvdata(lm) dev_get_drvdata(&(lm)->dev) ++# define lm_set_drvdata(lm,d) dev_set_drvdata(&(lm)->dev, d) ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/memory.h +@@ -0,0 +1,43 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/memory.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. 
++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_MEMORY_H ++#define __ASM_ARCH_MEMORY_H ++ ++/* ++ * Physical DRAM offset. ++ */ ++#define PHYS_OFFSET UL(0x00000000) ++ ++/* ++ * Virtual view <-> DMA view memory address translations ++ * virt_to_bus: Used to translate the virtual address to an ++ * address suitable to be passed to set_dma_addr ++ * bus_to_virt: Used to convert an address for DMA operations ++ * to an address that the kernel can use. ++ */ ++#define __virt_to_bus(x) ((x) - PAGE_OFFSET) ++#define __bus_to_virt(x) ((x) + PAGE_OFFSET) ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/misc.h +@@ -0,0 +1,670 @@ ++/****************************************************************************** ++ * MODULE NAME: star_misc.h ++ * PROJECT CODE: Vega ++ * DESCRIPTION: ++ * MAINTAINER: Jacky Hou ++ * DATE: 9 February 2009 ++ * ++ * SOURCE CONTROL: ++ * ++ * LICENSE: ++ * This source code is copyright (c) 2008-2009 Cavium Networks Inc. ++ * All rights reserved. 
++ * ++ * REVISION HISTORY: ++ * ++ * ++ * SOURCE: ++ * ISSUES: ++ * NOTES TO USERS: ++ ******************************************************************************/ ++ ++#ifndef _CNS3XXX_MISC_H_ ++#define _CNS3XXX_MISC_H_ ++#include ++#define MISC_MEM_MAP_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT + offset))) ++ ++ ++/* ++ * define access macros ++ */ ++#define MISC_MEMORY_REMAP_REG MISC_MEM_MAP_VALUE(0x00) ++#define MISC_CHIP_CONFIG_REG MISC_MEM_MAP_VALUE(0x04) ++#define MISC_DEBUG_PROBE_DATA_REG MISC_MEM_MAP_VALUE(0x08) ++#define MISC_DEBUG_PROBE_SELECTION_REG MISC_MEM_MAP_VALUE(0x0C) ++#define MISC_IO_PIN_FUNC_SELECTION_REG MISC_MEM_MAP_VALUE(0x10) ++#define MISC_GPIOA_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x14) ++#define MISC_GPIOB_PIN_ENABLE_REG MISC_MEM_MAP_VALUE(0x18) ++#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_A MISC_MEM_MAP_VALUE(0x1C) ++#define MISC_IO_PAD_DRIVE_STRENGTH_CTRL_B MISC_MEM_MAP_VALUE(0x20) ++#define MISC_GPIOA_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x24) ++#define MISC_GPIOA_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x28) ++#define MISC_GPIOB_15_0_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x2C) ++#define MISC_GPIOB_16_31_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x30) ++#define MISC_IO_PULL_CTRL_REG MISC_MEM_MAP_VALUE(0x34) ++#define MISC_E_FUSE_31_0_REG MISC_MEM_MAP_VALUE(0x40) ++#define MISC_E_FUSE_63_32_REG MISC_MEM_MAP_VALUE(0x44) ++#define MISC_E_FUSE_95_64_REG MISC_MEM_MAP_VALUE(0x48) ++#define MISC_E_FUSE_127_96_REG MISC_MEM_MAP_VALUE(0x4C) ++#define MISC_SOFTWARE_TEST_1_REG MISC_MEM_MAP_VALUE(0x50) ++#define MISC_SOFTWARE_TEST_2_REG MISC_MEM_MAP_VALUE(0x54) ++ ++ ++ ++// USB MISC ++#define MISC_USB_CFG_REG MISC_MEM_MAP_VALUE(0x800) ++#define MISC_USB_STS_REG MISC_MEM_MAP_VALUE(0x804) ++#define MISC_USBPHY00_CFG_REG MISC_MEM_MAP_VALUE(0x808) ++#define MISC_USBPHY01_CFG_REG MISC_MEM_MAP_VALUE(0x80c) ++#define MISC_USBPHY10_CFG_REG MISC_MEM_MAP_VALUE(0x810) ++#define MISC_USBPHY11_CFG_REG MISC_MEM_MAP_VALUE(0x814) ++ ++#define 
MISC_PCIEPHY_CMCTL0_REG MISC_MEM_MAP_VALUE(0x900) ++#define MISC_PCIEPHY_CMCTL1_REG MISC_MEM_MAP_VALUE(0x904) ++ ++#define MISC_PCIEPHY0_CTL_REG MISC_MEM_MAP_VALUE(0x940) ++#define MISC_PCIE0_AXIS_AWMISC_REG MISC_MEM_MAP_VALUE(0x944) ++#define MISC_PCIE0_AXIS_ARMISC_REG MISC_MEM_MAP_VALUE(0x948) ++#define MISC_PCIE0_AXIS_RMISC_REG MISC_MEM_MAP_VALUE(0x94C) ++#define MISC_PCIE0_AXIS_BMISC_REG MISC_MEM_MAP_VALUE(0x950) ++#define MISC_PCIE0_AXIM_RMISC_REG MISC_MEM_MAP_VALUE(0x954) ++#define MISC_PCIE0_AXIM_BMISC_REG MISC_MEM_MAP_VALUE(0x958) ++#define MISC_PCIE0_CTRL_REG MISC_MEM_MAP_VALUE(0x95C) ++#define MISC_PCIE0_PM_DEBUG_REG MISC_MEM_MAP_VALUE(0x960) ++#define MISC_PCIE0_RFC_DEBUG_REG MISC_MEM_MAP_VALUE(0x964) ++#define MISC_PCIE0_CXPL_DEBUGL_REG MISC_MEM_MAP_VALUE(0x968) ++#define MISC_PCIE0_CXPL_DEBUGH_REG MISC_MEM_MAP_VALUE(0x96C) ++#define MISC_PCIE0_DIAG_DEBUGH_REG MISC_MEM_MAP_VALUE(0x970) ++#define MISC_PCIE0_W1CLR_REG MISC_MEM_MAP_VALUE(0x974) ++#define MISC_PCIE0_INT_MASK_REG MISC_MEM_MAP_VALUE(0x978) ++#define MISC_PCIE0_INT_STATUS_REG MISC_MEM_MAP_VALUE(0x97C) ++ ++#define MISC_PCIEPHY1_CTL_REG MISC_MEM_MAP_VALUE(0xa40) ++#define MISC_PCIE1_AXIS_AWMISC_REG MISC_MEM_MAP_VALUE(0xa44) ++#define MISC_PCIE1_AXIS_ARMISC_REG MISC_MEM_MAP_VALUE(0xa48) ++#define MISC_PCIE1_AXIS_RMISC_REG MISC_MEM_MAP_VALUE(0xa4C) ++#define MISC_PCIE1_AXIS_BMISC_REG MISC_MEM_MAP_VALUE(0xa50) ++#define MISC_PCIE1_AXIM_RMISC_REG MISC_MEM_MAP_VALUE(0xa54) ++#define MISC_PCIE1_AXIM_BMISC_REG MISC_MEM_MAP_VALUE(0xa58) ++#define MISC_PCIE1_CTRL_REG MISC_MEM_MAP_VALUE(0xa5C) ++#define MISC_PCIE1_PM_DEBUG_REG MISC_MEM_MAP_VALUE(0xa60) ++#define MISC_PCIE1_RFC_DEBUG_REG MISC_MEM_MAP_VALUE(0xa64) ++#define MISC_PCIE1_CXPL_DEBUGL_REG MISC_MEM_MAP_VALUE(0xa68) ++#define MISC_PCIE1_CXPL_DEBUGH_REG MISC_MEM_MAP_VALUE(0xa6C) ++#define MISC_PCIE1_DIAG_DEBUGH_REG MISC_MEM_MAP_VALUE(0xa70) ++#define MISC_PCIE1_W1CLR_REG MISC_MEM_MAP_VALUE(0xa74) ++#define MISC_PCIE1_INT_MASK_REG 
MISC_MEM_MAP_VALUE(0xa78) ++#define MISC_PCIE1_INT_STATUS_REG MISC_MEM_MAP_VALUE(0xa7C) ++ ++ ++ ++ ++ ++ ++/* ++ * define constants macros ++ */ ++#define MISC_PARALLEL_FLASH_BOOT (0x0) ++#define MISC_SPI_SERIAL_FLASH_BOOT (0x1) ++#define MISC_NAND_FLASH_BOOT (0x2) ++ ++#define MISC_ALIGN_LITTLE_ENDIAN (0x0) ++#define MISC_UNALIGN_LITTLE_ENDIAN (0x2) ++#define MISC_UNALIGN_BIG_ENDIAN (0x3) ++ ++#define MISC_CPU_CLOCK_333_MHZ (0) ++#define MISC_CPU_CLOCK_366_MHZ (1) ++#define MISC_CPU_CLOCK_400_MHZ (2) ++#define MISC_CPU_CLOCK_433_MHZ (3) ++#define MISC_CPU_CLOCK_466_MHZ (4) ++#define MISC_CPU_CLOCK_500_MHZ (5) ++#define MISC_CPU_CLOCK_533_MHZ (6) ++#define MISC_CPU_CLOCK_566_MHZ (7) ++#define MISC_CPU_CLOCK_600_MHZ (8) ++#define MISC_CPU_CLOCK_633_MHZ (9) ++#define MISC_CPU_CLOCK_666_MHZ (10) ++#define MISC_CPU_CLOCK_700_MHZ (11) ++ ++/* ++ * Macro-defines for shared pins with GPIO_A ++ */ ++#if 0 ++#define MISC_LCD_PWR_PIN ((0x1 << 0)) ++#define MISC_CIM_OE_PIN ((0x1 << 1)) ++ ++#define MISC_SMC_PINS ((0x1 << 2) | (0x1 << 3) | (0x1 << 4) | (0x1 << 5)| (0x1 << 6)) ++#define MISC_SMC_CS3_PIN ((0x1 << 2)) ++#define MISC_SMC_CS2_PIN ((0x1 << 3)) ++#define MISC_SMC_CLK_PIN ((0x1 << 4)) ++#define MISC_SMC_ADV_PIN ((0x1 << 5)) ++#define MISC_SMC_CRE_PIN ((0x1 << 6)) ++ ++ ++#define MISC_NFI_PINS ((0x1 << 7) | (0x1 << 8) | (0x1 << 9) | (0x1 << 10)| (0x1 << 11)) ++#define MISC_NFI_BUSY_PIN ((0x1 << 7)) ++#define MISC_NFI_CS3_PIN ((0x1 << 8)) ++#define MISC_NFI_CS2_PIN ((0x1 << 9)) ++#define MISC_NFI_CE1_PIN ((0x1 << 10)) ++#define MISC_NFI_CE0_PIN ((0x1 << 11)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 12)) ++#define MISC_EXT_INT1_PIN ((0x1 << 13)) ++#define MISC_EXT_INT0_PIN ((0x1 << 14)) ++ ++ ++#define MISC_UART0_PINS ((0x1 << 15) | (0x1 << 16) | (0x1 << 17) | (0x1 << 18)) ++#define MISC_UART0_RTS_PIN ((0x1 << 15)) ++#define MISC_UART0_CTS_PIN ((0x1 << 16)) ++#define MISC_UART0_TXD_PIN ((0x1 << 17)) ++#define MISC_UART0_RXD_PIN ((0x1 << 18)) ++ ++#define MISC_UART1_PINS 
((0x1 << 19) | (0x1 << 20) | (0x1 << 21) | (0x1 << 22)) ++#define MISC_UART1_RTS_PIN ((0x1 << 19)) ++#define MISC_UART1_CTS_PIN ((0x1 << 20)) ++#define MISC_UART1_RXD_PIN ((0x1 << 21)) ++#define MISC_UART1_TXD_PIN ((0x1 << 22)) ++ ++#define MISC_UART2_PINS ((0x1 << 23) | (0x1 << 24)) ++#define MISC_UART2_RXD_PIN ((0x1 << 23)) ++#define MISC_UART2_TXD_PIN ((0x1 << 24)) ++ ++#define MISC_PCM_PINS ((0x1 << 25) | (0x1 << 26) | (0x1 << 27) | (0x1 << 28)) ++#define MISC_PCM_CLK_PIN ((0x1 << 25)) ++#define MISC_PCM_FS_PIN ((0x1 << 26)) ++#define MISC_PCM_DT_PIN ((0x1 << 27)) ++#define MISC_PCM_DR_PIN ((0x1 << 28)) ++ ++#define MISC_SPI_CS1_PIN ((0x1 << 29)) ++#define MISC_SPI_CS0_PIN ((0x1 << 30)) ++#define MISC_SPI_CLK_PIN ((0x1 << 31)) ++#else ++#define MISC_SD_PWR_ON_PIN ((0x1 << 2)) ++#define MISC_OTG_DRVVBUS_PIN ((0x1 << 3)) ++#define MISC_CIM_OE_PIN ((0x1 << 8)) ++#define MISC_LCD_PWR_PIN ((0x1 << 9)) ++#define MISC_SMC_CS3_PIN ((0x1 << 10)) ++#define MISC_SMC_CS2_PIN ((0x1 << 11)) ++#define MISC_SMC_CLK_PIN ((0x1 << 12)) ++#define MISC_SMC_ADV_PIN ((0x1 << 13)) ++#define MISC_SMC_CRE_PIN ((0x1 << 14)) ++#define MISC_SMC_ADDR_26_PIN ((0x1 << 15)) ++ ++#define MISC_SD_nCD_PIN ((0x1 << 16)) ++#define MISC_SD_nWP_PIN ((0x1 << 17)) ++#define MISC_SD_CLK_PIN ((0x1 << 18)) ++#define MISC_SD_CMD_PIN ((0x1 << 19)) ++#define MISC_SD_DT7_PIN ((0x1 << 20)) ++#define MISC_SD_DT6_PIN ((0x1 << 21)) ++#define MISC_SD_DT5_PIN ((0x1 << 22)) ++#define MISC_SD_DT4_PIN ((0x1 << 23)) ++#define MISC_SD_DT3_PIN ((0x1 << 24)) ++#define MISC_SD_DT2_PIN ((0x1 << 25)) ++#define MISC_SD_DT1_PIN ((0x1 << 26)) ++#define MISC_SD_DT0_PIN ((0x1 << 27)) ++#define MISC_SD_LED_PIN ((0x1 << 28)) ++ ++#define MISC_UR_RXD1_PIN ((0x1 << 29)) ++#define MISC_UR_TXD1_PIN ((0x1 << 30)) ++#define MISC_UR_RTS2_PIN ((0x1 << 31)) ++ ++#endif ++ ++ ++/* ++ * Macro-defines for shared pins with GPIO_B ++ */ ++#if 0 ++#define MISC_SPI_DT_PIN ((0x1 << 0)) ++#define MISC_SPI_DR_PIN ((0x1 << 1)) ++ ++#define 
MISC_SD_CD_PIN ((0x1 << 2)) ++#define MISC_SD_WP_PIN ((0x1 << 3)) ++#define MISC_SD_CLK_PIN ((0x1 << 4)) ++#define MISC_SD_CMD_PIN ((0x1 << 5)) ++#define MISC_SD_DT7_PIN ((0x1 << 6)) ++#define MISC_SD_DT6_PIN ((0x1 << 7)) ++#define MISC_SD_DT5_PIN ((0x1 << 8)) ++#define MISC_SD_DT4_PIN ((0x1 << 9)) ++#define MISC_SD_DT3_PIN ((0x1 << 10)) ++#define MISC_SD_DT2_PIN ((0x1 << 11)) ++#define MISC_SD_DT1_PIN ((0x1 << 12)) ++#define MISC_SD_DT0_PIN ((0x1 << 13)) ++#define MISC_SD_LED_PIN ((0x1 << 14)) ++ ++ ++#define MISC_I2S_CLK_PIN ((0x1 << 15)) ++#define MISC_I2S_FS_PIN ((0x1 << 16)) ++#define MISC_I2S_DT_PIN ((0x1 << 17)) ++#define MISC_I2S_DR_PIN ((0x1 << 18)) ++ ++//Tim.Liao modify ++#define MISC_I2C_SCL_PIN ((0x1 << 19)) ++#define MISC_I2C_SDA_PIN ((0x1 << 20)) ++ ++#define MISC_GSW_P2_CRS_PIN ((0x1 << 21)) ++#define MISC_GSW_P2_COL_PIN ((0x1 << 22)) ++#define MISC_GSW_P1_CRS_PIN ((0x1 << 23)) ++#define MISC_GSW_P1_COL_PIN ((0x1 << 24)) ++#define MISC_GSW_P0_CRS_PIN ((0x1 << 25)) ++#define MISC_GSW_P0_COL_PIN ((0x1 << 26)) ++ ++#define MISC_GSW_MDC_PIN ((0x1 << 27)) ++#define MISC_GSW_MDIO_PIN ((0x1 << 28)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++#else ++#define MISC_UR_CTS2_PIN ((0x1 << 0)) ++#define MISC_UR_RXD2_PIN ((0x1 << 1)) ++#define MISC_UR_TXD2_PIN ((0x1 << 2)) ++#define MISC_PCMCLK_PIN ((0x1 << 3)) ++#define MISC_PCMFS_PIN ((0x1 << 4)) ++#define MISC_PCMDT_PIN ((0x1 << 5)) ++#define MISC_PCMDR_PIN ((0x1 << 6)) ++#define MISC_PCM_PINS (MISC_PCMCLK_PIN|MISC_PCMFS_PIN|MISC_PCMDT_PIN|MISC_PCMDR_PIN) ++ ++#define MISC_SPInCS1_PIN ((0x1 << 7)) ++#define MISC_SPInCS0_PIN ((0x1 << 8)) ++#define MISC_SPICLK_PIN ((0x1 << 9)) ++#define MISC_SPIDT_PIN ((0x1 << 10)) ++#define MISC_SPIDR_PIN ((0x1 << 11)) ++ ++#define MISC_I2C_SCL_PIN ((0x1 << 12)) ++#define MISC_I2C_SDA_PIN ((0x1 << 13)) ++ ++#define MISC_GSW_P2_CRS_PIN ((0x1 << 14)) ++#define MISC_GSW_P2_COL_PIN ((0x1 
<< 15)) ++#define MISC_GSW_P1_CRS_PIN ((0x1 << 16)) ++#define MISC_GSW_P1_COL_PIN ((0x1 << 17)) ++#define MISC_GSW_P0_CRS_PIN ((0x1 << 18)) ++#define MISC_GSW_P0_COL_PIN ((0x1 << 19)) ++ ++#define MISC_GSW_MDC_PIN ((0x1 << 20)) ++#define MISC_GSW_MDIO_PIN ((0x1 << 21)) ++ ++#define MISC_I2S_CLK_PIN (0x1 << 22) ++#define MISC_I2S_FS_PIN (0x1 << 23) ++#define MISC_I2S_DT_PIN (0x1 << 24) ++#define MISC_I2S_DR_PIN (0x1 << 25) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define 
MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#define MISC_CLOCK_OUTPUT_PIN ((0x1 << 26)) ++ ++#define MISC_EXT_INT2_PIN ((0x1 << 27)) ++#define MISC_EXT_INT1_PIN ((0x1 << 28)) ++#define MISC_EXT_INT0_PIN ((0x1 << 29)) ++ ++#define MISC_SATA_LED1_PIN ((0x1 << 30)) ++#define MISC_SATA_LED0_PIN ((0x1 << 31)) ++ ++#endif ++/* ++ * Other defines ++ */ ++#define MISC_GPIOA_PIN_0 (0) ++#define MISC_GPIOA_PIN_1 (1) ++#define MISC_GPIOA_PIN_2 (2) ++#define MISC_GPIOA_PIN_3 (3) ++#define MISC_GPIOA_PIN_4 (4) ++#define MISC_GPIOA_PIN_5 (5) ++#define MISC_GPIOA_PIN_6 (6) ++#define MISC_GPIOA_PIN_7 (7) ++#define MISC_GPIOA_PIN_8 (8) ++#define MISC_GPIOA_PIN_9 (9) ++#define MISC_GPIOA_PIN_10 (10) ++#define MISC_GPIOA_PIN_11 (11) ++#define MISC_GPIOA_PIN_12 (12) ++#define MISC_GPIOA_PIN_13 (13) ++#define MISC_GPIOA_PIN_14 (14) ++#define MISC_GPIOA_PIN_15 (15) ++ ++ ++#define MISC_GPIOA_RESISTOR_PULL_DOWN (1) ++#define MISC_GPIOA_RESISTOR_PULL_UP (1) ++ ++ ++ ++/* ++ * function declarations ++ */ ++ ++ ++/* ++ * macro declarations ++ */ ++#define HAL_MISC_GET_SYSTEM_ALIGN_ENDIAN_MODE(mode) \ ++{ \ ++ (mode) = (MISC_CHIP_CONFIG_REG) & 0x3; \ ++} ++ ++ ++#define HAL_MISC_GET_SYSTEM_CPU_CLOCK(cpu_clock) \ ++{ \ ++ (cpu_clock) = (MISC_CHIP_CONFIG_REG >> 5) & 0xF; \ ++} ++ ++ ++#define HAL_MISC_ENABLE_SPI_SERIAL_FLASH_BANK_ACCESS() \ ++{ \ ++ (MISC_CHIP_CONFIG_REG) |= (0x1 << 16); \ ++} ++ ++#define HAL_MISC_DISABLE_SPI_SERIAL_FLASH_BANK_ACCESS() \ ++{ \ ++ (MISC_CHIP_CONFIG_REG) &= ~(0x1 << 16); \ ++} ++ ++ ++/* ++ * Macro defines for GPIOA and GPIOB Pin Enable Register ++ */ ++#define HAL_MISC_ENABLE_EXT_INT0_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_EXT_INT0_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_EXT_INT1_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_EXT_INT1_PIN); \ ++} ++ ++#define HAL_MISC_ENABLE_EXT_INT2_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_EXT_INT2_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_EXT_INT2_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) 
&= ~(MISC_EXT_INT2_PIN); \ ++} ++ ++#define HAL_MISC_ENABLE_EXT_INT1_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_EXT_INT1_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_EXT_INT0_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_EXT_INT0_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_PCM_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_PCM_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_PCM_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_PCM_PINS); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_CIM_OE_PIN() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_CIM_OE_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_CIM_OE_PIN() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_CIM_OE_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_LCD_PWR_PIN() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_LCD_PWR_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_LCD_PWR_PIN() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_LCD_PWR_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_NFI_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_NFI_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_NFI_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_NFI_PINS); \ ++} ++ ++ ++ ++#define HAL_MISC_ENABLE_SMC_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_SMC_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_SMC_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_SMC_PINS); \ ++} ++ ++#define HAL_MISC_ENABLE_UART0_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_UART0_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_UART0_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_UART0_PINS); \ ++} ++ ++#define HAL_MISC_ENABLE_UART1_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_UART1_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_UART1_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_UART1_PINS); \ ++} ++ ++#define HAL_MISC_ENABLE_UART2_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_UART2_PINS); \ ++} ++ ++#define HAL_MISC_DISABLE_UART2_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= 
~(MISC_UART2_PINS); \ ++} ++ ++ ++ ++ ++ ++/* ++ * Macro-defines for GPIO_B ++ */ ++#define HAL_MISC_ENABLE_SPI_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= \ ++ (MISC_SPInCS1_PIN | MISC_SPInCS0_PIN | \ ++ MISC_SPICLK_PIN | MISC_SPIDT_PIN | MISC_SPIDR_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_SPI_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= \ ++ ~(MISC_SPInCS1_PIN | MISC_SPInCS0_PIN | \ ++ MISC_SPICLK_PIN | MISC_SPIDT_PIN | MISC_SPIDR_PIN); \ ++} ++ ++#define HAL_MISC_ENABLE_SD_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_SD_CD_PIN | MISC_SD_WP_PIN | MISC_SD_CLK_PIN |MISC_SD_CMD_PIN |MISC_SD_DT7_PIN|MISC_SD_DT6_PIN | \ ++ MISC_SD_DT5_PIN | MISC_SD_DT4_PIN |MISC_SD_DT3_PIN | MISC_SD_DT2_PIN| MISC_SD_DT1_PIN | MISC_SD_DT0_PIN | MISC_SD_LED_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_SD_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_SD_CD_PIN | MISC_SD_WP_PIN | MISC_SD_CLK_PIN |MISC_SD_CMD_PIN |MISC_SD_DT7_PIN|MISC_SD_DT6_PIN |\ ++ MISC_SD_DT5_PIN | MISC_SD_DT4_PIN |MISC_SD_DT3_PIN | MISC_SD_DT2_PIN| MISC_SD_DT1_PIN | MISC_SD_DT0_PIN | MISC_SD_LED_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_I2S_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_I2S_CLK_PIN | MISC_I2S_FS_PIN | MISC_I2S_DT_PIN |MISC_I2S_DR_PIN |MISC_I2S_DR_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_I2S_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_I2S_CLK_PIN | MISC_I2S_FS_PIN | MISC_I2S_DT_PIN |MISC_I2S_DR_PIN |MISC_I2S_DR_PIN); \ ++} ++ ++//Tim.Liao modify I2C pin ++#define HAL_MISC_ENABLE_I2C_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) |= (MISC_I2C_SCL_PIN | MISC_I2C_SDA_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_I2C_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) &= ~(MISC_I2C_SCL_PIN | MISC_I2C_SDA_PIN); \ ++} ++ ++#define HAL_MISC_ENABLE_GSW_P2_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_GSW_P2_CRS_PIN | MISC_GSW_P2_COL_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_GSW_P2_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= 
~(MISC_GSW_P2_CRS_PIN | MISC_GSW_P2_COL_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_GSW_P1_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_GSW_P1_CRS_PIN | MISC_GSW_P1_COL_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_GSW_P1_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_GSW_P1_CRS_PIN | MISC_GSW_P1_COL_PIN); \ ++} ++ ++ ++ ++#define HAL_MISC_ENABLE_GSW_P0_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_GSW_P0_CRS_PIN | MISC_GSW_P0_COL_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_GSW_P0_CRS_COL_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_GSW_P0_CRS_PIN | MISC_GSW_P0_COL_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_MDC_MDIO_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_GSW_MDC_PIN | MISC_GSW_MDIO_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_MDC_MDIO_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_GSW_MDC_PIN | MISC_GSW_MDIO_PIN); \ ++} ++ ++ ++ ++#define HAL_MISC_ENABLE_SATA_LED_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_SATA_LED1_PIN | MISC_SATA_LED0_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_SATA_LED_PINS() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_SATA_LED1_PIN | MISC_SATA_LED0_PIN); \ ++} ++ ++ ++ ++#define HAL_MISC_ENABLE_CLOCK_OUTPUT_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) |= (MISC_CLOCK_OUTPUT_PIN); \ ++} ++ ++#define HAL_MISC_DISABLE_CLOCK_OUTPUT_PIN() \ ++{ \ ++ (MISC_GPIOB_PIN_ENABLE_REG) &= ~(MISC_CLOCK_OUTPUT_PIN); \ ++} ++ ++ ++#define HAL_MISC_ENABLE_ALL_SHARED_GPIO_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) = (0x0); \ ++ (MISC_GPIOB_PIN_ENABLE_REG) = (0x0); \ ++} ++ ++#define HAL_MISC_DISABLE_ALL_SHARED_GPIO_PINS() \ ++{ \ ++ (MISC_GPIOA_PIN_ENABLE_REG) = (0xFFFFFFFF); \ ++ (MISC_GPIOB_PIN_ENABLE_REG) = (0xFFFFFFFF); \ ++} ++ ++ ++ ++#endif // end of #ifndef _STAR_MISC_H_ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/pcie.h +@@ -0,0 +1,149 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 
2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#ifndef _CNS3XXX_PCIE_H_ ++#define _CNS3XXX_PCIE_H_ ++ ++#include "mach/board.h" ++ ++#define PCIE0_IO_SPACE_START (CNS3XXX_PCIE0_IO_BASE) ++#define PCIE0_IO_SPACE_SIZE 0x01000000 /* 16MB */ ++#define PCIE0_IO_SPACE_END (CNS3XXX_PCIE0_IO_BASE + PCIE0_IO_SPACE_SIZE - 1) ++ ++#define PCIE0_MEM_SPACE_START (CNS3XXX_PCIE0_MEM_BASE) ++#define PCIE0_MEM_SPACE_SIZE 0x01000000 /* 16MB */ ++#define PCIE0_MEM_SPACE_END (CNS3XXX_PCIE0_MEM_BASE + PCIE0_MEM_SPACE_SIZE - 1) ++ ++#define PCIE1_IO_SPACE_START (CNS3XXX_PCIE1_IO_BASE) ++#define PCIE1_IO_SPACE_SIZE 0x01000000 /* 16MB */ ++#define PCIE1_IO_SPACE_END (CNS3XXX_PCIE1_IO_BASE + PCIE1_IO_SPACE_SIZE - 1) ++ ++#define PCIE1_MEM_SPACE_START (CNS3XXX_PCIE1_MEM_BASE) ++#define PCIE1_MEM_SPACE_SIZE 0x01000000 /* 16MB */ ++#define PCIE1_MEM_SPACE_END (CNS3XXX_PCIE1_MEM_BASE + PCIE1_MEM_SPACE_SIZE - 1) ++ ++#define PCIB_MEM_MAP_VALUE(base, reg_offset) (*((u32 volatile *)(SYSVA_PCI_BRIDGE_##base##_ADDR + reg_offset))) ++ ++/* ++ * define access macros ++ 
*/ ++#define PCI_BRIDGE_CONFIG_DATA PCIB_MEM_MAP_VALUE(CONFIG_DATA_BASE, 0x2C) ++#define PCI_BRIDGE_CONFIG_ADDR PCIB_MEM_MAP_VALUE(CONFIG_ADDR_BASE, 0x28) ++ ++#define PCI_BRIDGE_CONFIG_DATA_REG_OFFSET 0x2C ++#define PCI_BRIDGE_CONFIG_ADDR_REG_OFFSET 0x28 ++ ++ ++/* PCIe MISC 0 Register */ ++#define CNS3XXX_PCIEPHY0_CMCTL0 (CNS3XXX_MISC_BASE_VIRT + 0x900) ++#define CNS3XXX_PCIEPHY0_CMCTL1 (CNS3XXX_MISC_BASE_VIRT + 0x904) ++#define CNS3XXX_PCIEPHY0_CTL1 (CNS3XXX_MISC_BASE_VIRT + 0x940) ++#define CNS3XXX_PCIE0_AXIS_AWMISC (CNS3XXX_MISC_BASE_VIRT + 0x944) ++#define CNS3XXX_PCIE0_AXIS_ARMISC (CNS3XXX_MISC_BASE_VIRT + 0x948) ++#define CNS3XXX_PCIE0_AXIS_RMISC (CNS3XXX_MISC_BASE_VIRT + 0x94C) ++#define CNS3XXX_PCIE0_AXIS_BMISC (CNS3XXX_MISC_BASE_VIRT + 0x950) ++#define CNS3XXX_PCIE0_AXIM_RMISC (CNS3XXX_MISC_BASE_VIRT + 0x954) ++#define CNS3XXX_PCIE0_AXIM_BMISC (CNS3XXX_MISC_BASE_VIRT + 0x958) ++#define CNS3XXX_PCIE0_CTRL (CNS3XXX_MISC_BASE_VIRT + 0x95C) ++#define CNS3XXX_PCIE0_PM_DEBUG (CNS3XXX_MISC_BASE_VIRT + 0x960) ++#define CNS3XXX_PCIE0_RFC_DEBUG (CNS3XXX_MISC_BASE_VIRT + 0x964) ++#define CNS3XXX_PCIE0_CXPL_DEBUGL (CNS3XXX_MISC_BASE_VIRT + 0x968) ++#define CNS3XXX_PCIE0_CXPL_DEBUGH (CNS3XXX_MISC_BASE_VIRT + 0x96C) ++#define CNS3XXX_PCIE0_DIAG (CNS3XXX_MISC_BASE_VIRT + 0x970) ++#define CNS3XXX_PCIE0_INT_STATUS (CNS3XXX_MISC_BASE_VIRT + 0x974) ++#define CNS3XXX_PCIE0_INT_MASK (CNS3XXX_MISC_BASE_VIRT + 0x978) ++ ++ ++/* PCIe MISC 1 Register */ ++#define CNS3XXX_PCIEPHY1_CMCTL0 (CNS3XXX_MISC_BASE_VIRT + 0xA00) ++#define CNS3XXX_PCIEPHY1_CMCTL1 (CNS3XXX_MISC_BASE_VIRT + 0xA04) ++#define CNS3XXX_PCIEPHY1_CTL1 (CNS3XXX_MISC_BASE_VIRT + 0xA40) ++#define CNS3XXX_PCIE1_AXIS_AWMISC (CNS3XXX_MISC_BASE_VIRT + 0xA44) ++#define CNS3XXX_PCIE1_AXIS_ARMISC (CNS3XXX_MISC_BASE_VIRT + 0xA48) ++#define CNS3XXX_PCIE1_AXIS_RMISC (CNS3XXX_MISC_BASE_VIRT + 0xA4C) ++#define CNS3XXX_PCIE1_AXIS_BMISC (CNS3XXX_MISC_BASE_VIRT + 0xA50) ++#define CNS3XXX_PCIE1_AXIM_RMISC (CNS3XXX_MISC_BASE_VIRT + 
0xA54) ++#define CNS3XXX_PCIE1_AXIM_BMISC (CNS3XXX_MISC_BASE_VIRT + 0xA58) ++#define CNS3XXX_PCIE1_CTRL (CNS3XXX_MISC_BASE_VIRT + 0xA5C) ++#define CNS3XXX_PCIE1_PM_DEBUG (CNS3XXX_MISC_BASE_VIRT + 0xA60) ++#define CNS3XXX_PCIE1_RFC_DEBUG (CNS3XXX_MISC_BASE_VIRT + 0xA64) ++#define CNS3XXX_PCIE1_CXPL_DEBUGL (CNS3XXX_MISC_BASE_VIRT + 0xA68) ++#define CNS3XXX_PCIE1_CXPL_DEBUGH (CNS3XXX_MISC_BASE_VIRT + 0xA6C) ++#define CNS3XXX_PCIE1_DIAG (CNS3XXX_MISC_BASE_VIRT + 0xA70) ++#define CNS3XXX_PCIE1_INT_STATUS (CNS3XXX_MISC_BASE_VIRT + 0xA74) ++#define CNS3XXX_PCIE1_INT_MASK (CNS3XXX_MISC_BASE_VIRT + 0xA78) ++ ++ ++/* ++ * define constants macros ++ */ ++ ++#define PCIB_DEVICE_ID 0x3400 ++#define PCIB_VENDOR_ID 0x177D ++#define PCIB_CLASS_CODE 0xFF0000 ++#define PCIB_REVISION_ID 0x00 ++#define PCIB_BAR0_MEMORY_SPACE_BASE 0x20000000 ++#define PCIB_BAR1_IO_SPACE_BASE 0x20000000 ++#define PCI_MEMORY_SPACE_BASE 0xB0000000 ++#define PCI_IO_SPACE_BASE 0xA8000000 ++#define PCI_MAX_BUS_NUM 0x01 ++#define PCI_MAX_DEVICE_NUM 0x14 ++#define PCI_MAX_FUNCTION_NUM 0x01 ++#define PCI_MAX_REG_NUM 0x3C ++ ++#define PCI_MAX_DEVICE_TYPE_NUM 0x13 ++#define PCI_MAX_BAR_NUM 0x06 ++ ++#define PCI_CSH_VENDOR_ID_REG_ADDR 0x00 ++#define PCI_CSH_DEVICE_ID_REG_ADDR 0x02 ++#define PCI_CSH_COMMAND_REG_ADDR 0x04 ++#define PCI_CSH_STATUS_REG_ADDR 0x06 ++#define PCI_CSH_REVISION_CLASS_REG_ADDR 0x08 ++#define PCI_CSH_CACHE_LINE_SIZE_REG_ADDR 0x0C ++#define PCI_CSH_LATENCY_TIMER_REG_ADDR 0x0D ++#define PCI_CSH_HEADER_TYPE_REG_ADDR 0x0E ++#define PCI_CSH_BIST_REG_ADDR 0x0F ++#define PCI_CSH_BAR_REG_ADDR 0x10 ++ ++ ++#define PCI_IO_SPACE_SIZE_1M 0x00 ++#define PCI_IO_SPACE_SIZE_2M 0x01 ++#define PCI_IO_SPACE_SIZE_4M 0x02 ++#define PCI_IO_SPACE_SIZE_8M 0x03 ++#define PCI_IO_SPACE_SIZE_16M 0x04 ++#define PCI_IO_SPACE_SIZE_32M 0x05 ++#define PCI_IO_SPACE_SIZE_64M 0x06 ++#define PCI_IO_SPACE_SIZE_128M 0x07 ++#define PCI_IO_SPACE_SIZE_256M 0x08 ++#define PCI_IO_SPACE_SIZE_512M 0x09 ++#define PCI_IO_SPACE_SIZE_1G 0x0A 
++#define PCI_IO_SPACE_SIZE_2G 0x0B ++ ++ ++struct pcie_dbgfs_reg{ ++ char *name; ++ u32 *addr; ++}; ++ ++#endif /* end of #ifndef _CNS3XXX_PCIE_H_ */ ++ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/pcm.h +@@ -0,0 +1,277 @@ ++/****************************************************************************** ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#ifndef _STAR_PCM_H_ ++#define _STAR_PCM_H_ ++ ++/****************************************************************************** ++ * MODULE NAME: star_pcm.h ++ * PROJECT CODE: Orion ++ * DESCRIPTION: ++ * MAINTAINER: MJLIU ++ * DATE: 15 September 2005 ++ * ++ * SOURCE CONTROL: ++ * ++ * LICENSE: ++ * This source code is copyright (c) 2005 Star Semi Inc. ++ * All rights reserved. 
++ * ++ * REVISION HISTORY: ++ * 15 September 2005 - MJLIU - Initial Version v1.0 ++ * ++ * ++ * SOURCE: ++ * ISSUES: ++ * NOTES TO USERS: ++ ******************************************************************************/ ++ ++//#include ++ ++#define PCM_BASE_ADDR (CNS3XXX_SSP_BASE_VIRT) ++#define PCM_MEM_MAP_ADDR(reg_offset) (PCM_BASE_ADDR + reg_offset) ++#define PCM_MEM_MAP_VALUE(reg_offset) (*((u32 volatile *)PCM_MEM_MAP_ADDR(reg_offset))) ++ ++ ++/* ++ * define access macros ++ */ ++#define PCM_CONFIGURATION_0_REG PCM_MEM_MAP_VALUE(0x80) ++#define PCM_CONFIGURATION_1_REG PCM_MEM_MAP_VALUE(0x84) ++ ++#define PCM_CHANNEL_0_CONFIG_REG PCM_MEM_MAP_VALUE(0x88) ++#define PCM_CHANNEL_1_CONFIG_REG PCM_MEM_MAP_VALUE(0x8C) ++#define PCM_CHANNEL_2_CONFIG_REG PCM_MEM_MAP_VALUE(0x90) ++#define PCM_CHANNEL_3_CONFIG_REG PCM_MEM_MAP_VALUE(0x94) ++ ++#define PCM_TX_DATA_31_0_REG PCM_MEM_MAP_VALUE(0x98) ++#define PCM_TX_DATA_63_32_REG PCM_MEM_MAP_VALUE(0x9C) ++ ++#define PCM_RX_DATA_31_0_REG PCM_MEM_MAP_VALUE(0xA0) ++#define PCM_RX_DATA_63_32_REG PCM_MEM_MAP_VALUE(0xA4) ++ ++#define PCM_INTERRUPT_STATUS_REG PCM_MEM_MAP_VALUE(0xA8) ++#define PCM_INTERRUPT_ENABLE_REG PCM_MEM_MAP_VALUE(0xAC) ++ ++ ++ ++/* ++ * define constants macros ++ */ ++#define CH0_BIT_INDEX (0x1) ++#define CH1_BIT_INDEX (0x2) ++#define CH2_BIT_INDEX (0x4) ++#define CH3_BIT_INDEX (0x8) ++ ++#define PCM_RXBUF_FULL_FG (0x1) ++#define PCM_TXBUF_EMPTY_FG (0x2) ++#define PCM_RXBUF_OVERRUN_FG (0x4) ++#define PCM_TXBUF_UNDERRUN_FG (0x8) ++ ++#define PCM_ENABLE_FG (0x1 << 23) ++ ++#define PCM_IDL_MODE (0) ++#define PCM_GCI_MODE (1) ++ ++#define PCM_DATA_BIT_8 (0) ++#define PCM_DATA_BIT_16 (1) ++ ++ ++/* ++ * Set Commands Variables ++ */ ++#define Software_Reset (0x02) ++#define Hardware_Reset (0x04) ++#define Write_Transmit_Time_Slot (0x40) ++#define Read_Transmit_Time_Slot (0x41) ++#define Write_Receive_Time_Slot (0x42) ++#define Read_Receive_Time_Slot (0x43) ++#define Write_Tx_Rx_CLK_Slot_Tx_CLK_Edge (0x44) 
++#define Read_Tx_Rx_CLK_Slot_Tx_CLK_Edge (0x45) ++#define Write_Device_Configure_Reg (0x46) ++#define Read_Device_Configure_Reg (0x47) ++#define Write_Channel_Enable_Operating_Mode_Reg (0x4A) ++#define Read_Channel_Enable_Operating_Mode_Reg (0x4B) ++#define Read_Signal_Reg (0x4D) ++#define Input_Data_Reg (0x52) ++#define Output_Data_Reg (0x53) ++#define Input_Direction_Reg (0x54) ++#define Output_Direction_Reg (0x55) ++#define Write_System_State (0x56) ++#define Read_System_State (0x57) ++#define Write_Operating_Functon (0x60) ++#define Read_Operating_Functon (0x61) ++#define Write_System_State_Config (0x68) ++#define Read_System_State_Config (0x69) ++#define Write_Interrupt_Mask_Reg (0x6C) ++#define Read_Interrupt_Mask_Reg (0x6D) ++#define Write_Operating_Condition (0x70) ++#define Write_Loop_Supervision_Parameter (0xC2) ++#define Write_DC_Feed_Parameter (0xC6) ++#define Write_Signal_A_B_Parameter (0xD2) ++#define Write_Switching_Reg_Parameter (0xE4) ++#define Write_Switching_Reg_Control (0xE6) ++ ++ ++/* ++ * define data structure ++ */ ++typedef struct _PCM_CHANNEL_OBJECT_ PCM_CHANNEL_OBJECT_T; ++ ++struct _PCM_CHANNEL_OBJECT_ ++{ ++ u16 channel_0_tx_data; ++ u16 channel_0_rx_data; ++ u32 channel_0_data_width; /* 0 : 8-bit, 1 : 16-bit */ ++ ++ u16 channel_1_tx_data; ++ u16 channel_1_rx_data; ++ u32 channel_1_data_width; ++ ++ u16 channel_2_tx_data; ++ u16 channel_2_rx_data; ++ u32 channel_2_data_width; ++ ++ u16 channel_3_tx_data; ++ u16 channel_3_rx_data; ++ u32 channel_3_data_width; ++ ++ u32 channel_enable_config; /* bit[0] = 0 : channel 0 disabled ++ [0] = 1 : channel 0 enabled ++ bit[1] = 0 : channel 1 disabled ++ [1] = 1 : channel 1 enabled ++ bit[2] = 0 : channel 2 disabled ++ [2] = 1 : channel 2 enabled ++ bit[3] = 0 : channel 3 disabled ++ [3] = 1 : channel 3 enabled */ ++}; ++ ++ ++typedef struct _PCM_OBJECT_ PCM_OBJECT_T; ++ ++struct _PCM_OBJECT_ ++{ ++ u32 config_0; ++ u32 config_1; ++ ++ u32 channel_0_config; ++ u32 channel_1_config; ++ u32 
channel_2_config; ++ u32 channel_3_config; ++ ++ u32 interrupt_config; ++ ++ /* ++ * For interrupt setting ++ */ ++// INTC_OBJECT_T intc_obj; ++}; ++ ++ ++ ++/* ++ * function declarations ++ */ ++void Hal_Pcm_Initialize(PCM_OBJECT_T *); ++ ++ ++/* ++ * macro declarations ++ */ ++#define HAL_PCM_ENABLE_PCM() \ ++{ \ ++ (PCM_CONFIGURATION_0_REG) |= ((u32)0x1 << 31); \ ++} ++ ++#define HAL_PCM_DISABLE_PCM() \ ++{ \ ++ (PCM_CONFIGURATION_0_REG) &= ~((u32)0x1 << 31); \ ++} ++ ++#define HAL_PCM_ENABLE_DATA_SWAP() \ ++{ \ ++ (PCM_CONFIGURATION_0_REG) |= (0x1 << 24); \ ++} ++ ++#define HAL_PCM_DISABLE_DATA_SWAP() \ ++{ \ ++ (PCM_CONFIGURATION_0_REG) &= ~(0x1 << 24); \ ++} ++ ++#define HAL_PCM_WRITE_TX_DATA_0(tx_data_0) \ ++{ \ ++ (PCM_TX_DATA_31_0_REG) = tx_data_0; \ ++} ++ ++#define HAL_PCM_WRITE_TX_DATA_1(tx_data_1) \ ++{ \ ++ (PCM_TX_DATA_63_32_REG) = tx_data_1; \ ++} ++ ++#define HAL_PCM_READ_RX_DATA_0(rx_data_0) \ ++{ \ ++ (rx_data_0) = PCM_RX_DATA_31_0_REG; \ ++} ++ ++#define HAL_PCM_READ_RX_DATA_1(rx_data_1) \ ++{ \ ++ (rx_data_1) = PCM_RX_DATA_63_32_REG; \ ++} ++ ++#define HAL_PCM_READ_INTERRUPT_STATUS(status) \ ++{ \ ++ (status) = PCM_INTERRUPT_STATUS_REG; \ ++} ++ ++#define HAL_PCM_CLEAR_INTERRUPT_STATUS(status) \ ++{ \ ++ (PCM_INTERRUPT_STATUS_REG) = (status & 0xC); \ ++} ++ ++#define HAL_PCM_DISABLE_RECEIVE_BUFFER_FULL_INTERRUPT() \ ++{ \ ++ (PCM_INTERRUPT_ENABLE_REG) &= ~(0x1 << 0); \ ++} ++ ++#define HAL_PCM_DISABLE_TRANSMIT_BUFFER_EMPTY_INTERRUPT() \ ++{ \ ++ (PCM_INTERRUPT_ENABLE_REG) &= ~(0x1 << 1); \ ++} ++ ++#define HAL_PCM_DISABLE_RECEIVE_BUFFER_OVERRUN_INTERRUPT() \ ++{ \ ++ (PCM_INTERRUPT_ENABLE_REG) &= ~(0x1 << 2); \ ++} ++ ++#define HAL_PCM_DISABLE_TRANSMIT_BUFFER_UNDERRUN_INTERRUPT() \ ++{ \ ++ (PCM_INTERRUPT_ENABLE_REG) &= ~(0x1 << 3); \ ++} ++ ++#define HAL_PCM_DISABLE_ALL_INTERRUPT_SOURCES() \ ++{ \ ++ (PCM_INTERRUPT_ENABLE_REG) = 0; \ ++} ++ ++#endif // end of #ifndef _STAR_PCM_H_ ++ +--- /dev/null ++++ 
b/arch/arm/mach-cns3xxx/include/mach/platform.h +@@ -0,0 +1,297 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/platform.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (c) ARM Limited 2003. All rights reserved. ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#ifndef __ASM_ARCH_PLATFORM_H ++#define __ASM_ARCH_PLATFORM_H ++ ++#ifndef __ASSEMBLY__ ++ ++#include ++ ++/* ++ * SDRAM ++ */ ++#define CNS3XXX_SDRAM_BASE 0x00000000 ++ ++/* ------------------------------------------------------------------------ ++ * Cavium Networks Registers ++ * ------------------------------------------------------------------------ ++ * ++ */ ++#define CNS3XXX_SYS_ID_OFFSET 0x00 ++#define CNS3XXX_SYS_SW_OFFSET 0x04 ++#define CNS3XXX_SYS_LED_OFFSET 0x08 ++#define CNS3XXX_SYS_OSC0_OFFSET 0x0C ++ ++#define CNS3XXX_SYS_OSC1_OFFSET 0x10 ++#define CNS3XXX_SYS_OSC2_OFFSET 0x14 ++#define CNS3XXX_SYS_OSC3_OFFSET 0x18 ++#define CNS3XXX_SYS_OSC4_OFFSET 0x1C /* OSC1 for Cavium Networks/AB */ ++ ++#define CNS3XXX_SYS_LOCK_OFFSET 0x20 ++#define CNS3XXX_SYS_100HZ_OFFSET 0x24 ++#define CNS3XXX_SYS_CFGDATA1_OFFSET 0x28 ++#define CNS3XXX_SYS_CFGDATA2_OFFSET 0x2C ++#define CNS3XXX_SYS_FLAGS_OFFSET 0x30 ++#define CNS3XXX_SYS_FLAGSSET_OFFSET 0x30 ++#define CNS3XXX_SYS_FLAGSCLR_OFFSET 0x34 ++#define CNS3XXX_SYS_NVFLAGS_OFFSET 0x38 ++#define CNS3XXX_SYS_NVFLAGSSET_OFFSET 0x38 ++#define CNS3XXX_SYS_NVFLAGSCLR_OFFSET 0x3C ++#define CNS3XXX_SYS_RESETCTL_OFFSET 0x40 ++#define CNS3XXX_SYS_PCICTL_OFFSET 0x44 ++#define CNS3XXX_SYS_MCI_OFFSET 0x48 ++#define CNS3XXX_SYS_FLASH_OFFSET 0x4C ++#define CNS3XXX_SYS_CLCD_OFFSET 0x50 ++#define CNS3XXX_SYS_CLCDSER_OFFSET 0x54 ++#define CNS3XXX_SYS_BOOTCS_OFFSET 0x58 ++#define CNS3XXX_SYS_24MHz_OFFSET 0x5C ++#define CNS3XXX_SYS_MISC_OFFSET 0x60 ++#define CNS3XXX_SYS_IOSEL_OFFSET 0x70 ++#define CNS3XXX_SYS_PROCID_OFFSET 0x84 ++#define CNS3XXX_SYS_TEST_OSC0_OFFSET 0xC0 ++#define CNS3XXX_SYS_TEST_OSC1_OFFSET 0xC4 ++#define CNS3XXX_SYS_TEST_OSC2_OFFSET 0xC8 ++#define CNS3XXX_SYS_TEST_OSC3_OFFSET 0xCC ++#define CNS3XXX_SYS_TEST_OSC4_OFFSET 0xD0 ++ ++#define CNS3XXX_SYS_BASE 0x10000000 ++#define CNS3XXX_SYS_ID (CNS3XXX_SYS_BASE + CNS3XXX_SYS_ID_OFFSET) ++#define CNS3XXX_SYS_SW 
(CNS3XXX_SYS_BASE + CNS3XXX_SYS_SW_OFFSET) ++#define CNS3XXX_SYS_LED (CNS3XXX_SYS_BASE + CNS3XXX_SYS_LED_OFFSET) ++#define CNS3XXX_SYS_OSC0 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_OSC0_OFFSET) ++#define CNS3XXX_SYS_OSC1 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_OSC1_OFFSET) ++ ++#define CNS3XXX_SYS_LOCK (CNS3XXX_SYS_BASE + CNS3XXX_SYS_LOCK_OFFSET) ++#define CNS3XXX_SYS_100HZ (CNS3XXX_SYS_BASE + CNS3XXX_SYS_100HZ_OFFSET) ++#define CNS3XXX_SYS_CFGDATA1 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_CFGDATA1_OFFSET) ++#define CNS3XXX_SYS_CFGDATA2 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_CFGDATA2_OFFSET) ++#define CNS3XXX_SYS_FLAGS (CNS3XXX_SYS_BASE + CNS3XXX_SYS_FLAGS_OFFSET) ++#define CNS3XXX_SYS_FLAGSSET (CNS3XXX_SYS_BASE + CNS3XXX_SYS_FLAGSSET_OFFSET) ++#define CNS3XXX_SYS_FLAGSCLR (CNS3XXX_SYS_BASE + CNS3XXX_SYS_FLAGSCLR_OFFSET) ++#define CNS3XXX_SYS_NVFLAGS (CNS3XXX_SYS_BASE + CNS3XXX_SYS_NVFLAGS_OFFSET) ++#define CNS3XXX_SYS_NVFLAGSSET (CNS3XXX_SYS_BASE + CNS3XXX_SYS_NVFLAGSSET_OFFSET) ++#define CNS3XXX_SYS_NVFLAGSCLR (CNS3XXX_SYS_BASE + CNS3XXX_SYS_NVFLAGSCLR_OFFSET) ++#define CNS3XXX_SYS_RESETCTL (CNS3XXX_SYS_BASE + CNS3XXX_SYS_RESETCTL_OFFSET) ++#define CNS3XXX_SYS_PCICTL (CNS3XXX_SYS_BASE + CNS3XXX_SYS_PCICTL_OFFSET) ++#define CNS3XXX_SYS_MCI (CNS3XXX_SYS_BASE + CNS3XXX_SYS_MCI_OFFSET) ++#define CNS3XXX_SYS_FLASH (CNS3XXX_SYS_BASE + CNS3XXX_SYS_FLASH_OFFSET) ++#define CNS3XXX_SYS_CLCD (CNS3XXX_SYS_BASE + CNS3XXX_SYS_CLCD_OFFSET) ++#define CNS3XXX_SYS_CLCDSER (CNS3XXX_SYS_BASE + CNS3XXX_SYS_CLCDSER_OFFSET) ++#define CNS3XXX_SYS_BOOTCS (CNS3XXX_SYS_BASE + CNS3XXX_SYS_BOOTCS_OFFSET) ++#define CNS3XXX_SYS_24MHz (CNS3XXX_SYS_BASE + CNS3XXX_SYS_24MHz_OFFSET) ++#define CNS3XXX_SYS_MISC (CNS3XXX_SYS_BASE + CNS3XXX_SYS_MISC_OFFSET) ++#define CNS3XXX_SYS_IOSEL (CNS3XXX_SYS_BASE + CNS3XXX_SYS_IOSEL_OFFSET) ++#define CNS3XXX_SYS_PROCID (CNS3XXX_SYS_BASE + CNS3XXX_SYS_PROCID_OFFSET) ++#define CNS3XXX_SYS_TEST_OSC0 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_TEST_OSC0_OFFSET) ++#define CNS3XXX_SYS_TEST_OSC1 (CNS3XXX_SYS_BASE 
+ CNS3XXX_SYS_TEST_OSC1_OFFSET) ++#define CNS3XXX_SYS_TEST_OSC2 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_TEST_OSC2_OFFSET) ++#define CNS3XXX_SYS_TEST_OSC3 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_TEST_OSC3_OFFSET) ++#define CNS3XXX_SYS_TEST_OSC4 (CNS3XXX_SYS_BASE + CNS3XXX_SYS_TEST_OSC4_OFFSET) ++ ++/* ++ * Values for CNS3XXX_SYS_RESET_CTRL ++ */ ++#define CNS3XXX_SYS_CTRL_RESET_CONFIGCLR 0x01 ++#define CNS3XXX_SYS_CTRL_RESET_CONFIGINIT 0x02 ++#define CNS3XXX_SYS_CTRL_RESET_DLLRESET 0x03 ++#define CNS3XXX_SYS_CTRL_RESET_PLLRESET 0x04 ++#define CNS3XXX_SYS_CTRL_RESET_POR 0x05 ++#define CNS3XXX_SYS_CTRL_RESET_DoC 0x06 ++ ++#define CNS3XXX_SYS_CTRL_LED (1 << 0) ++ ++ ++/* ------------------------------------------------------------------------ ++ * Cavium Networks control registers ++ * ------------------------------------------------------------------------ ++ */ ++ ++/* ++ * CNS3XXX_IDFIELD ++ * ++ * 31:24 = manufacturer (0x41 = ARM) ++ * 23:16 = architecture (0x08 = AHB system bus, ASB processor bus) ++ * 15:12 = FPGA (0x3 = XVC600 or XVC600E) ++ * 11:4 = build value ++ * 3:0 = revision number (0x1 = rev B (AHB)) ++ */ ++ ++/* ++ * CNS3XXX_SYS_LOCK ++ * control access to SYS_OSCx, SYS_CFGDATAx, SYS_RESETCTL, ++ * SYS_CLD, SYS_BOOTCS ++ */ ++#define CNS3XXX_SYS_LOCK_LOCKED (1 << 16) ++#define CNS3XXX_SYS_LOCKVAL_MASK 0xFFFF /* write 0xA05F to enable write access */ ++ ++/* ++ * CNS3XXX_SYS_FLASH ++ */ ++#define CNS3XXX_FLASHPROG_FLVPPEN (1 << 0) /* Enable writing to flash */ ++ ++/* ++ * CNS3XXX_INTREG ++ * - used to acknowledge and control MMCI and UART interrupts ++ */ ++#define CNS3XXX_INTREG_WPROT 0x00 /* MMC protection status (no interrupt generated) */ ++#define CNS3XXX_INTREG_RI0 0x01 /* Ring indicator UART0 is asserted, */ ++#define CNS3XXX_INTREG_CARDIN 0x08 /* MMCI card in detect */ ++ /* write 1 to acknowledge and clear */ ++#define CNS3XXX_INTREG_RI1 0x02 /* Ring indicator UART1 is asserted, */ ++#define CNS3XXX_INTREG_CARDINSERT 0x03 /* Signal insertion of MMC card */ ++ 
++/* ++ * Cavium Networks common peripheral addresses ++ */ ++#define CNS3XXX_SCTL_BASE 0x10001000 /* System controller */ ++ ++/* PCI space */ ++#define CNS3XXX_PCI_BASE 0x41000000 /* PCI Interface */ ++#define CNS3XXX_PCI_CFG_BASE 0x42000000 ++#define CNS3XXX_PCI_MEM_BASE0 0x44000000 ++#define CNS3XXX_PCI_MEM_BASE1 0x50000000 ++#define CNS3XXX_PCI_MEM_BASE2 0x60000000 ++/* Sizes of above maps */ ++#define CNS3XXX_PCI_BASE_SIZE 0x01000000 ++#define CNS3XXX_PCI_CFG_BASE_SIZE 0x02000000 ++#define CNS3XXX_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */ ++#define CNS3XXX_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */ ++#define CNS3XXX_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */ ++ ++#define CNS3XXX_SDRAM67_BASE 0x70000000 /* SDRAM banks 6 and 7 */ ++#define CNS3XXX_LT_BASE 0x80000000 /* Logic Tile expansion */ ++ ++/* ++ * LED settings, bits [7:0] ++ */ ++#define CNS3XXX_SYS_LED0 (1 << 0) ++#define CNS3XXX_SYS_LED1 (1 << 1) ++#define CNS3XXX_SYS_LED2 (1 << 2) ++#define CNS3XXX_SYS_LED3 (1 << 3) ++#define CNS3XXX_SYS_LED4 (1 << 4) ++#define CNS3XXX_SYS_LED5 (1 << 5) ++#define CNS3XXX_SYS_LED6 (1 << 6) ++#define CNS3XXX_SYS_LED7 (1 << 7) ++ ++#define ALL_LEDS 0xFF ++ ++#define LED_BANK CNS3XXX_SYS_LED ++ ++/* ++ * Control registers ++ */ ++#define CNS3XXX_IDFIELD_OFFSET 0x0 /* Cavium Networks build information */ ++#define CNS3XXX_FLASHPROG_OFFSET 0x4 /* Flash devices */ ++#define CNS3XXX_INTREG_OFFSET 0x8 /* Interrupt control */ ++#define CNS3XXX_DECODE_OFFSET 0xC /* Fitted logic modules */ ++ ++/* ++ * System controller bit assignment ++ */ ++#define CNS3XXX_REFCLK 0 ++#define CNS3XXX_TIMCLK 1 ++ ++#define CNS3XXX_TIMER1_EnSel 15 ++#define CNS3XXX_TIMER2_EnSel 17 ++#define CNS3XXX_TIMER3_EnSel 19 ++#define CNS3XXX_TIMER4_EnSel 21 ++ ++ ++#define MAX_TIMER 2 ++#define MAX_PERIOD 699050 ++#define TICKS_PER_uSEC 1 ++ ++/* ++ * These are useconds NOT ticks. 
++ * ++ */ ++#define mSEC_1 1000 ++#define mSEC_5 (mSEC_1 * 5) ++#define mSEC_10 (mSEC_1 * 10) ++#define mSEC_25 (mSEC_1 * 25) ++#define SEC_1 (mSEC_1 * 1000) ++ ++#define CNS3XXX_CSR_BASE 0x10000000 ++#define CNS3XXX_CSR_SIZE 0x10000000 ++ ++/* Platform Level Setup Functions */ ++ ++extern void cns3xxx_sys_init(void); ++extern int cns3xxx_pcie_init(u8 ports); ++ ++/* Information about built-in Ethernet MAC interfaces */ ++struct eth_plat_info { ++ u8 ports; /* Bitmap of enabled Ports */ ++ u8 eth0_hwaddr[6]; ++ u8 eth1_hwaddr[6]; ++ u8 eth2_hwaddr[6]; ++ u8 cpu_hwaddr[6]; ++}; ++ ++// Config 1 Bitmap ++#define ETH0_LOAD BIT(0) ++#define ETH1_LOAD BIT(1) ++#define ETH2_LOAD BIT(2) ++#define SATA0_LOAD BIT(3) ++#define SATA1_LOAD BIT(4) ++#define PCM_LOAD BIT(5) ++#define I2S_LOAD BIT(6) ++#define SPI0_LOAD BIT(7) ++#define SPI1_LOAD BIT(8) ++#define PCIe0_LOAD BIT(9) ++#define PCIe1_LOAD BIT(10) ++#define USB0_LOAD BIT(11) ++#define USB1_LOAD BIT(12) ++#define USB1_ROUTE BIT(13) ++#define SD_LOAD BIT(14) ++#define UART0_LOAD BIT(15) ++#define UART1_LOAD BIT(16) ++#define UART2_LOAD BIT(17) ++#define mPCI0_LOAD BIT(18) ++#define mPCI1_LOAD BIT(19) ++#define mPCI2_LOAD BIT(20) ++#define mPCI3_LOAD BIT(21) ++#define FP_BUT_LOAD BIT(22) ++#define FP_BUT_HEADER_LOAD BIT(23) ++#define FP_LED_LOAD BIT(24) ++#define FP_LED_HEADER_LOAD BIT(25) ++#define FP_TAMPER_LOAD BIT(26) ++#define HEADER_33v_LOAD BIT(27) ++#define SATA_POWER_LOAD BIT(28) ++#define FP_POWER_LOAD BIT(29) ++#define GPIO_HEADER_LOAD BIT(30) ++#define GSP_BAT_LOAD BIT(31) ++ ++// Config 2 Bitmap ++#define FAN_LOAD BIT(0) ++#define SPI_FLASH_LOAD BIT(1) ++#define NOR_FLASH_LOAD BIT(2) ++#define GPS_LOAD BIT(3) ++#define SUPPLY_5v_LOAD BIT(6) ++#define SUPPLY_33v_LOAD BIT(7) ++ ++ ++#endif /* __ASM_ARCH_PLATFORM_H */ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/pm.h +@@ -0,0 +1,333 @@ ++/****************************************************************************** ++ * ++ * Copyright (c) 
2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#ifndef _CNS3XXX_PM_H_ ++#define _CNS3XXX_PM_H_ ++#include ++#define PMU_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_PM_BASE_VIRT+offset))) ++ ++#define PM_CLK_GATE_REG PMU_REG_VALUE(0x000) ++#define PM_SOFT_RST_REG PMU_REG_VALUE(0x004) ++#define PM_HS_CFG_REG PMU_REG_VALUE(0x008) ++#define PM_CACTIVE_STA_REG PMU_REG_VALUE(0x00C) ++#define PM_PWR_STA_REG PMU_REG_VALUE(0x010) ++#define PM_CLK_CTRL_REG PMU_REG_VALUE(0x014) ++#define PM_PLL_LCD_I2S_CTRL_REG PMU_REG_VALUE(0x018) ++#define PM_PLL_HM_PD_CTRL_REG PMU_REG_VALUE(0x01C) ++#define PM_REGULAT_CTRL_REG PMU_REG_VALUE(0x020) ++#define PM_WDT_CTRL_REG PMU_REG_VALUE(0x024) ++#define PM_WU_CTRL0_REG PMU_REG_VALUE(0x028) ++#define PM_WU_CTRL1_REG PMU_REG_VALUE(0x02C) ++#define PM_CSR_REG PMU_REG_VALUE(0x030) ++ ++/* PM_CLK_GATE_REG */ ++#define PM_CLK_GATE_REG_OFFSET_SDIO (25) ++#define PM_CLK_GATE_REG_OFFSET_GPU (24) ++#define PM_CLK_GATE_REG_OFFSET_CIM (23) ++#define PM_CLK_GATE_REG_OFFSET_LCDC (22) ++#define 
PM_CLK_GATE_REG_OFFSET_I2S (21) ++#define PM_CLK_GATE_REG_OFFSET_RAID (20) ++#define PM_CLK_GATE_REG_OFFSET_SATA (19) ++#define PM_CLK_GATE_REG_OFFSET_PCIE0 (17) ++#define PM_CLK_GATE_REG_OFFSET_PCIE1 (18) ++#define PM_CLK_GATE_REG_OFFSET_USB_HOST (16) ++#define PM_CLK_GATE_REG_OFFSET_USB_OTG (15) ++#define PM_CLK_GATE_REG_OFFSET_TIMER (14) ++#define PM_CLK_GATE_REG_OFFSET_CRYPTO (13) ++#define PM_CLK_GATE_REG_OFFSET_HCIE (12) ++#define PM_CLK_GATE_REG_OFFSET_SWITCH (11) ++#define PM_CLK_GATE_REG_OFFSET_GPIO (10) ++#define PM_CLK_GATE_REG_OFFSET_UART3 (9) ++#define PM_CLK_GATE_REG_OFFSET_UART2 (8) ++#define PM_CLK_GATE_REG_OFFSET_UART1 (7) ++#define PM_CLK_GATE_REG_OFFSET_RTC (5) ++#define PM_CLK_GATE_REG_OFFSET_GDMA (4) ++#define PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C (3) ++#define PM_CLK_GATE_REG_OFFSET_SMC_NFI (1) ++#define PM_CLK_GATE_REG_MASK (0x03FFFFBA) ++ ++/* PM_SOFT_RST_REG */ ++#define PM_SOFT_RST_REG_OFFST_WARM_RST_FLAG (31) ++#define PM_SOFT_RST_REG_OFFST_CPU1 (29) ++#define PM_SOFT_RST_REG_OFFST_CPU0 (28) ++#define PM_SOFT_RST_REG_OFFST_SDIO (25) ++#define PM_SOFT_RST_REG_OFFST_GPU (24) ++#define PM_SOFT_RST_REG_OFFST_CIM (23) ++#define PM_SOFT_RST_REG_OFFST_LCDC (22) ++#define PM_SOFT_RST_REG_OFFST_I2S (21) ++#define PM_SOFT_RST_REG_OFFST_RAID (20) ++#define PM_SOFT_RST_REG_OFFST_SATA (19) ++#define PM_SOFT_RST_REG_OFFST_PCIE1 (18) ++#define PM_SOFT_RST_REG_OFFST_PCIE0 (17) ++#define PM_SOFT_RST_REG_OFFST_USB_HOST (16) ++#define PM_SOFT_RST_REG_OFFST_USB_OTG (15) ++#define PM_SOFT_RST_REG_OFFST_TIMER (14) ++#define PM_SOFT_RST_REG_OFFST_CRYPTO (13) ++#define PM_SOFT_RST_REG_OFFST_HCIE (12) ++#define PM_SOFT_RST_REG_OFFST_SWITCH (11) ++#define PM_SOFT_RST_REG_OFFST_GPIO (10) ++#define PM_SOFT_RST_REG_OFFST_UART3 (9) ++#define PM_SOFT_RST_REG_OFFST_UART2 (8) ++#define PM_SOFT_RST_REG_OFFST_UART1 (7) ++#define PM_SOFT_RST_REG_OFFST_RTC (5) ++#define PM_SOFT_RST_REG_OFFST_GDMA (4) ++#define PM_SOFT_RST_REG_OFFST_SPI_PCM_I2C (3) ++#define 
PM_SOFT_RST_REG_OFFST_DMC (2) ++#define PM_SOFT_RST_REG_OFFST_SMC_NFI (1) ++#define PM_SOFT_RST_REG_OFFST_GLOBAL (0) ++#define PM_SOFT_RST_REG_MASK (0xF3FFFFBF) ++ ++/* PMHS_CFG_REG */ ++#define PM_HS_CFG_REG_OFFSET_SDIO (25) ++#define PM_HS_CFG_REG_OFFSET_GPU (24) ++#define PM_HS_CFG_REG_OFFSET_CIM (23) ++#define PM_HS_CFG_REG_OFFSET_LCDC (22) ++#define PM_HS_CFG_REG_OFFSET_I2S (21) ++#define PM_HS_CFG_REG_OFFSET_RAID (20) ++#define PM_HS_CFG_REG_OFFSET_SATA (19) ++#define PM_HS_CFG_REG_OFFSET_PCIE1 (18) ++#define PM_HS_CFG_REG_OFFSET_PCIE0 (17) ++#define PM_HS_CFG_REG_OFFSET_USB_HOST (16) ++#define PM_HS_CFG_REG_OFFSET_USB_OTG (15) ++#define PM_HS_CFG_REG_OFFSET_TIMER (14) ++#define PM_HS_CFG_REG_OFFSET_CRYPTO (13) ++#define PM_HS_CFG_REG_OFFSET_HCIE (12) ++#define PM_HS_CFG_REG_OFFSET_SWITCH (11) ++#define PM_HS_CFG_REG_OFFSET_GPIO (10) ++#define PM_HS_CFG_REG_OFFSET_UART3 (9) ++#define PM_HS_CFG_REG_OFFSET_UART2 (8) ++#define PM_HS_CFG_REG_OFFSET_UART1 (7) ++#define PM_HS_CFG_REG_OFFSET_RTC (5) ++#define PM_HS_CFG_REG_OFFSET_GDMA (4) ++#define PM_HS_CFG_REG_OFFSET_SPI_PCM_I2S (3) ++#define PM_HS_CFG_REG_OFFSET_DMC (2) ++#define PM_HS_CFG_REG_OFFSET_SMC_NFI (1) ++#define PM_HS_CFG_REG_MASK (0x03FFFFBE) ++#define PM_HS_CFG_REG_MASK_SUPPORT (0x01100806) ++ ++/* PM_CACTIVE_STA_REG */ ++#define PM_CACTIVE_STA_REG_OFFSET_SDIO (25) ++#define PM_CACTIVE_STA_REG_OFFSET_GPU (24) ++#define PM_CACTIVE_STA_REG_OFFSET_CIM (23) ++#define PM_CACTIVE_STA_REG_OFFSET_LCDC (22) ++#define PM_CACTIVE_STA_REG_OFFSET_I2S (21) ++#define PM_CACTIVE_STA_REG_OFFSET_RAID (20) ++#define PM_CACTIVE_STA_REG_OFFSET_SATA (19) ++#define PM_CACTIVE_STA_REG_OFFSET_PCIE1 (18) ++#define PM_CACTIVE_STA_REG_OFFSET_PCIE0 (17) ++#define PM_CACTIVE_STA_REG_OFFSET_USB_HOST (16) ++#define PM_CACTIVE_STA_REG_OFFSET_USB_OTG (15) ++#define PM_CACTIVE_STA_REG_OFFSET_TIMER (14) ++#define PM_CACTIVE_STA_REG_OFFSET_CRYPTO (13) ++#define PM_CACTIVE_STA_REG_OFFSET_HCIE (12) ++#define 
PM_CACTIVE_STA_REG_OFFSET_SWITCH (11) ++#define PM_CACTIVE_STA_REG_OFFSET_GPIO (10) ++#define PM_CACTIVE_STA_REG_OFFSET_UART3 (9) ++#define PM_CACTIVE_STA_REG_OFFSET_UART2 (8) ++#define PM_CACTIVE_STA_REG_OFFSET_UART1 (7) ++#define PM_CACTIVE_STA_REG_OFFSET_RTC (5) ++#define PM_CACTIVE_STA_REG_OFFSET_GDMA (4) ++#define PM_CACTIVE_STA_REG_OFFSET_SPI_PCM_I2S (3) ++#define PM_CACTIVE_STA_REG_OFFSET_DMC (2) ++#define PM_CACTIVE_STA_REG_OFFSET_SMC_NFI (1) ++#define PM_CACTIVE_STA_REG_MASK (0x03FFFFBE) ++ ++/* PM_PWR_STA_REG */ ++#define PM_PWR_STA_REG_REG_OFFSET_SDIO (25) ++#define PM_PWR_STA_REG_REG_OFFSET_GPU (24) ++#define PM_PWR_STA_REG_REG_OFFSET_CIM (23) ++#define PM_PWR_STA_REG_REG_OFFSET_LCDC (22) ++#define PM_PWR_STA_REG_REG_OFFSET_I2S (21) ++#define PM_PWR_STA_REG_REG_OFFSET_RAID (20) ++#define PM_PWR_STA_REG_REG_OFFSET_SATA (19) ++#define PM_PWR_STA_REG_REG_OFFSET_PCIE1 (18) ++#define PM_PWR_STA_REG_REG_OFFSET_PCIE0 (17) ++#define PM_PWR_STA_REG_REG_OFFSET_USB_HOST (16) ++#define PM_PWR_STA_REG_REG_OFFSET_USB_OTG (15) ++#define PM_PWR_STA_REG_REG_OFFSET_TIMER (14) ++#define PM_PWR_STA_REG_REG_OFFSET_CRYPTO (13) ++#define PM_PWR_STA_REG_REG_OFFSET_HCIE (12) ++#define PM_PWR_STA_REG_REG_OFFSET_SWITCH (11) ++#define PM_PWR_STA_REG_REG_OFFSET_GPIO (10) ++#define PM_PWR_STA_REG_REG_OFFSET_UART3 (9) ++#define PM_PWR_STA_REG_REG_OFFSET_UART2 (8) ++#define PM_PWR_STA_REG_REG_OFFSET_UART1 (7) ++#define PM_PWR_STA_REG_REG_OFFSET_RTC (5) ++#define PM_PWR_STA_REG_REG_OFFSET_GDMA (4) ++#define PM_PWR_STA_REG_REG_OFFSET_SPI_PCM_I2S (3) ++#define PM_PWR_STA_REG_REG_OFFSET_DMC (2) ++#define PM_PWR_STA_REG_REG_OFFSET_SMC_NFI (1) ++#define PM_PWR_STA_REG_REG_MASK (0x03FFFFBE) ++ ++/* PM_CLK_CTRL_REG */ ++#define PM_CLK_CTRL_REG_OFFSET_I2S_MCLK (31) ++#define PM_CLK_CTRL_REG_OFFSET_DDR2_CHG_EN (30) ++#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF1_EN (29) ++#define PM_CLK_CTRL_REG_OFFSET_PCIE_REF0_EN (28) ++#define PM_CLK_CTRL_REG_OFFSET_TIMER_SIM_MODE (27) ++#define 
PM_CLK_CTRL_REG_OFFSET_I2SCLK_DIV (24) ++#define PM_CLK_CTRL_REG_OFFSET_I2SCLK_SEL (22) ++#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_DIV (20) ++#define PM_CLK_CTRL_REG_OFFSET_CLKOUT_SEL (16) ++#define PM_CLK_CTRL_REG_OFFSET_MDC_DIV (14) ++#define PM_CLK_CTRL_REG_OFFSET_CRYPTO_CLK_SEL (12) ++#define PM_CLK_CTRL_REG_OFFSET_CPU_PWR_MODE (9) ++#define PM_CLK_CTRL_REG_OFFSET_PLL_DDR2_SEL (7) ++#define PM_CLK_CTRL_REG_OFFSET_DIV_IMMEDIATE (6) ++#define PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV (4) ++#define PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL (0) ++ ++#define PM_CPU_CLK_DIV(DIV) { \ ++ PM_CLK_CTRL_REG &= ~((0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \ ++ PM_CLK_CTRL_REG |= (((DIV)&0x3) << PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV); \ ++} ++ ++#define PM_PLL_CPU_SEL(CPU) { \ ++ PM_CLK_CTRL_REG &= ~((0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \ ++ PM_CLK_CTRL_REG |= (((CPU)&0xF) << PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL); \ ++} ++ ++/* PM_PLL_LCD_I2S_CTRL_REG */ ++#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_MCLK_SMC_DIV (22) ++#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_R_SEL (17) ++#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_P (11) ++#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_M (3) ++#define PM_PLL_LCD_I2S_CTRL_REG_OFFSET_PLL_LCD_S (0) ++ ++/* PM_PLL_HM_PD_CTRL_REG */ ++/* ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PCIE_PHY1 (13) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PCIE_PHY0 (12) ++*/ ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY1 (11) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY0 (10) ++/* ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_USB_PHY1 (9) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_USB_PHY0 (8) ++*/ ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2SCD (6) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_I2S (5) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_LCD (4) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_USB (3) ++#define PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_RGMII (2) ++#define PM_PLL_HM_PD_CTRL_REG_MASK (0x00000C7C) ++ ++/* PM_REGULAT_CTRL_REG */ ++ ++/* PM_WDT_CTRL_REG */ ++#define 
PM_WDT_CTRL_REG_OFFSET_RESET_CPU_ONLY (0) ++ ++/* PM_WU_CTRL0_REG */ ++ ++/* PM_WU_CTRL1_REG */ ++ ++/* PM_CSR_REG - Clock Scaling Register*/ ++#define PM_CSR_REG_OFFSET_CSR_EN (30) ++#define PM_CSR_REG_OFFSET_CSR_NUM (0) ++ ++ ++#define CNS3XXX_PWR_CLK_EN(BLOCK) (0x1< ++ ++#define hard_smp_processor_id() \ ++ ({ \ ++ unsigned int cpunum; \ ++ __asm__("mrc p15, 0, %0, c0, c0, 5" \ ++ : "=r" (cpunum)); \ ++ cpunum &= 0x0F; \ ++ }) ++ ++/* ++ * We use IRQ1 as the IPI ++ */ ++static inline void smp_cross_call(const struct cpumask *mask) ++{ ++ gic_raise_softirq(mask, 2); ++} ++ ++static inline void smp_cross_call_cache(const struct cpumask *mask) ++{ ++ gic_raise_softirq(mask, 1); ++} ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/system.h +@@ -0,0 +1,51 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/system.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * Copyright (C) 2000 Deep Blue Solutions Ltd ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++#ifndef __ASM_ARCH_SYSTEM_H ++#define __ASM_ARCH_SYSTEM_H ++ ++#include ++#include ++#include ++#include ++ ++static inline void arch_idle(void) ++{ ++ /* ++ * This should do all the clock switching ++ * and wait for interrupt tricks ++ */ ++ cpu_do_idle(); ++} ++ ++static inline void arch_reset(char mode, const char *cmd) ++{ ++ /* ++ * To reset, we hit the on-board reset register ++ * in the system FPGA ++ */ ++ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(GLOBAL)); ++} ++ ++#endif +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/timex.h +@@ -0,0 +1,27 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/timex.h ++ * ++ * Cavium Networks architecture timex specifications ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#define CLOCK_TICK_RATE (50000000 / 16) +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/uncompress.h +@@ -0,0 +1,68 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/uncompress.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#include ++#include ++ ++#include ++ ++#define AMBA_UART_DR(base) (*(volatile unsigned char *)((base) + 0x00)) ++#define AMBA_UART_LCRH(base) (*(volatile unsigned char *)((base) + 0x2c)) ++#define AMBA_UART_CR(base) (*(volatile unsigned char *)((base) + 0x30)) ++#define AMBA_UART_FR(base) (*(volatile unsigned char *)((base) + 0x18)) ++ ++/* ++ * Return the UART base address ++ */ ++static inline unsigned long get_uart_base(void) ++{ ++ return CNS3XXX_UART0_BASE; ++} ++ ++/* ++ * This does not append a newline ++ */ ++static inline void putc(int c) ++{ ++ unsigned long base = get_uart_base(); ++ ++ while (AMBA_UART_FR(base) & (1 << 5)) ++ barrier(); ++ ++ AMBA_UART_DR(base) = c; ++} ++ ++static inline void flush(void) ++{ ++ unsigned long base = get_uart_base(); ++ ++ while (AMBA_UART_FR(base) & (1 << 3)) ++ barrier(); ++} ++ ++/* ++ * nothing to do ++ */ ++#define arch_decomp_setup() ++#define arch_decomp_wdog() +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/vmalloc.h +@@ -0,0 +1,26 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/vmalloc.h ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2003 ARM Limited ++ * Copyright (C) 2000 Russell King. ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. 
++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ */ ++ ++#define VMALLOC_END (PAGE_OFFSET + 0x18000000) +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/Kconfig +@@ -0,0 +1,101 @@ ++menu "CNS3XXX platform type" ++ depends on ARCH_CNS3XXX ++ ++config MACH_GW2388 ++ bool "Support Gateworks Laguna Platform" ++ select ARM_GIC ++ help ++ Include support for the Cavium Networks CNS3XXX MPCore Platform Baseboard. ++ This is a platform with an on-board ARM11 MPCore and has support for USB, ++ USB-OTG, MMC/SD/SDIO and PCI-E, etc. ++ ++config CNS3XXX_PM_API ++ bool "Support for CNS3XXX Power Managemnet API" ++ depends on ARCH_CNS3XXX ++ default y ++ help ++ Enable support for the CNS3XXX Power Managemnet API. ++ ++config CNS3XXX_RAID ++ bool "Support for CNS3XXX RAID" ++ depends on ARCH_CNS3XXX ++ help ++ Enable RAID 4/5/6 Hardware accelartion in CNS3XXX. ++ If unsure, say N. ++ ++config CNS3XXX_DMAC ++ bool "Support for CNS3XXX DMAC" ++ depends on ARCH_CNS3XXX ++ help ++ Enable support for the CNS3XXX DMA controllers. ++ ++choice ++ prompt "PROM VERSTION" ++ default SILICON ++ help ++ Select the PROM interrupt ID mapping. ++config SILICON ++ bool "CNS3XXX_SILICON" ++ help ++ Temporary option. 
++ Interrupt ++ ID Source Function Trigger Type ++ --- ------------- ------------- ---------------- ++ 32 clkscale_intr PMU rising edge ++ 33 sdio_intr SDIO high level ++ 34 l2cc_intr L2CC high level ++ 35 rtc_intr RTC high level ++ 36 i2s_intr I2S high level ++ 37 pcm_intr_n PCM high level ++ 38 spi_intr_n SPI high level ++ 39 i2c_intr_n I2C high level ++ 40 cim_intr CIM high level ++ 41 gpu_intr GPU high level ++ 42 lcd_intr LCD high level ++ 43 gpioa_intr GPIOA programmable ++ 44 gpiob_intr GPIOB programmable ++ 45 irda0_intr UART0 high level ++ 46 irda1_intr UART1 high level ++ 47 irda2_intr UART2 high level ++ 48 arm11_intr ARM11 high level ++ 49 swsta_intr PSE Status high level ++ 50 tstc_r0_intr PSE R0TxComplete rising edge ++ 51 fstc_r0_intr PSE R0RxComplete rising edge ++ 52 tsqe_r0_intr PSE R0QEmpty rising edge ++ 53 tsqe_r0_intr PSE R0QFull rising edge ++ 54 tstc_r1_intr PSE R1TxComplete rising edge ++ 55 fstc_r1_intr PSE R1RxComplete rising edge ++ 56 tsqe_r1_intr PSE R1QEmpty rising edge ++ 57 tsqe_r1_intr PSE R1QFull rising edge ++ 58 hnat_intr PPE high level ++ 59 crypto_intr CRYPTO high level ++ 60 hcie_intr HCIE rising edge ++ 61 pcie0_intr PCIE0 Device high level ++ 62 pcie1_intr PCIE1 Device high level ++ 63 usbotg_intr USB OTG high level ++ 64 ehci_intr USB EHCI high level ++ 65 sata_intr SATA high level ++ 66 raid_intr_n RAID high level ++ 67 smc_intr_n SMC high level ++ 68 dmac_abort_intr DMAC high level ++ 86:69 dmac_intr[17:0] DMAC high level ++ 87 pcie0_rc_intr PCIE0 RC high level ++ 88 pcie1_rc_intr PCIE1 RC high level ++ 89 timer1_intr TIMER 1 high level ++ 90 timer2_intr TIMER 2 high level ++ 91 ochi_intr_n USB OCHI high level ++ 92 timer3_intr TIMER 3 high level ++ 93 ext_intr0 Extrenal Pin programmable ++ 94 ext_intr1 Extrenal Pin programmable ++ 95 ext_intr2 Extrenal Pin programmable ++ ++endchoice ++ ++config CNS3XXX_GPU_ENVIRONMENT ++ bool "CNS3XXX GPU(GC300 2D Acceleration) Support" ++ default n ++ help ++ Say Y if you want to 
support 2D acceleration. ++ ++endmenu ++ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/laguna-setup.c +@@ -0,0 +1,593 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/laguna.c ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2008 ARM Limited ++ * Copyright (C) 2000 Deep Blue Solutions Ltd ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "core.h" ++ ++struct laguna_board_info { ++ char model[6]; ++ u32 config_bitmap; ++ u32 config2_bitmap; ++ u8 nor_flash_size; ++ u8 spi_flash_size; ++}; ++ ++static struct laguna_board_info laguna_info __initdata; ++ ++/* ++ * Cavium Networks ARM11 MPCore platform devices ++ */ ++ ++static struct mtd_partition laguna_norflash_partitions[] = { ++ /* Bootloader */ ++ { ++ .name = "bootloader", ++ .offset = 0, ++ .size = SZ_256K, ++ .mask_flags = MTD_WRITEABLE, /* force read-only */ ++ }, ++ /* Bootloader params */ ++ { ++ .name = "params", ++ .offset = SZ_256K, ++ .size = SZ_128K, ++ .mask_flags = 0, ++ }, ++ /* linux */ ++ { ++ .name = "linux", ++ .offset = SZ_256K + SZ_128K, ++ .size = SZ_2M, ++ .mask_flags = 0, ++ }, ++ /* Root FS */ ++ { ++ .name = "rootfs", ++ .offset = SZ_256K + SZ_128K + SZ_2M, ++ .size = SZ_16M - SZ_256K - SZ_128K - SZ_2M, ++ .mask_flags = 0, ++ } ++}; ++ ++static struct physmap_flash_data laguna_norflash_data = { ++ .width = 2, ++ .parts = laguna_norflash_partitions, ++ .nr_parts = ARRAY_SIZE(laguna_norflash_partitions), ++}; ++ ++static struct resource laguna_norflash_resource = { ++ .start = CNS3XXX_FLASH0_BASE, ++ .end = CNS3XXX_FLASH0_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device laguna_norflash_device = { ++ .name = "physmap-flash", ++ .id = 0, ++ .dev = { ++ .platform_data = &laguna_norflash_data, ++ }, ++ .num_resources = 1, ++ .resource = &laguna_norflash_resource, ++}; ++ ++/* UART0 */ ++static struct resource laguna_uart_resources[] = { ++ { ++ .start = CNS3XXX_UART0_BASE, ++ .end = 
CNS3XXX_UART0_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ },{ ++ .start = CNS3XXX_UART1_BASE, ++ .end = CNS3XXX_UART1_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ },{ ++ .start = CNS3XXX_UART2_BASE, ++ .end = CNS3XXX_UART2_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ }, ++}; ++ ++static struct plat_serial8250_port laguna_uart_data[] = { ++ { ++ .membase = (char*) (CNS3XXX_UART0_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART0_BASE), ++ .irq = IRQ_CNS3XXX_UART0, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ },{ ++ .membase = (char*) (CNS3XXX_UART1_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART1_BASE), ++ .irq = IRQ_CNS3XXX_UART1, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ },{ ++ .membase = (char*) (CNS3XXX_UART2_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART2_BASE), ++ .irq = IRQ_CNS3XXX_UART2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ }, ++ { }, ++}; ++ ++static struct platform_device laguna_uart = { ++ .name = "serial8250", ++ .id = PLAT8250_DEV_PLATFORM, ++ .dev.platform_data = laguna_uart_data, ++ .num_resources = 3, ++ .resource = laguna_uart_resources ++}; ++ ++/* SDIO, MMC/SD */ ++static struct resource laguna_sdio_resource[] = { ++ { ++ .start = CNS3XXX_SDIO_BASE, ++ .end = CNS3XXX_SDIO_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_CNS3XXX_SDIO, ++ .end = IRQ_CNS3XXX_SDIO, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++struct cns3xxx_sdhci_platdata laguna_sdio_platform_data = { ++ .max_width = 4, ++ .host_caps = (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), ++}; ++ ++static u64 laguna_device_sdhci_dmamask = 0xffffffffUL; ++ ++static struct platform_device laguna_sdio_device = { ++ .name = 
"cns3xxx-sdhci", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(laguna_sdio_resource), ++ .resource = laguna_sdio_resource, ++ .dev = { ++ .dma_mask = &laguna_device_sdhci_dmamask, ++ .coherent_dma_mask = 0xffffffffUL, ++ .platform_data = &laguna_sdio_platform_data, ++ } ++}; ++ ++static struct pca953x_platform_data laguna_pca_data = { ++ .gpio_base = 100, ++}; ++ ++static struct resource laguna_i2c_resource[] = { ++ { ++ .start = CNS3XXX_SSP_BASE + 0x20, ++ .end = 0x7100003f, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_CNS3XXX_I2C, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device laguna_i2c_controller_device = { ++ .name = "cns3xxx-i2c", ++ .num_resources = 2, ++ .resource = laguna_i2c_resource, ++}; ++ ++static struct resource laguna_usb_ehci_resource[] = { ++ { ++ .start = CNS3XXX_USB_BASE, ++ .end = CNS3XXX_USB_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_CNS3XXX_USB_EHCI, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 laguna_usb_dma_mask = 0xffffffffULL; ++ ++static struct platform_device laguna_usb_ehci_device = { ++ .name = "cns3xxx-ehci", ++ .num_resources = ARRAY_SIZE(laguna_usb_ehci_resource), ++ .resource = laguna_usb_ehci_resource, ++ .dev = { ++ .dma_mask = &laguna_usb_dma_mask, ++ .coherent_dma_mask = 0xffffffffULL, ++ }, ++}; ++ ++static struct resource laguna_usb_ohci_resource[] = { ++ { ++ .start = CNS3XXX_USB_OHCI_BASE, ++ .end = CNS3XXX_USB_OHCI_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_CNS3XXX_USB_OHCI, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 laguna_usb_ohci_dma_mask = 0xffffffffULL; ++static struct platform_device laguna_usb_ohci_device = { ++ .name = "cns3xxx-ohci", ++ .dev = { ++ .dma_mask = &laguna_usb_ohci_dma_mask, ++ .coherent_dma_mask = 0xffffffffULL, ++ }, ++ .num_resources = 2, ++ .resource = laguna_usb_ohci_resource, ++}; ++ ++static u64 laguna_usbotg_dma_mask = 0xffffffffULL; ++static struct lm_device laguna_usb_otg_device = { ++ 
.dev = { ++ .dma_mask = &laguna_usbotg_dma_mask, ++ .coherent_dma_mask = 0xffffffffULL, ++ }, ++ .resource = { ++ .start = CNS3XXX_USBOTG_BASE, ++ .end = CNS3XXX_USBOTG_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ .irq = IRQ_CNS3XXX_USB_OTG, ++}; ++ ++static struct resource laguna_ahci_resource[] = { ++ { ++ .start = CNS3XXX_SATA2_BASE, ++ .end = CNS3XXX_SATA2_BASE + CNS3XXX_SATA2_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = IRQ_CNS3XXX_SATA, ++ .end = IRQ_CNS3XXX_SATA, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 laguna_device_ahci_dmamask = 0xffffffffUL; ++ ++static struct platform_device laguna_ahci = { ++ .name = "cns3xxx_ahci", ++ .id = -1, ++ .dev = { ++ .dma_mask = &laguna_device_ahci_dmamask, ++ .coherent_dma_mask = 0xffffffffUL, ++ }, ++ .resource = laguna_ahci_resource, ++ .num_resources = ARRAY_SIZE(laguna_ahci_resource), ++}; ++ ++/* SPI Flash */ ++static struct mtd_partition laguna_spiflash_partitions[] = { ++ /* Bootloader */ ++ { ++ .name = "bootloader", ++ .offset = 0, ++ .size = SZ_128K, ++ }, ++ /* Bootloader params */ ++ { ++ .name = "params", ++ .offset = SZ_128K, ++ .size = SZ_128K, ++ }, ++ /* linux */ ++ { ++ .name = "linux", ++ .offset = SZ_256K, ++ .size = 0x180000, ++ .mask_flags = 0, ++ }, ++ /* FileSystem */ ++ { ++ .name = "rootfs", ++ .offset = SZ_256K + 0x180000, ++ .size = SZ_4M - SZ_256K - 0x180000, ++ } ++}; ++ ++static struct flash_platform_data laguna_spiflash_data = { ++ .parts = laguna_spiflash_partitions, ++ .nr_parts = ARRAY_SIZE(laguna_spiflash_partitions), ++}; ++ ++static struct spi_board_info __initdata laguna_spi_devices[] = { ++ { ++ .modalias = "m25p80", ++ .platform_data = &laguna_spiflash_data, ++ .max_speed_hz = 50000000, ++ .bus_num = 1, ++ .chip_select = 0, ++ }, ++}; ++ ++static struct platform_device laguna_spi_controller_device = { ++ .name = "cns3xxx_spi", ++}; ++ ++static struct gpio_led laguna_gpio_leds[] = { ++ { ++ .name = "user1", /* Green Led */ ++ .gpio = 115, ++ 
.active_low = 1, ++ }, ++ { ++ .name = "user2", /* Red Led */ ++ .gpio = 114, ++ .active_low = 1, ++ }, ++}; ++ ++static struct gpio_led_platform_data laguna_gpio_leds_data = { ++ .num_leds = 2, ++ .leds = laguna_gpio_leds, ++}; ++ ++static struct platform_device laguna_gpio_leds_device = { ++ .name = "leds-gpio", ++ .id = -1, ++ .dev.platform_data = &laguna_gpio_leds_data, ++}; ++ ++static struct eth_plat_info laguna_net_data = { ++ .ports = 3, // Bring Up both Eth port by Default ++}; ++ ++static struct platform_device laguna_net_device = { ++ .name = "cns3xxx-net", ++ .id = -1, ++ .dev.platform_data = &laguna_net_data, ++}; ++ ++static struct memory_accessor *at24_mem_acc; ++ ++static void at24_setup(struct memory_accessor *mem_acc, void *context) ++{ ++ char buf[8]; ++ ++ at24_mem_acc = mem_acc; ++ ++ /* Read MAC addresses */ ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x100, 6) == 6) ++ memcpy(&laguna_net_data.eth0_hwaddr, buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x106, 6) == 6) ++ memcpy(&laguna_net_data.eth1_hwaddr, buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x10C, 6) == 6) ++ memcpy(&laguna_net_data.eth2_hwaddr, buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x112, 6) == 6) ++ memcpy(&laguna_net_data.cpu_hwaddr, buf, ETH_ALEN); ++ ++ /* Read out Model Information */ ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x130, 16) == 16) ++ memcpy(&laguna_info.model, buf, 16); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x140, 1) == 1) ++ memcpy(&laguna_info.nor_flash_size, buf, 1); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x141, 1) == 1) ++ memcpy(&laguna_info.spi_flash_size, buf, 1); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x142, 4) == 4) ++ memcpy(&laguna_info.config_bitmap, buf, 8); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x146, 4) == 4) ++ memcpy(&laguna_info.config2_bitmap, buf, 8); ++}; ++ ++static struct at24_platform_data laguna_eeprom_info = { ++ .byte_len = 1024, ++ .page_size = 16, ++ 
.flags = AT24_FLAG_READONLY, ++ .setup = at24_setup, ++}; ++ ++static struct i2c_board_info __initdata laguna_i2c_devices[] = { ++ { ++ I2C_BOARD_INFO("pca9555", 0x23), ++ .platform_data = &laguna_pca_data, ++ }, ++ { ++ I2C_BOARD_INFO("gsp", 0x29), ++ }, ++ { ++ I2C_BOARD_INFO ("24c08",0x50), ++ .platform_data = &laguna_eeprom_info, ++ }, ++ { ++ I2C_BOARD_INFO("ds1672", 0x68), ++ }, ++}; ++ ++static void __init laguna_init(void) ++{ ++ cns3xxx_sys_init(); ++ ++ platform_device_register(&laguna_i2c_controller_device); ++ ++ i2c_register_board_info(0, laguna_i2c_devices, ARRAY_SIZE(laguna_i2c_devices)); ++ ++ pm_power_off = cns3xxx_power_off; ++} ++ ++static int __init laguna_model_setup(void) ++{ ++ if (!machine_is_gw2388()) ++ return 0; ++ ++ printk("Running on Gateworks Laguna %s\n", laguna_info.model); ++ ++ if (strncmp(laguna_info.model, "GW", 2) == 0) { ++ if (laguna_info.config_bitmap & ETH0_LOAD) ++ laguna_net_data.ports |= BIT(0); ++ if (laguna_info.config_bitmap & ETH1_LOAD) ++ laguna_net_data.ports |= BIT(1); ++ if (laguna_info.config_bitmap & ETH2_LOAD) ++ laguna_net_data.ports |= BIT(2); ++ if (laguna_net_data.ports) ++ platform_device_register(&laguna_net_device); ++ ++ if (laguna_info.config_bitmap & (SATA0_LOAD | SATA1_LOAD)) ++ platform_device_register(&laguna_ahci); ++ ++ if (laguna_info.config_bitmap & (PCIe0_LOAD)) ++ cns3xxx_pcie_init(1); ++ ++ if (laguna_info.config_bitmap & (PCIe1_LOAD)) ++ cns3xxx_pcie_init(2); ++ ++ if (laguna_info.config_bitmap & (USB0_LOAD)) ++ lm_device_register(&laguna_usb_otg_device); ++ ++ if (laguna_info.config_bitmap & (USB1_LOAD)) { ++ platform_device_register(&laguna_usb_ehci_device); ++ platform_device_register(&laguna_usb_ohci_device); ++ } ++ ++ if (laguna_info.config_bitmap & (SD_LOAD)) ++ platform_device_register(&laguna_sdio_device); ++ ++ if (laguna_info.config_bitmap & (UART0_LOAD)) ++ laguna_uart.num_resources = 1; ++ if (laguna_info.config_bitmap & (UART1_LOAD)) ++ laguna_uart.num_resources = 2; ++ if 
(laguna_info.config_bitmap & (UART2_LOAD)) ++ laguna_uart.num_resources = 3; ++ platform_device_register(&laguna_uart); ++ ++ if (laguna_info.config2_bitmap & (NOR_FLASH_LOAD)) { ++ switch (laguna_info.nor_flash_size) { ++ case 1: ++ laguna_norflash_partitions[3].size = SZ_8M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_norflash_resource.end = CNS3XXX_FLASH0_BASE + SZ_8M - 1; ++ break; ++ case 2: ++ laguna_norflash_partitions[3].size = SZ_16M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_norflash_resource.end = CNS3XXX_FLASH0_BASE + SZ_16M - 1; ++ break; ++ case 3: ++ laguna_norflash_partitions[3].size = SZ_32M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_norflash_resource.end = CNS3XXX_FLASH0_BASE + SZ_32M - 1; ++ break; ++ case 4: ++ laguna_norflash_partitions[3].size = SZ_64M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_norflash_resource.end = CNS3XXX_FLASH0_BASE + SZ_64M - 1; ++ break; ++ case 5: ++ laguna_norflash_partitions[3].size = SZ_128M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_norflash_resource.end = CNS3XXX_FLASH0_BASE + SZ_128M - 1; ++ break; ++ } ++ platform_device_register(&laguna_norflash_device); ++ } ++ ++ if (laguna_info.config2_bitmap & (SPI_FLASH_LOAD)) { ++ switch (laguna_info.spi_flash_size) { ++ case 1: ++ laguna_spiflash_partitions[3].size = SZ_4M - SZ_256K - 0x180000; ++ break; ++ case 2: ++ laguna_spiflash_partitions[3].size = SZ_8M - SZ_256K - 0x180000; ++ break; ++ case 3: ++ laguna_spiflash_partitions[3].size = SZ_16M - SZ_256K - 0x180000; ++ break; ++ case 4: ++ laguna_spiflash_partitions[3].size = SZ_32M - SZ_256K - 0x180000; ++ break; ++ case 5: ++ laguna_spiflash_partitions[3].size = SZ_64M - SZ_256K - 0x180000; ++ break; ++ } ++ spi_register_board_info(laguna_spi_devices, ARRAY_SIZE(laguna_spi_devices)); ++ } ++ ++ if (laguna_info.config_bitmap & (SPI0_LOAD | SPI1_LOAD)) ++ { ++ platform_device_register(&laguna_spi_controller_device); ++ } ++ ++ /* ++ * Do any model specific setup not known by the bitmap by matching ++ * the first 6 characters of the model 
name ++ */ ++ ++ if (strncmp(laguna_info.model, "GW2388", 6) == 0) ++ { ++ platform_device_register(&laguna_gpio_leds_device); ++ } ++ } else { ++ // Do some defaults here, not sure what yet ++ } ++ ++ return 0; ++} ++late_initcall(laguna_model_setup); ++ ++MACHINE_START(GW2388, "Gateworks Laguna Platform") ++ .phys_io = CNS3XXX_UART0_BASE, ++ .io_pg_offst = (CNS3XXX_UART0_BASE_VIRT >> 18) & 0xfffc, ++ .boot_params = 0x00000100, ++ .map_io = cns3xxx_map_io, ++ .init_irq = cns3xxx_init_irq, ++ .timer = &cns3xxx_timer, ++ .init_machine = laguna_init, ++MACHINE_END +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/lm.c +@@ -0,0 +1,98 @@ ++/* ++ * linux/arch/arm/mach-integrator/lm.c ++ * ++ * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define to_lm_device(d) container_of(d, struct lm_device, dev) ++#define to_lm_driver(d) container_of(d, struct lm_driver, drv) ++ ++static int lm_match(struct device *dev, struct device_driver *drv) ++{ ++ return 1; ++} ++ ++static int lm_bus_probe(struct device *dev) ++{ ++ struct lm_device *lmdev = to_lm_device(dev); ++ struct lm_driver *lmdrv = to_lm_driver(dev->driver); ++ ++ return lmdrv->probe(lmdev); ++} ++ ++static int lm_bus_remove(struct device *dev) ++{ ++ struct lm_device *lmdev = to_lm_device(dev); ++ struct lm_driver *lmdrv = to_lm_driver(dev->driver); ++ ++ if (lmdrv->remove) ++ lmdrv->remove(lmdev); ++ return 0; ++} ++ ++static struct bus_type lm_bustype = { ++ .name = "logicmodule", ++ .match = lm_match, ++ .probe = lm_bus_probe, ++ .remove = lm_bus_remove, ++}; ++ ++static int __init lm_init(void) ++{ ++ return bus_register(&lm_bustype); ++} ++ ++postcore_initcall(lm_init); ++ ++int lm_driver_register(struct lm_driver *drv) ++{ ++ 
drv->drv.bus = &lm_bustype; ++ return driver_register(&drv->drv); ++} ++ ++void lm_driver_unregister(struct lm_driver *drv) ++{ ++ driver_unregister(&drv->drv); ++} ++ ++static void lm_device_release(struct device *dev) ++{ ++ struct lm_device *d = to_lm_device(dev); ++ ++ kfree(d); ++} ++ ++int lm_device_register(struct lm_device *dev) ++{ ++ int ret; ++ ++ dev->dev.release = lm_device_release; ++ dev->dev.bus = &lm_bustype; ++ ++ ret = dev_set_name(&dev->dev, "lm%d", dev->id); ++ if (ret) ++ return ret; ++ dev->resource.name = dev_name(&dev->dev); ++ ++ ret = request_resource(&iomem_resource, &dev->resource); ++ if (ret == 0) { ++ ret = device_register(&dev->dev); ++ if (ret) ++ release_resource(&dev->resource); ++ } ++ return ret; ++} ++ ++EXPORT_SYMBOL(lm_driver_register); ++EXPORT_SYMBOL(lm_driver_unregister); +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/localtimer.c +@@ -0,0 +1,26 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/localtimer.c ++ * ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* ++ * Setup the local clock events for a CPU. ++ */ ++void __cpuinit local_timer_setup(struct clock_event_device *evt) ++{ ++ evt->irq = IRQ_LOCALTIMER; ++ twd_timer_setup(evt); ++} +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/Makefile +@@ -0,0 +1,14 @@ ++# ++# Makefile for the linux kernel. 
++# ++ ++obj-y := core.o lm.o ++obj-$(CONFIG_MACH_GW2388) += laguna-setup.o ++obj-$(CONFIG_SMP) += platsmp.o headsmp.o ++obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o ++obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o ++obj-$(CONFIG_PCIEPORTBUS) += pcie.o ++obj-$(CONFIG_CNS3XXX_RAID) += rdma.o ++obj-$(CONFIG_CNS3XXX_DMAC) += dmac.o ++obj-$(CONFIG_CNS3XXX_PM_API) += pm.o ++ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/Makefile.boot +@@ -0,0 +1,4 @@ ++ zreladdr-y := 0x00008000 ++params_phys-y := 0x00000100 ++initrd_phys-y := 0x00C00000 ++kernel_phys-y := 0x00600000 +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/platsmp.c +@@ -0,0 +1,220 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/platsmp.c ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "core.h" ++ ++extern void cns3xxx_secondary_startup(void); ++ ++/* ++ * control for which core is the next to come out of the secondary ++ * boot "holding pen" ++ */ ++volatile int __cpuinitdata pen_release = -1; ++ ++static void __iomem *scu_base_addr(void) ++{ ++ return (void __iomem *)(CNS3XXX_TC11MP_SCU_BASE_VIRT); ++} ++ ++static inline unsigned int get_core_count(void) ++{ ++ void __iomem *scu_base = scu_base_addr(); ++ if (scu_base) ++ return scu_get_core_count(scu_base); ++ return 1; ++} ++ ++static DEFINE_SPINLOCK(boot_lock); ++ ++void __cpuinit platform_secondary_init(unsigned int cpu) ++{ ++ trace_hardirqs_off(); ++ ++ /* ++ * if any interrupts are already enabled for the primary ++ * core (e.g. timer irq), then they will not have been enabled ++ * for us: do so ++ */ ++ gic_cpu_init(0, (void __iomem *)(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT)); ++ set_interrupt_pri(1, 0); // set cache broadcast ipi to highest priority ++ ++ /* ++ * let the primary processor know we're out of the ++ * pen, then head off into the C entry point ++ */ ++ pen_release = -1; ++ smp_wmb(); ++ ++ /* ++ * Synchronise with the boot thread. ++ */ ++ spin_lock(&boot_lock); ++ spin_unlock(&boot_lock); ++} ++ ++int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) ++{ ++ unsigned long timeout; ++ ++ /* ++ * set synchronisation state between this boot processor ++ * and the secondary one ++ */ ++ spin_lock(&boot_lock); ++ ++ /* ++ * The secondary processor is waiting to be released from ++ * the holding pen - release it, then wait for it to flag ++ * that it has been released by resetting pen_release. ++ * ++ * Note that "pen_release" is the hardware CPU ID, whereas ++ * "cpu" is Linux's internal ID. 
++ */ ++ pen_release = cpu; ++ flush_cache_all(); ++ ++ /* ++ * XXX ++ * ++ * This is a later addition to the booting protocol: the ++ * bootMonitor now puts secondary cores into WFI, so ++ * poke_milo() no longer gets the cores moving; we need ++ * to send a soft interrupt to wake the secondary core. ++ * Use smp_cross_call() for this, since there's little ++ * point duplicating the code here ++ */ ++ smp_cross_call(cpumask_of(cpu)); ++ ++ timeout = jiffies + (1 * HZ); ++ while (time_before(jiffies, timeout)) { ++ smp_rmb(); ++ if (pen_release == -1) ++ break; ++ ++ udelay(10); ++ } ++ ++ /* ++ * now the secondary core is starting up let it run its ++ * calibrations, then wait for it to finish ++ */ ++ spin_unlock(&boot_lock); ++ ++ return pen_release != -1 ? -ENOSYS : 0; ++} ++ ++static void __init poke_milo(void) ++{ ++ /* nobody is to be released from the pen yet */ ++ pen_release = -1; ++ ++ /* write the address of secondary startup into the general purpose register */ ++ __raw_writel(virt_to_phys(cns3xxx_secondary_startup), (void __iomem *)(0xFFF07000 + 0x0600)); ++ ++ mb(); ++} ++ ++/* ++ * Initialise the CPU possible map early - this describes the CPUs ++ * which may be present or become present in the system. ++ */ ++void __init smp_init_cpus(void) ++{ ++ unsigned int i, ncores = get_core_count(); ++ ++ for (i = 0; i < ncores; i++) ++ set_cpu_possible(i, true); ++} ++ ++void __init smp_prepare_cpus(unsigned int max_cpus) ++{ ++ unsigned int ncores = get_core_count(); ++ unsigned int cpu = smp_processor_id(); ++ int i; ++ ++ /* sanity check */ ++ if (ncores == 0) { ++ printk(KERN_ERR ++ "CNS3XXX: strange CM count of 0? Default to 1\n"); ++ ++ ncores = 1; ++ } ++ ++ if (ncores > NR_CPUS) { ++ printk(KERN_WARNING ++ "CNS3XXX: no. of cores (%d) greater than configured " ++ "maximum of %d - clipping\n", ++ ncores, NR_CPUS); ++ ncores = NR_CPUS; ++ } ++ ++ smp_store_cpu_info(cpu); ++ ++ /* ++ * are we trying to boot more cores than exist? 
++ */ ++ if (max_cpus > ncores) ++ max_cpus = ncores; ++ ++ /* ++ * Initialise the present map, which describes the set of CPUs ++ * actually populated at the present time. ++ */ ++ for (i = 0; i < max_cpus; i++) ++ set_cpu_present(i, true); ++ ++ /* ++ * Initialise the SCU if there are more than one CPU and let ++ * them know where to start. Note that, on modern versions of ++ * MILO, the "poke" doesn't actually do anything until each ++ * individual core is sent a soft interrupt to get it out of ++ * WFI ++ */ ++ if (max_cpus > 1) { ++ /* ++ * Enable the local timer or broadcast device for the ++ * boot CPU, but only if we have more than one CPU. ++ */ ++ percpu_timer_setup(); ++ ++ scu_enable(scu_base_addr()); ++ poke_milo(); ++ } ++} +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/pm.c +@@ -0,0 +1,476 @@ ++/****************************************************************************** ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * cns3xxx_pwr_clk_en - clock enable ++ * @block: bitmap for peripheral ++ */ ++void cns3xxx_pwr_clk_en(unsigned int block) ++{ ++ PM_CLK_GATE_REG |= (block&PM_CLK_GATE_REG_MASK); ++} ++ ++/* ++ * cns3xxx_pwr_soft_rst - software reset ++ * @block: bitmap for peripheral ++ */ ++void cns3xxx_pwr_soft_rst_force(unsigned int block) ++{ ++ /* bit 0, 28, 29 => program low to reset, ++ * the other else program low and then high ++ */ ++ if (block & 0x30000001) { ++ PM_SOFT_RST_REG &= ~(block&PM_SOFT_RST_REG_MASK); ++ } else { ++ PM_SOFT_RST_REG &= ~(block&PM_SOFT_RST_REG_MASK); ++ PM_SOFT_RST_REG |= (block&PM_SOFT_RST_REG_MASK); ++ } ++} ++ ++void cns3xxx_pwr_soft_rst(unsigned int block) ++{ ++ static unsigned int soft_reset = 0; ++ ++ if(soft_reset & block) { ++ //Because SPI/I2C/GPIO use the same block, just only reset once... 
++ return; ++ } ++ else { ++ soft_reset |= block; ++ } ++ cns3xxx_pwr_soft_rst_force(block); ++} ++ ++/* ++ * void cns3xxx_pwr_lp_hs - lower power handshake ++ * @dev: bitmap for device ++ * ++ */ ++void cns3xxx_lp_hs(unsigned int dev) ++{ ++ ++ if (PM_HS_CFG_REG_MASK_SUPPORT & dev) { ++ PM_HS_CFG_REG |= dev; ++ ++ /* TODO: disable clock */ ++ } ++} ++ ++/* ++ * cns3xxx_pwr_mode - change CPU power mode ++ * @pwr_mode: CPU power mode ++ * CNS3XXX_PWR_CPU_MODE_DFS, CNS3XXX_PWR_CPU_MODE_IDLE ++ * CNS3XXX_PWR_CPU_MODE_HALT, CNS3XXX_PWR_CPU_MODE_DOZE ++ * CNS3XXX_PWR_CPU_MODE_SLEEP, CNS3XXX_PWR_CPU_MODE_HIBERNATE ++ */ ++static void cns3xxx_pwr_mode(unsigned int pwr_mode) ++{ ++ if (CNS3XXX_PWR_CPU_MODE_HIBERNATE < pwr_mode) { ++ return; ++ } ++ ++ PM_CLK_CTRL_REG &= ++ ~(0x7<> PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL) &0xf; ++ old_div = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV) & 0x3; ++ ++ if ((cpu_sel == old_cpu) ++ && (div_sel == old_div)) { ++ return; ++ } ++ ++ /* 1. Set PLL_CPU_SEL */ ++ PM_PLL_CPU_SEL(cpu_sel); ++ PM_CPU_CLK_DIV(div_sel); ++ ++ /* 2. Set in DFS mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++ ++ /* 3. disable all interrupt except interrupt ID-32 (clkscale_intr) */ ++ /* disable all interrupt */ ++ GIC_REG_VALUE(0x184) = 0xffffffff; ++ GIC_REG_VALUE(0x188) = 0xffffffff; ++ /* enable interrupt id 32*/ ++ GIC_REG_VALUE(0x104) = 0x00000001; ++ GIC_REG_VALUE(0x108) = 0x80000000; ++ ++ /* 4. 
Let CPU enter into WFI state */ ++ asm volatile( ++ "mov r0, #0\n" ++ "mcr p15, 0, r0, c7, c0, 4\n" ++ ); ++ ++ ++#if 0 ++ { ++ int i; ++ for (i=IRQ_CNS3XXX_PMU+1; i> 3); ++ *(volatile unsigned int *) (CNS3XXX_TIMER1_2_3_BASE_VIRT + TIMER1_AUTO_RELOAD_OFFSET) ++ = pclk/15*0x25000; ++ } ++ ++} ++ ++ ++/* ++ * clock_out_sel - select clock source to ClkOut pin ++ * This function just select pll_cpu to ClkOut pin, ++ * we can measure the ClkOut frequency to make sure whether pll_cpu is change ++ * ++ */ ++void clock_out_sel(void) ++{ ++ ++ int temp = PM_CLK_CTRL_REG; ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 26); /* Set GPIOB26 to ClkOut*/ ++ /* debug purpose, use ext intr 1 and 2 to generate interrupt */ ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 27); /* Set GPIOB27 to external interrupt 2*/ ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 28); /* Set GPIOB28 to external interrupt 1*/ ++ /* select ClkOut source as pll_cpu_clk and ClkOut divider is by 16 */ ++ temp &=~(0x3 << 20); ++ temp &=~(0xf << 16); ++ temp |= (0x3 << 20); ++ temp |= (0x1 << 16); ++ PM_CLK_CTRL_REG = temp; ++} ++ ++void cns3xxx_wfi(void) ++{ ++ mb(); ++ asm volatile( ++ "mov r0, #0\n" ++ "mcr p15, 0, r0, c7, c10, 4\n" ++ "mcr p15, 0, r0, c7, c0, 4\n" ++ ); ++} ++ ++/* ++ * cns3xxx_pwr_sleep - ++ */ ++void cns3xxx_pwr_sleep(void) ++{ ++ /* 1. Set in sleep mode ++ * 2. disable all functional block ++ * 3. make sure that all function block are in power off state ++ * 4. power down all PLL ++ * 5. Let CPU enter into WFI state ++ * 6. Wait PMU to change PLL_CPU and divider and wake up CPU ++ */ ++ int i, j, count = 0; ++ /* 1. Set in SLEEP mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_SLEEP); ++ ++ /* 2. disable all functional block */ ++ i = PM_CLK_GATE_REG; ++ PM_CLK_GATE_REG = 0x0; ++ ++ /* 3. make sure that all function block are in power off state */ ++ while (0x4 != PM_PWR_STA_REG) { ++ count++; ++ if (1000 == count) { ++ count = PM_PWR_STA_REG; ++ break; ++ } ++ }; ++ ++ /* 4. 
power down all PLL */ ++ j = PM_PLL_HM_PD_CTRL_REG; ++ PM_PLL_HM_PD_CTRL_REG = 0x00003FFC; ++ ++#if 0 ++ /* set DMC to low power hand shake */ ++ PM_HS_CFG_REG |= (0x1 << 2); ++ /* disable DMC */ ++ PM_CLK_GATE_REG &= ~(0x1<<2); ++#endif ++ ++ /* set wake up interrupt source, use ext_intr1 to wake up*/ ++ PM_WU_CTRL0_REG = 0x0; PM_WU_CTRL1_REG = 0x40000000; ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 27); ++ ++ /* 5. Let CPU enter into WFI state */ ++ GIC_REG_VALUE(0x104) = 0x1; /* enable clock scaling interrupt */ ++ printk("<0>enter WFI\n"); ++ cns3xxx_wfi(); ++ PM_CLK_GATE_REG = i; ++ PM_PLL_HM_PD_CTRL_REG = j; ++ printk("<0>leave WFI\n"); ++ GIC_REG_VALUE(0x104) = 0xffffffff; ++ GIC_REG_VALUE(0x108) = 0xffffffff; ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++} ++ ++/* ++ * cns3xxx_pwr_sleep_test - enter into sleep and won't be wake up ++ */ ++void cns3xxx_pwr_sleep_test(void) ++{ ++ int i, j, count = 0; ++ /* 1. Set in SLEEP mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_SLEEP); ++ ++ /* 2. disable all functional block */ ++ i = PM_CLK_GATE_REG; ++ PM_CLK_GATE_REG = 0x0; ++ ++ /* 3. make sure that all function block are in power off state */ ++ while (0x4 != PM_PWR_STA_REG) { ++ count++; ++ if (1000 == count) { ++ count = PM_PWR_STA_REG; ++ break; ++ } ++ }; ++ /* 4. power down all PLL */ ++ j = PM_PLL_HM_PD_CTRL_REG; ++ PM_PLL_HM_PD_CTRL_REG = 0x00003FFC; ++ ++ /* set wake up interrupt source, do nothing */ ++ PM_WU_CTRL0_REG = 0x0; PM_WU_CTRL1_REG = 0x00000000; ++ ++ /* 5. Let CPU enter into WFI state */ ++ GIC_REG_VALUE(0x104) = 0x1; /* enable clock scaling interrupt */ ++ printk("<0>enter WFI\n"); ++ cns3xxx_wfi(); ++ PM_CLK_GATE_REG = i; ++ PM_PLL_HM_PD_CTRL_REG = j; ++ printk("<0>leave WFI, count 0x%.8x\n", count); ++ GIC_REG_VALUE(0x104) = 0xffffffff; ++ GIC_REG_VALUE(0x108) = 0xffffffff; ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++} ++ ++/* ++ * cns3xxx_pwr_doze - ++ */ ++void cns3xxx_pwr_doze(void) ++{ ++ /* 1. 
Set in doze mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DOZE); ++ ++ ++ /* set wake up interrupt source*/ ++ PM_WU_CTRL0_REG = 0x0; PM_WU_CTRL1_REG = 0x40000000; ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 27); ++ ++ /* 5. Let CPU enter into WFI state */ ++ GIC_REG_VALUE(0x104) = 0x1; /* enable clock scaling interrupt */ ++ printk("<0>enter WFI\n"); ++ cns3xxx_wfi(); ++ printk("<0>leave WFI\n"); ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++} ++ ++/* ++ * cns3xxx_pwr_idle - ++ * IDLE mode just turn off CPU clock. ++ * L2 cache, peripheral, PLL, external DRAM and chip power are still on ++ */ ++void cns3xxx_pwr_idle(void) ++{ ++ /* 1. Set in IDLE mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_IDLE); ++ ++#if 1 ++ /* disable all interrupt except interrupt ID-32 (clkscale_intr) ++ * ++ * CPU can be wake up by any interrupt here, ++ * we disable all interrupt is just for testing ++ */ ++ ++ /* disable all interrupt */ ++ GIC_REG_VALUE(0x184) = 0xffffffff; GIC_REG_VALUE(0x188) = 0xffffffff; ++ /* enable interrupt id 32*/ ++ GIC_REG_VALUE(0x104) = 0x00000001; GIC_REG_VALUE(0x108) = 0x00000000; ++#endif ++ ++ /* set wake up interrupt source*/ ++ PM_WU_CTRL0_REG = 0x0; PM_WU_CTRL1_REG = 0x40000000; ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 27); ++ ++ /* 5. Let CPU enter into WFI state */ ++ printk("<0>enter WFI\n"); ++ cns3xxx_wfi(); ++ printk("<0>leave WFI\n"); ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++ GIC_REG_VALUE(0x104) = 0xffffffff; ++ GIC_REG_VALUE(0x108) = 0xffffffff; ++} ++ ++/* ++ * cns3xxx_pwr_halt - ++ * HALT mode just turn off CPU and L2 cache clock. ++ * peripheral, PLL, external DRAM and chip power are still on ++ */ ++ ++void cns3xxx_pwr_halt(void) ++{ ++ /* 1. 
Set in HALT mode */ ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_HALT); ++ ++ /* ++ * CPU can be wake up by any interrupt here, ++ * for test, we disable all interrupt except ID-32 ++ */ ++ /* disable all interrupt */ ++ GIC_REG_VALUE(0x184) = 0xffffffff; GIC_REG_VALUE(0x188) = 0xffffffff; ++ /* enable interrupt id 32*/ ++ GIC_REG_VALUE(0x104) = 0x00000001; GIC_REG_VALUE(0x108) = 0x00000000; ++ ++ /* set wake up interrupt source to trigger clock scaling interrupt */ ++ PM_WU_CTRL0_REG = 0x0; PM_WU_CTRL1_REG = 0x40000000; ++ //MISC_GPIOB_PIN_ENABLE_REG |= (0x1 << 27); ++ ++ /* 5. Let CPU enter into WFI state */ ++ cns3xxx_wfi(); ++ cns3xxx_pwr_mode(CNS3XXX_PWR_CPU_MODE_DFS); ++ GIC_REG_VALUE(0x104) = 0xffffffff; ++ GIC_REG_VALUE(0x108) = 0xffffffff; ++} ++ ++/* ++ * cns3xxx_cpu_clock - return CPU/L2 clock ++ * aclk: cpu clock/2 ++ * hclk: cpu clock/4 ++ * pclk: cpu clock/8 ++ */ ++int cns3xxx_cpu_clock(void) ++{ ++#define CPU_BASE 300 ++ int cpu, cpu_sel, div_sel; ++ ++ cpu_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_PLL_CPU_SEL) & 0xf; ++ div_sel = (PM_CLK_CTRL_REG >> PM_CLK_CTRL_REG_OFFSET_CPU_CLK_DIV) & 0x3; ++ ++ cpu = (CPU_BASE + ((cpu_sel/3) * 100) + ((cpu_sel %3) *33)) >> div_sel; ++ return cpu; ++} ++ ++static int __init cns3xxx_pmu_init(void) ++{ ++ return 0; ++} ++ ++ ++EXPORT_SYMBOL(cns3xxx_pwr_power_up); ++EXPORT_SYMBOL(cns3xxx_pwr_clk_en); ++EXPORT_SYMBOL(cns3xxx_pwr_soft_rst); ++EXPORT_SYMBOL(cns3xxx_pwr_soft_rst_force); ++EXPORT_SYMBOL(cns3xxx_cpu_clock); ++ ++module_init(cns3xxx_pmu_init); +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/rdma.c +@@ -0,0 +1,901 @@ ++/* ++ * rdma.c - CNS3XXX RAID-DMA h/w acceleration ++ * ++ * Revision History: arch/arm/mach-cns3xxx/ChangeLog.cns_raid.txt ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rdma.h" ++#include ++ ++int rdma_verbose; 
++u8 rdma_test_ptn[32] = {0}; ++unsigned int dma_timeout_jiffies; ++mempool_t *rdma_sg_pool = NULL; /* pool */ ++rdma_chan_t *dma = NULL; /* dma channel */ ++ ++static DEFINE_SPINLOCK(process_lock); ++ ++/* Debug Printk */ ++#define dprintk(x...) ((void)(rdma_verbose && printk(KERN_WARNING x))) ++#define dump_regs(x) \ ++do { \ ++ dprintk("pa:%08x sg:%08x bp:%08x fp:%08x st:%08x qp:%08x sz:%08x\n", \ ++ *((x)->cregs->para), \ ++ *((x)->cregs->sgad), \ ++ *((x)->cregs->back), \ ++ *((x)->cregs->frnt), \ ++ *((x)->cregs->stat), \ ++ *((x)->cregs->qpar), \ ++ *((x)->cregs->blsz)); \ ++} while (0) ++ ++ ++#define rdma_dmac_flush_range(start, bytes) \ ++ do { \ ++ dma_cache_maint(start, bytes, DMA_BIDIRECTIONAL); \ ++ } while (0); ++ ++#define rdma_dmac_inv_range(start, bytes) \ ++ do { \ ++ dma_cache_maint(start, bytes, DMA_FROM_DEVICE); \ ++ } while (0); ++ ++#define rdma_dmac_clean_range(start, bytes) \ ++ do { \ ++ dma_cache_maint(start, bytes, DMA_TO_DEVICE); \ ++ } while (0); ++ ++ ++ ++extern void *acs_mempool_alloc(mempool_t *pool); ++ ++/** ++ * rdma_timeout_handle ++ */ ++static void rdma_timeout_handle(rdma_chan_t *rdma) ++{ ++ printk("%s: timeout handling\n", __FUNCTION__); ++ spin_lock_irq(&process_lock); ++ ++ if (!list_empty(&rdma->process_q)) { ++ sg_t *sg_fin = list_entry(rdma->process_q.next, sg_t, lru); ++ list_del_init(&sg_fin->lru); ++ sg_fin->status = SG_STATUS_DONE; ++ } ++ ++ *(dma->cregs->para) = 0; ++ *(dma->cregs->back) = rdma->q_first_phys; ++ *(dma->cregs->frnt) = rdma->q_first_phys; ++ flush_cache_all(); ++ spin_unlock_irq(&process_lock); ++} ++ ++/** ++ * rdma_mempool_alloc - return a sg from pool ++ * @gfp_mask: gfp flag ++ * ++ * Return: ++ * sg table ++ */ ++static void *rdma_sg_mempool_alloc(unsigned int gfp_mask) ++{ ++ void *element; ++ int exception_timeout = 30; ++ ++repeat: ++ element = acs_mempool_alloc(rdma_sg_pool); ++ if (likely(element)) ++ return element; ++ ++ if (!(gfp_mask & __GFP_WAIT)) { ++ return NULL; ++ } else { ++ 
msleep(1000); ++ exception_timeout--; ++ WARN_ON(exception_timeout < 0); /* Thresh check, we should check or increase if any warning */ ++ goto repeat; ++ } ++} ++ ++#define rdma_mempool_create(pool, name, size, min_nr, alloc_fn, free_fn, privp) \ ++do { \ ++ printk("%s: pre-allocating %s: %d*%d=%d\n", \ ++ __FUNCTION__, (name), (min_nr), (size), (min_nr) * (size)); \ ++ pool = mempool_create((min_nr), (mempool_alloc_t *)(alloc_fn), free_fn, (privp)); \ ++ if (!pool) \ ++ goto abort; \ ++} while(0); ++ ++#define rdma_mempool_destroy(pool) \ ++do { \ ++ if (pool) \ ++ mempool_destroy(pool); \ ++} while(0); ++ ++#define rdma_kfree_obj(obj) \ ++do { \ ++ if (obj) \ ++ kfree(obj); \ ++} while(0); ++ ++/** ++ * rdma_sg_prealloc_fn - sg mempool pre-allocation callback ++ * @gfp_flags: GFP_ flags ++ * @data: private data, reserved ++ * ++ * Return: ++ * pre-alloc sg table ++ */ ++static void *rdma_sg_prealloc_fn(int gfp_flags, void *data) ++{ ++ sg_t *sg = NULL; ++ sg = kzalloc(sizeof(sg_t), gfp_flags); ++ INIT_LIST_HEAD(&sg->lru); ++ init_waitqueue_head(&sg->wait); ++ sg->status = SG_STATUS_FREE; ++ ++ /* Remove Debug Message */ ++#if 0 ++ printk("%s: pre-allocating sg=0x%p, phy=0x%p\n", ++ __FUNCTION__, (void *)sg, (void *)virt_to_phys(sg)); ++#endif ++ ++ WARN_ON(!sg); ++ return (void *)sg; ++} ++ ++/** ++ * rdma_sg_deconstruct_fn - sg mempool de-allocation callback ++ * @sg: sg elements ++ * @data: private data, reserved ++ */ ++static void rdma_sg_deconstruct_fn(void *sg, void *data) ++{ ++ if (sg) { ++ printk("%s: de-allocating sg=0x%p, phy=0x%p\n", ++ __FUNCTION__, (void *)sg, (void *)virt_to_phys(sg)); ++ kfree(sg); ++ } ++ return; ++} ++ ++ ++ ++/*-------------------------------------------------------- */ ++/** ++ * rdma_get_sg - alloc an SG ++ * @dma: dma chan ++ */ ++static sg_t *rdma_get_sg(rdma_chan_t *dma) ++{ ++ sg_t *sg = (sg_t *)rdma_sg_mempool_alloc(GFP_KERNEL); ++ ++ /* ++ * No need to zero rest of un-used SG entries; ++ * we detect the src+dst by 
parameter + sg, not by zero-valued sg. ++ */ ++ // memzero(&(sg->entry[0]), SG_ENTRY_BYTES); ++ ++ sg->status = SG_STATUS_ACQUIRED; ++ ++ return sg; ++} ++ ++ ++/** ++ * rdma_queue_sg - queue an SG, wait done and put it. ++ * @dma: dma chan ++ * @sg: sg ++ * @q_para: parameter ++ * @q_blsz: block size ++ * @q_sgad: SG Addr ++ * @sg_cnt: count of (src_cnt + dst_cnt) ++ */ ++#define QUEUE_MODE ++static void rdma_queue_sg(rdma_chan_t *rdma, sg_t *sg, u32 q_para, u32 q_blsz, u32 q_sgad, int sg_cnt) ++{ ++ cmdq_t *this_virt = NULL; ++ ++ spin_lock_irq(&process_lock); ++ ++ sg->status = SG_STATUS_SCHEDULED; ++ list_add_tail(&sg->lru, &rdma->process_q); ++ ++ dump_regs(rdma); ++ ++#ifdef QUEUE_MODE ++ /* Setup BP */ ++ this_virt = (cmdq_t *)(phys_to_virt(*(rdma->cregs->back))); ++ this_virt->parameter = q_para; ++ this_virt->block_size = q_blsz; ++ this_virt->sg_addr = q_sgad; ++ this_virt->reserved = 0; ++ dump_regs(rdma); ++ ++ /* FP++ */ ++ *(rdma->cregs->frnt) = *(rdma->cregs->frnt) + 16; ++ dump_regs(rdma); ++ ++ /* FIXME */ ++ { ++ void *sgp = (void *)sg; ++ void *cqp = (void *)this_virt; ++ ++ rdma_dmac_flush_range(sgp, (sg_cnt * sizeof(u64))); ++ rdma_dmac_flush_range(cqp, sizeof(cmdq_t)); ++ } ++ ++ /* Queue Enable */ ++ *(rdma->cregs->stat) = REG_STAT_CMD_QUEUE_ENABLE; ++ dump_regs(rdma); ++ ++#else ++ *(dma->cregs->blsz) = q_blsz; ++ *(rdma->cregs->sgad) = q_sgad; ++ *(rdma->cregs->para) = q_para; ++ dump_regs(rdma); ++#endif /* QUEUE_MODE */ ++ ++ spin_unlock_irq(&process_lock); ++ dump_regs(rdma); ++ ++ wait_event_timeout(sg->wait, ++ sg->status & (SG_STATUS_DONE | SG_STATUS_ERROR), ++ dma_timeout_jiffies); ++ dump_regs(rdma); ++ ++ /* timed out */ ++ if (unlikely(sg->status & SG_STATUS_SCHEDULED)) { ++ printk("%s: operation timeout\n", __FUNCTION__); ++ rdma_timeout_handle(rdma); ++ } ++ ++ sg->status = SG_STATUS_FREE; ++ mempool_free(sg, rdma_sg_pool); ++ return; ++} ++ ++ ++#define R6_RECOV_PD 1 ++#define R6_RECOV_DD 2 ++#define R6_RECOV_DQ 3 ++/** ++ * 
@src_no: source count ++ * @bytes: len in bytes ++ * @bh_ptr: srcs PA ++ * @w1_dst: pd: P, dd: DD1, qd: DD ++ * @w2_dst: pd: DD, dd: DD2, qd: Q ++ * @pd_dd_qd: failed layout to recover ++ * @w1_idx: idx of w1_dst ++ * @w2_idx: idx of w2_dst ++ * @src_idx: source index; utilize data index only. ++ * ++ * Desc: ++ * Recover P+DD / DD1+DD2 / DD+Q from bh_ptr ++ */ ++void do_cns_rdma_gfgen_pd_dd_dq(unsigned int src_no, unsigned int bytes, ++ void **bh_ptr, void *w1_dst, void *w2_dst, ++ int pd_dd_qd, unsigned int w1_idx, unsigned int w2_idx, ++ unsigned int *src_idx) ++{ ++ int i; ++ sg_t *sg = NULL; ++ u32 q_sgad, q_blsz, q_para; ++ ++ /* clean src/dst */ ++ for (i=0; i PD */ ++ for (i=0; i<(src_no - 1); i++) { ++ sg->entry[i] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (SG_READ_IDX_MASK & ((u64)src_idx[i]) << SG_IDX_SHIFT) ++ | (RWI_RD_D); ++ } ++ sg->entry[src_no-1] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (RWI_RD_Q); ++ ++ /* pd */ ++ sg->entry[src_no] = (SG_ADDR_MASK & ((u64)virt_to_phys(w1_dst))) | (RWI_W_P1); ++ sg->entry[src_no+1] = (SG_ADDR_MASK & ((u64)virt_to_phys(w2_dst))) | (RWI_W_D2); ++ ++ q_para = REG_PARA_ENABLE ++ | REG_PARA_XFER_END ++ | REG_PARA_CALC_P ++ | (REG_PARA_FAULTY_DISKS_CNT * 2) ++ | w2_idx * REG_PARA_FDISK_2_Q_IDX; ++ break; ++ ++ case R6_RECOV_DD: ++ /* dd...PQ -> DD */ ++ for (i=0; i<(src_no - 2); i++) { ++ sg->entry[i] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (SG_READ_IDX_MASK & ((u64)src_idx[i]) << SG_IDX_SHIFT) ++ | (RWI_RD_D); ++ } ++ ++ sg->entry[src_no-2] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (RWI_RD_P); ++ sg->entry[src_no-1] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i+1]))) ++ | (RWI_RD_Q); ++ ++ /* dd */ ++ sg->entry[src_no] = (SG_ADDR_MASK & ((u64)virt_to_phys(w1_dst))) | (RWI_W_D1); ++ sg->entry[src_no+1] = (SG_ADDR_MASK & ((u64)virt_to_phys(w2_dst))) | (RWI_W_D2); ++ ++ q_para = REG_PARA_ENABLE ++ | REG_PARA_XFER_END ++ | REG_PARA_CALC_DATA ++ | 
(REG_PARA_FAULTY_DISKS_CNT * 2) ++ | w1_idx * REG_PARA_FDISK_1_P_IDX ++ | w2_idx * REG_PARA_FDISK_2_Q_IDX; ++ ++ break; ++ ++ case R6_RECOV_DQ: ++ /* dd...dP -> DQ */ ++ for (i=0; i<(src_no - 1); i++) { ++ sg->entry[i] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (SG_READ_IDX_MASK & ((u64)src_idx[i]) << SG_IDX_SHIFT) ++ | (RWI_RD_D); ++ } ++ sg->entry[src_no-1] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (RWI_RD_P); ++ ++ /* qd */ ++ sg->entry[src_no] = (SG_ADDR_MASK & ((u64)virt_to_phys(w1_dst))) | (RWI_W_D1); ++ sg->entry[src_no+1] = (SG_ADDR_MASK & ((u64)virt_to_phys(w2_dst))) | (RWI_W_Q2); ++ ++ q_para = REG_PARA_ENABLE ++ | REG_PARA_XFER_END ++ | REG_PARA_CALC_Q ++ | (REG_PARA_FAULTY_DISKS_CNT * 2) ++ | w1_idx * REG_PARA_FDISK_1_P_IDX; ++ break; ++ ++ default: ++ BUG(); ++ break; ++ ++ } ++ ++ q_sgad = virt_to_phys(&(sg->entry[0])); ++ q_blsz = bytes & REG_BLSZ_MASK; ++ ++ if (unlikely(rdma_verbose)) { ++ for (i=0; ientry[i]); ++ printk("set-SG::DST1ptr= 0x%016llx\n", sg->entry[src_no]); ++ printk("set-SG::DST2ptr= 0x%016llx\n", sg->entry[src_no+1]); ++ } ++ ++ /* Queue SG */ ++ rdma_queue_sg(dma, sg, q_para, q_blsz, q_sgad, (src_no + 2)); ++ ++ /* Invalidate dst */ ++ rdma_dmac_inv_range(w1_dst, bytes); ++ rdma_dmac_inv_range(w2_dst, bytes); ++ ++abort: ++ return; ++} ++ ++/** ++ * @src_no: source count ++ * @bytes: len in bytes ++ * @bh_ptr: srcs PA ++ * @p_dst: P dest PA ++ * @q_dst: Q dest PA ++ * ++ * Desc: ++ * p/q_dst = XOR/GFMUL(bh_ptr[0 ... 
src_no-1]), in Page Addr ++ */ ++void do_cns_rdma_gfgen(unsigned int src_no, unsigned int bytes, void **bh_ptr, ++ void *p_dst, void *q_dst) // u8 *gfmr ++{ ++ int i; ++ sg_t *sg = NULL; ++ u32 q_sgad, q_blsz, q_para; ++ ++ /* clean src/dst */ ++ for (i=0; ientry[i] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (SG_READ_IDX_MASK & ((u64)i + 1) << SG_IDX_SHIFT) ++ | (RWI_RD_D); ++ } ++ ++ /* Setup SG::Write::P1 + Q2 */ ++ sg->entry[src_no] = (SG_ADDR_MASK & ((u64)virt_to_phys(p_dst))) | (RWI_W_P1); ++ sg->entry[src_no+1] = (SG_ADDR_MASK & ((u64)virt_to_phys(q_dst))) | (RWI_W_Q2); ++ ++ /* Setup SGAD, BLSZ, PARAMETER */ ++ q_sgad = virt_to_phys(&(sg->entry[0])); ++ q_blsz = bytes & REG_BLSZ_MASK; ++ q_para = REG_PARA_ENABLE ++ | REG_PARA_XFER_END ++ | REG_PARA_CALC_PQ ++ | (REG_PARA_FAULTY_DISKS_CNT * 2); ++ ++ if (unlikely(rdma_verbose)) { ++ for (i=0; ientry[i]); ++ printk("set-SG::DST1ptr= 0x%016llx\n", sg->entry[src_no]); ++ printk("set-SG::DST2ptr= 0x%016llx\n", sg->entry[src_no+1]); ++ } ++ ++ /* Queue SG */ ++ rdma_queue_sg(dma, sg, q_para, q_blsz, q_sgad, (src_no + 2)); ++ ++ /* Invalidate dst */ ++ rdma_dmac_inv_range(p_dst, bytes); ++ rdma_dmac_inv_range(q_dst, bytes); ++ ++abort: ++ return; ++} ++ ++/** ++ * @src_no: source count ++ * @bytes: len in bytes ++ * @bh_ptr: srcs PA ++ * @dst_ptr: dest PA ++ * ++ * Desc: ++ * dst_ptr = XOR(bh_ptr[0 ... 
src_no-1]), in Page Addr ++ */ ++void do_cns_rdma_xorgen(unsigned int src_no, unsigned int bytes, void **bh_ptr, void *dst_ptr) ++{ ++ int i; ++ sg_t *sg = NULL; ++ u32 q_sgad, q_blsz, q_para; ++ ++ /* clean src/dst */ ++ for (i=0; ientry[i] = (SG_ADDR_MASK & ((u64)virt_to_phys(bh_ptr[i]))) ++ | (SG_READ_IDX_MASK & ((u64)i + 1) << SG_IDX_SHIFT) ++ | (RWI_RD_D); ++ } ++ ++ /* Setup SG::Write::P1 */ ++ sg->entry[src_no] = (SG_ADDR_MASK & ((u64)virt_to_phys(dst_ptr))) ++ | (RWI_W_P1); ++ ++ /* Setup SGAD, BLSZ, PARAMETER */ ++ q_sgad = virt_to_phys(&(sg->entry[0])); ++ q_blsz = bytes & REG_BLSZ_MASK; ++ q_para = REG_PARA_ENABLE ++ | REG_PARA_XFER_END ++ | REG_PARA_CALC_P ++ | (REG_PARA_FAULTY_DISKS_CNT * 1); ++ ++ if (unlikely(rdma_verbose)) { ++ for (i=0; ientry[i]); ++ printk("set-SG::DST1ptr= 0x%016llx\n", sg->entry[src_no]); ++ } ++ ++ /* Queue SG */ ++ rdma_queue_sg(dma, sg, q_para, q_blsz, q_sgad, (src_no + 1)); ++ ++ /* Invalidate dst */ ++ rdma_dmac_inv_range(dst_ptr, bytes); ++ ++abort: ++ return; ++} ++ ++ ++/** ++ * rdma_isr - rdma isr ++ * @irq: irq# ++ * @dev_id: private data ++ */ ++static irqreturn_t rdma_isr(int irq, void *dev_id) ++{ ++ unsigned long flags; ++ rdma_chan_t *this_dma = (rdma_chan_t *)dev_id; ++ ++ /* Make sure the INT is for us */ ++ if (unlikely(dma != this_dma)) ++ { ++ printk(KERN_ERR "Unexpected Interrupt, irq=%d, dma=%p, dev_id=%p\n", irq, dma, dev_id); ++ return IRQ_NONE; ++ } ++ ++ dprintk("%s: pstat=0x%08x\n", __FUNCTION__, *(this_dma->cregs->stat)); ++ ++ spin_lock_irqsave(&process_lock, flags); ++ ++ /* clear */ ++ *(this_dma->cregs->stat) = REG_STAT_XFER_COMPLETE | REG_STAT_INTERRUPT_FLAG; ++ ++ if (!list_empty(&this_dma->process_q)) { ++ sg_t *sg_fin = list_entry(this_dma->process_q.next, sg_t, lru); ++ ++ BUG_ON(!(sg_fin->status & SG_STATUS_SCHEDULED)); ++ ++ list_del_init(&sg_fin->lru); ++ sg_fin->status = SG_STATUS_DONE; // TODO: slave/decoder error handling ++ ++ /* FP rewind */ ++ if (*(dma->cregs->frnt) == 
this_dma->q_last_phys) { ++ *(dma->cregs->back) = this_dma->q_first_phys; ++ *(dma->cregs->frnt) = this_dma->q_first_phys; ++ } ++ ++ wake_up(&sg_fin->wait); ++ } ++ spin_unlock_irqrestore(&process_lock, flags); ++ ++ return IRQ_HANDLED; ++} ++ ++/** ++ * test_show - show unit test result ++ */ ++static void test_show(void **src, unsigned int bytes, void *p, void *q, unsigned int src_cnt, int stage) ++{ ++ int i; ++ char *buf; ++ ++ for (i=0; i= (MAX_ENTRIES_PER_SG - 2)) ++ src_cnt = MAX_ENTRIES_PER_SG - 2; ++ ++ if (src_cnt < 2) ++ src_cnt = 2; ++ ++ if (bytes > 65536) ++ bytes = 65536; ++ ++ if (bytes < 4096) ++ bytes = 4096; ++ ++ for (i = 0; i < MAX_ENTRIES_PER_SG; i++) { ++ if (i < src_cnt) { ++ src_ptrs[i] = kmalloc(bytes, GFP_KERNEL); ++ } else { ++ src_ptrs[i] = NULL; ++ } ++ } ++ p_dst = kmalloc(bytes, GFP_KERNEL); ++ q_dst = kmalloc(bytes, GFP_KERNEL); ++ ++ printk("%s: ACTION=%d, src_cnt=%u, bytes=%u p/w1=0x%p, q/w2=0x%p\n", ++ __FUNCTION__, action, src_cnt, bytes, p_dst, q_dst); ++ ++ /* Shuffle the src and dst */ ++ for (i = 0; i < src_cnt; i++) { ++ if (rdma_test_ptn[0] == 0) { ++ memset(src_ptrs[i], (jiffies % 240)+1, bytes); ++ msleep(10 + 10 * i); ++ } else { ++ memset(src_ptrs[i], rdma_test_ptn[i], bytes); ++ } ++ } ++ memset(p_dst, 0xff, bytes); ++ memset(q_dst, 0xff, bytes); ++ ++ // flush_cache_all(); ++ test_show(src_ptrs, bytes, p_dst, q_dst, src_cnt, 1); ++ ++ switch (action) ++ { ++ /* P */ ++ case 1: ++ printk("\n%s: XORgen\n\n", __FUNCTION__); ++ do_cns_rdma_xorgen(src_cnt, bytes, src_ptrs, p_dst); ++ break; ++ ++ /* PQ */ ++ case 2: ++ printk("\n%s: PQgen\n\n", __FUNCTION__); ++ do_cns_rdma_gfgen(src_cnt, bytes, src_ptrs, p_dst, q_dst); ++ break; ++ ++ /* PD */ ++ case 3: ++ w1_idx = src_cnt + 1; ++ w2_idx = 1; ++ cnt = 0; ++ ++ printk("read_idx: "); ++ for (i=1; i<=(src_cnt+2); i++) ++ if (i != w1_idx && i != w2_idx) { ++ read_idx[cnt] = i; ++ printk("%d ", i); ++ cnt++; ++ } ++ printk("\n%s: PDgen w1/w2_idx=%u/%u\n\n", __FUNCTION__, 
w1_idx, w2_idx); ++ do_cns_rdma_gfgen_pd_dd_dq(src_cnt, bytes, src_ptrs, p_dst, q_dst, ++ R6_RECOV_PD, w1_idx, w2_idx, read_idx); ++ break; ++ ++ /* DD */ ++ case 4: ++ w1_idx = 1; ++ w2_idx = 2; ++ cnt = 0; ++ ++ printk("read_idx: "); ++ for (i=1; i<=(src_cnt+2); i++) ++ if (i != w1_idx && i != w2_idx) { ++ read_idx[cnt] = i; ++ printk("%d ", i); ++ cnt++; ++ } ++ printk("\n%s: DDgen w1/w2_idx=%u/%u\n\n", __FUNCTION__, w1_idx, w2_idx); ++ do_cns_rdma_gfgen_pd_dd_dq(src_cnt, bytes, src_ptrs, p_dst, q_dst, ++ R6_RECOV_DD, w1_idx, w2_idx, read_idx); ++ break; ++ ++ /* DQ */ ++ case 5: ++ w1_idx = 1; ++ w2_idx = src_cnt + 2; ++ cnt = 0; ++ ++ printk("read_idx: "); ++ for (i=1; i<=(src_cnt+2); i++) ++ if (i != w1_idx && i != w2_idx) { ++ read_idx[cnt] = i; ++ printk("%d ", i); ++ cnt++; ++ } ++ printk("\n%s: DQgen w1/w2_idx=%u/%u\n\n", __FUNCTION__, w1_idx, w2_idx); ++ do_cns_rdma_gfgen_pd_dd_dq(src_cnt, bytes, src_ptrs, p_dst, q_dst, ++ R6_RECOV_DQ, w1_idx, w2_idx, read_idx); ++ break; ++ ++ /* Verbose */ ++ case 9999: ++ rdma_verbose = (rdma_verbose == 1 ? 0 : 1); ++ printk("\n%s: Setup verbose mode => %d\n\n", __FUNCTION__, rdma_verbose); ++ break; ++ ++ /* ++ * SRC Pattern Assign ++ * e.g. 0x00000000 <-- do not assign ++ * e.g. 
0xbbccddee <-- 4 src: bb cc dd ee ++ */ ++ default: ++ rdma_test_ptn[0] = (u8)(action >> 24 & 0x000000FF); ++ rdma_test_ptn[1] = (u8)(action >> 16 & 0x000000FF); ++ rdma_test_ptn[2] = (u8)(action >> 8 & 0x000000FF); ++ rdma_test_ptn[3] = (u8)(action & 0x000000FF); ++ ++ printk("\n%s: Setup src test pattern => 0x%02x %02x %02x %02x\n\n", __FUNCTION__, ++ rdma_test_ptn[0], ++ rdma_test_ptn[1], ++ rdma_test_ptn[2], ++ rdma_test_ptn[3]); ++ break; ++ } ++ ++ // flush_cache_all(); ++ test_show(src_ptrs, bytes, p_dst, q_dst, src_cnt, 2); ++ ++ for (i = 0; i < MAX_ENTRIES_PER_SG; i++) { ++ rdma_kfree_obj(src_ptrs[i]); ++ } ++ rdma_kfree_obj(p_dst); ++ rdma_kfree_obj(q_dst); ++ ++} ++ ++void cns_rdma_hw_init(void){ ++ ++ cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_RAID); ++ cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_RAID); ++} ++ ++/** ++ * cns_rdma_init - module init ++ */ ++int __init cns_rdma_init(void) ++{ ++ int err = 0; ++ ++ printk("%s: start\n", __FUNCTION__); ++ ++ cns_rdma_hw_init(); ++ ++ rdma_test_ptn[0] = 0; ++ rdma_verbose = 0; ++ dma_timeout_jiffies = HZ; ++ ++ /* DMA chan */ ++ dma = (rdma_chan_t *) kzalloc(sizeof(rdma_chan_t), GFP_KERNEL); ++ if (dma == NULL) ++ goto abort; ++ ++ INIT_LIST_HEAD(&(dma->process_q)); ++ ++ //static DEFINE_SPINLOCK(dma->process_lock); ++ dma->irq = IRQ_CNS3XXX_RAID; ++ dma->irq_str = "CNS3XXX RAID acceleration"; ++ dma->cregs = NULL; ++ dma->q_virt = NULL; ++ ++ /* control register */ ++ dma->cregs = (struct ctrl_regs *) kzalloc(sizeof(struct ctrl_regs) + GENERIC_ALIGN, GFP_KERNEL); ++ dma->cregs = (struct ctrl_regs *) (((u32) dma->cregs & GENERIC_ALIGN_MASK) + GENERIC_ALIGN); ++ ++ if (dma->cregs == NULL) ++ goto abort; ++ ++ printk("%s: reg1: virt=0x%p\n", ++ __FUNCTION__, (void *)dma->cregs); ++ ++ dma->cregs->para = RDMA_REGS_VIRT(REG_PARA_OFFSET); ++ dma->cregs->blsz = RDMA_REGS_VIRT(REG_BLSZ_OFFSET); ++ dma->cregs->sgad = RDMA_REGS_VIRT(REG_SGAD_OFFSET); ++ dma->cregs->stat = RDMA_REGS_VIRT(REG_STAT_OFFSET); 
++ dma->cregs->frnt = RDMA_REGS_VIRT(REG_FRNT_OFFSET); ++ dma->cregs->back = RDMA_REGS_VIRT(REG_BACK_OFFSET); ++ dma->cregs->qpar = RDMA_REGS_VIRT(REG_QPAR_OFFSET); ++ ++ /* Pre-allocate S/G table */ ++ rdma_mempool_create(rdma_sg_pool, "rdma_sg", sizeof(sg_t), ++ MAX_SG, rdma_sg_prealloc_fn, rdma_sg_deconstruct_fn, NULL); ++ ++ /* Pre-allocate Queue Cmds */ ++ dma->q_virt = (cmdq_t *) kzalloc(sizeof(cmdq_t) * CURR_Q_DEPTH + CURR_Q_DEPTH_ALIGN, GFP_KERNEL); ++ dma->q_virt = (cmdq_t *) (((u32) dma->q_virt & CURR_Q_DEPTH_ALIGN_MASK) + CURR_Q_DEPTH_ALIGN); ++ ++ if (dma->q_virt == NULL) ++ goto abort; ++ ++ dma->q_first_phys = virt_to_phys((void *)dma->q_virt); ++ dma->q_last_phys = dma->q_first_phys + sizeof(cmdq_t) * (CURR_Q_DEPTH - 1); ++ ++ printk("%s: q1: virt=0x%p, phy=0x%x -> 0x%x\n", ++ __FUNCTION__, (void *)dma->q_virt, dma->q_first_phys, dma->q_last_phys); ++ ++ *(dma->cregs->qpar) = REG_QPAR_DEPTH_32; ++ *(dma->cregs->back) = dma->q_first_phys; ++ *(dma->cregs->frnt) = dma->q_first_phys; ++ ++ /* Register IRQ */ ++ err = request_irq(dma->irq, rdma_isr, 0, dma->irq_str, dma); ++ if (err) { ++ printk("%s: request irq failed\n", __FUNCTION__); ++ goto abort; ++ } ++ ++ /* Clear 31 & 0 */ ++ *(dma->cregs->stat) = REG_STAT_INTERRUPT_FLAG; ++ ++ err = 0; ++ goto done; ++ ++abort: ++ rdma_mempool_destroy(rdma_sg_pool); ++ rdma_kfree_obj(dma->cregs); ++ rdma_kfree_obj(dma); ++ ++ ++done: ++ printk("%s: done, err=%d\n", __FUNCTION__, err); ++ return err; ++} ++ ++/** ++ * cns_rdma_exit - module exit ++ */ ++void cns_rdma_exit(void) ++{ ++ printk("%s: start\n", __FUNCTION__); ++ ++ rdma_mempool_destroy(rdma_sg_pool); ++ rdma_kfree_obj(dma->cregs); ++ rdma_kfree_obj(dma); ++ printk("%s: done\n", __FUNCTION__); ++} ++ ++//module_init(cns_rdma_init); ++//module_exit(cns_rdma_exit); +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/rdma.h +@@ -0,0 +1,178 @@ ++/* ++ * rdma.h - CNS3xxx hardware RAID acceleration ++ */ ++#ifndef _CNS3XXX_RDMA_H_ ++#define _CNS3XXX_RDMA_H_ ++ 
++#include ++ ++#define RDMA_REGS_PHYS(x) ((u32)(CNS3XXX_RAID_BASE + (x))) ++#define RDMA_REGS_VIRT(x) ((u32 volatile *)(CNS3XXX_RAID_BASE_VIRT + (x))) ++#define RDMA_REGS_VALUE(x) (*((u32 volatile *)(CNS3XXX_RAID_BASE_VIRT + (x)))) ++ ++ ++#define GENERIC_ALIGN 0x8 /* 64-bits */ ++#define GENERIC_ALIGN_MASK 0xFFFFFFF8UL ++#define QUEUE_DEPTH_ALIGN_MUL 0x10 /* 16 bytes; ALIGNMENT == QDEPTH * 16 bytes */ ++ ++ ++/* Register Offset */ ++#define REG_PARA_OFFSET 0x00UL /* Parameter */ ++#define REG_BLSZ_OFFSET 0x04UL /* Block Size */ ++#define REG_SGAD_OFFSET 0x08UL /* SG Address */ ++#define REG_STAT_OFFSET 0x0CUL /* Status */ ++#define REG_FRNT_OFFSET 0x10UL /* FP */ ++#define REG_BACK_OFFSET 0x14UL /* BP */ ++#define REG_QPAR_OFFSET 0x18UL /* Queue Parameter */ ++ ++ ++/* 0x00: PARA */ ++#define REG_PARA_ENABLE 0x80000000UL /* 31 */ ++#define REG_PARA_XFER_END 0x40000000UL /* 30 */ ++#define REG_PARA_MEMORY_WR_DISABLE 0x20000000UL /* 29 */ ++#define REG_PARA_FAULTY_DISKS_CNT 0x08000000UL /* 28:27 */ ++ ++#define REG_PARA_CALC 0x01000000UL /* 26:24 */ ++ #define REG_PARA_CALC_DATA 0x00000000UL ++ #define REG_PARA_CALC_P 0x01000000UL ++ #define REG_PARA_CALC_Q 0x02000000UL ++ #define REG_PARA_CALC_R 0x04000000UL ++ #define REG_PARA_CALC_PQ 0x03000000UL ++ #define REG_PARA_CALC_PR 0x05000000UL ++ #define REG_PARA_CALC_QR 0x06000000UL ++ #define REG_PARA_CALC_PQR 0x07000000UL ++ ++#define REG_PARA_FDISK_3_R_IDX 0x00010000UL /* 23:16 */ ++#define REG_PARA_FDISK_2_Q_IDX 0x00000100UL /* 15:8 */ ++#define REG_PARA_FDISK_1_P_IDX 0x00000001UL /* 7:0 */ ++ ++/* 0x04: BLSZ */ ++#define REG_BLSZ_SHIFT 3 /* 19:3 */ ++#define REG_BLSZ_MASK 0x000FFFF8UL /* N * 8bytes */ ++ ++/* 0x08: SGAD */ ++#define REG_SGAD_SHIFT 0 ++ ++/* 0x0C: STAT */ ++#define REG_STAT_XFER_COMPLETE 0x80000000UL /* 31 */ ++#define REG_STAT_SLAVE_ERROR 0x40000000UL /* 30 */ ++#define REG_STAT_DECODER_ERROR 0x20000000UL /* 29 */ ++#define REG_STAT_R_FLAG 0x00080000UL /* 19 */ ++#define REG_STAT_Q_FLAG 
0x00040000UL /* 18 */ ++#define REG_STAT_P_FLAG 0x00020000UL /* 17 */ ++#define REG_STAT_CMD_QUEUE_ENABLE 0x00000002UL /* 1 */ ++#define REG_STAT_INTERRUPT_FLAG 0x00000001UL /* 0 */ ++ ++/* 0x10/14: FRNT/BACK */ ++#define REG_FRNT_SHIFT 0 ++#define REG_BACK_SHIFT 0 ++ ++/* 0x18: QPAR */ ++#define MAX_Q_DEPTH 256 ++#define REG_QPAR_DEPTH_256 0xFF ++#define REG_QPAR_DEPTH_128 0x7F ++#define REG_QPAR_DEPTH_64 0x3F ++#define REG_QPAR_DEPTH_32 0x1F ++#define REG_QPAR_DEPTH_16 0xF ++#define REG_QPAR_DEPTH_8 0x7 ++#define REG_QPAR_DEPTH_4 0x3 ++#define REG_QPAR_DEPTH_2 0x1 ++ ++/* len = 32 */ ++#define CURR_Q_DEPTH (REG_QPAR_DEPTH_32 + 1) ++#define CURR_Q_DEPTH_ALIGN ((CURR_Q_DEPTH) * (QUEUE_DEPTH_ALIGN_MUL)) /* 0x200 */ ++#define CURR_Q_DEPTH_ALIGN_MASK 0xFFFFFE00UL ++ ++ ++#define MAX_SG 32 // cf. CURR_Q_DEPTH or MAX_Q_DEPTH ++#define MAX_ENTRIES_PER_SG 32 ++ ++/* SG Table */ ++#define SG_ADDR_MASK 0x00000000FFFFFFFFULL ++ ++#define SG_READ_IDX_MASK 0x000000FF00000000ULL ++#define SG_IDX_SHIFT 32 ++ ++// ---------------------- 7654321076543210 ++#define SG_RW_MASK 0x00000F0000000000ULL ++#define RWI_RD_D 0x0000000000000000ULL ++#define RWI_RD_P 0x0000010000000000ULL ++#define RWI_RD_Q 0x0000020000000000ULL ++#define RWI_RD_R 0x0000030000000000ULL ++#define RWI_W_D1 0x0000040000000000ULL ++#define RWI_W_P1 0x0000050000000000ULL ++#define RWI_W_Q1 0x0000060000000000ULL ++#define RWI_W_R1 0x0000070000000000ULL ++#define RWI_W_D2 0x0000080000000000ULL ++#define RWI_W_P2 0x0000090000000000ULL ++#define RWI_W_Q2 0x00000A0000000000ULL ++#define RWI_W_R2 0x00000B0000000000ULL ++#define RWI_W_D3 0x00000C0000000000ULL ++#define RWI_W_P3 0x00000D0000000000ULL ++#define RWI_W_Q3 0x00000E0000000000ULL ++#define RWI_W_R3 0x00000F0000000000ULL ++ ++ ++#define SG_STATUS_FREE 0x00000001UL ++#define SG_STATUS_ACQUIRED 0x00000002UL ++#define SG_STATUS_SCHEDULED 0x00000004UL ++#define SG_STATUS_DONE 0x00000008UL ++#define SG_STATUS_ERROR 0x00000010UL ++ ++#define SG_ENTRY_BYTES (8 * 
MAX_ENTRIES_PER_SG) ++ ++typedef struct rdma_sgtable sg_t; ++struct rdma_sgtable { ++ u64 entry[MAX_ENTRIES_PER_SG]; ++ ++ struct list_head lru; /* list_add_tail/list_del to/from process_q when schedule or isr */ ++ wait_queue_head_t wait; ++ ++ u32 status; ++}; ++ ++/* Command Queue: cmdq_t */ ++typedef struct rdma_cmdq cmdq_t; ++struct rdma_cmdq { ++ volatile u32 parameter; ++ volatile u32 block_size; ++ volatile u32 sg_addr; ++ volatile u32 reserved; ++}; ++ ++struct ctrl_regs { ++ volatile u32 *para; ++ volatile u32 *blsz; ++ volatile u32 *sgad; ++ volatile u32 *stat; ++ volatile u32 *frnt; ++ volatile u32 *back; ++ volatile u32 *qpar; ++}; ++ ++/* channel */ ++#define RDMA_CHANNEL_COUNT 1 ++typedef struct rdma_channel rdma_chan_t; ++struct rdma_channel ++{ ++ struct list_head process_q; ++ spinlock_t process_lock; /* process queue lock */ ++ ++ int irq; ++ const char *irq_str; ++ ++ /* cmd queue start address */ ++ volatile cmdq_t *q_virt; ++ volatile u32 q_first_phys; ++ volatile u32 q_last_phys; ++ ++ /* control regs */ ++ struct ctrl_regs *cregs; ++ ++ // wait_queue_head_t wait; ++}; ++ ++int __init cns_rdma_init(void); ++ ++#endif ++ +--- a/arch/arm/Makefile ++++ b/arch/arm/Makefile +@@ -146,6 +146,7 @@ machine-$(CONFIG_ARCH_ORION5X) := orion + machine-$(CONFIG_ARCH_PNX4008) := pnx4008 + machine-$(CONFIG_ARCH_PXA) := pxa + machine-$(CONFIG_ARCH_REALVIEW) := realview ++machine-$(CONFIG_ARCH_CNS3XXX) := cns3xxx + machine-$(CONFIG_ARCH_RPC) := rpc + machine-$(CONFIG_ARCH_S3C2410) := s3c2410 s3c2400 s3c2412 s3c2440 s3c2442 s3c2443 + machine-$(CONFIG_ARCH_S3C24A0) := s3c24a0 +--- /dev/null ++++ b/arch/arm/mm/cache-l2cc.c +@@ -0,0 +1,218 @@ ++/******************************************************************************* ++ * ++ * arch/arm/mm/cache-l2cc.c - L2 cache controller support ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, 
Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define CACHE_LINE_SIZE 32 ++ ++static void __iomem *cns3xxx_l2_base; ++static DEFINE_SPINLOCK(cns3xxx_l2_lock); ++ ++static inline void cache_wait(void __iomem *reg, unsigned long mask) ++{ ++#ifndef CONFIG_L2CC_NO_WAIT ++ /* wait for the operation to complete */ ++ while (readl(reg) & mask); ++#endif ++} ++ ++static inline void sync_writel(unsigned long val, unsigned long reg, ++ unsigned long complete_mask) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&cns3xxx_l2_lock, flags); ++ writel(val, cns3xxx_l2_base + reg); ++ /* wait for the operation to complete */ ++ while (readl(cns3xxx_l2_base + reg) & complete_mask) ++ ; ++ spin_unlock_irqrestore(&cns3xxx_l2_lock, flags); ++} ++ ++static inline void cache_sync(void) ++{ ++ sync_writel(0, L2CC_CACHE_SYNC, 1); ++} ++ ++static inline void cns3xxx_l2_inv_all(void) ++{ ++ /* invalidate all ways */ ++ sync_writel(0xffff, L2CC_INV_WAY, 0xffff); ++ cache_sync(); ++} ++ ++static void cns3xxx_l2_inv_range(unsigned long start, unsigned long end) ++{ ++ unsigned long addr; ++ ++ if (start & (CACHE_LINE_SIZE - 1)) { ++ start &= 
~(CACHE_LINE_SIZE - 1); ++ writel(start, cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA); ++ start += CACHE_LINE_SIZE; ++ } ++ ++ if (end & (CACHE_LINE_SIZE - 1)) { ++ end &= ~(CACHE_LINE_SIZE - 1); ++ writel(end, cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA); ++ } ++ ++ for (addr = start; addr < end; addr += CACHE_LINE_SIZE) ++ writel(addr, cns3xxx_l2_base + L2CC_INV_LINE_PA); ++ ++ cache_sync(); ++} ++ ++static void cns3xxx_l2_clean_range(unsigned long start, unsigned long end) ++{ ++ unsigned long addr; ++ ++ start &= ~(CACHE_LINE_SIZE - 1); ++ for (addr = start; addr < end; addr += CACHE_LINE_SIZE) ++ writel(addr, cns3xxx_l2_base + L2CC_CLEAN_LINE_PA); ++ ++ cache_wait(cns3xxx_l2_base + L2CC_CLEAN_LINE_PA, 1); ++ cache_sync(); ++} ++ ++static void cns3xxx_l2_flush_range(unsigned long start, unsigned long end) ++{ ++ unsigned long addr; ++ ++ start &= ~(CACHE_LINE_SIZE - 1); ++ for (addr = start; addr < end; addr += CACHE_LINE_SIZE) ++ writel(addr, cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA); ++ ++ cache_wait(cns3xxx_l2_base + L2CC_CLEAN_INV_LINE_PA, 1); ++ cache_sync(); ++} ++ ++void __init l2cc_init(void __iomem *base) ++{ ++ __u32 aux, prefetch, tag, data; ++ ++ printk(KERN_INFO "Initializing CNS3XXX L2 cache controller... 
"); ++ ++ cns3xxx_l2_base = base; ++ ++ /* disable L2CC */ ++ writel(0, cns3xxx_l2_base + L2CC_CTRL); ++ ++ /* ++ * Auxiliary control register ++ * ++ * bit[22] - shared attribute internally ignored ++ * bit[21] - parity enabled ++ * bit[20] - ++ * bit[19:17] - 32kB way size ++ * bit[16] - way associative ++ * bit[12] - exclusive cache disabled ++ * ++ */ ++ aux = readl(cns3xxx_l2_base + L2CC_AUX_CTRL); ++ aux &= 0xfe000fff; ++#ifdef CONFIG_CACHE_L2_I_PREFETCH ++ aux |= 0x20000000; /* bit[29]: Instruction prefetching enable, bit[29]: Data prefetching enable */ ++#endif ++#ifdef CONFIG_CACHE_L2_D_PREFETCH ++ aux |= 0x10000000; /* bit[28]: Instruction prefetching enable, bit[28]: Data prefetching enable */ ++#endif ++ aux |= 0x00540000; /* ...010..., 32KB, 8-way, Parity Disable*/ ++ writel(aux, cns3xxx_l2_base + L2CC_AUX_CTRL); ++ ++ prefetch = readl(cns3xxx_l2_base + 0xF60); ++ prefetch |= 0x00000008; /* prefetch offset, bit[4..0] */ ++#ifdef CONFIG_CACHE_L2_I_PREFETCH ++ prefetch |= 0x20000000; ++#endif ++#ifdef CONFIG_CACHE_L2_D_PREFETCH ++ prefetch |= 0x10000000; ++#endif ++ writel(prefetch, cns3xxx_l2_base + 0xF60); ++ ++ /* Tag RAM Control register ++ * ++ * bit[10:8] - 1 cycle of write accesses latency ++ * bit[6:4] - 1 cycle of read accesses latency ++ * bit[3:0] - 1 cycle of setup latency ++ * ++ * 1 cycle of latency for setup, read and write accesses ++ */ ++ tag = readl(cns3xxx_l2_base + L2CC_TAG_RAM_LATENCY_CTRL); ++ tag &= 0xfffff888; ++ tag |= 0x00000000; ++ writel(tag, cns3xxx_l2_base + L2CC_TAG_RAM_LATENCY_CTRL); ++ ++ /* Data RAM Control register ++ * ++ * bit[10:8] - 1 cycles of write accesses latency ++ * bit[6:4] - 1 cycles of read accesses latency ++ * bit[3:0] - 1 cycle of setup latency ++ * ++ * 1 cycle of setup latency, 2 cycles of read and write accesses latency ++ */ ++ data = readl(cns3xxx_l2_base + L2CC_DATA_RAM_LATENCY_CTRL); ++ data &= 0xfffff888; ++ data |= 0x00000000; ++ writel(data, cns3xxx_l2_base + L2CC_DATA_RAM_LATENCY_CTRL); ++ ++ 
cns3xxx_l2_inv_all(); ++ ++ /* lockdown required ways for different effective size of the L2 cache */ ++#ifdef CONFIG_CACHE_L2CC_32KB ++ /* 32KB, lock way7..1 */ ++ writel(0xfe, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_D); ++ writel(0xfe, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_I); ++ printk(KERN_INFO "CNS3XXX L2 cache lock down : way7..1\n"); ++#elif defined(CONFIG_CACHE_L2CC_64KB) ++ /* 64KB, lock way7..2 */ ++ writel(0xfc, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_D); ++ writel(0xfc, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_I); ++ printk(KERN_INFO "CNS3XXX L2 cache lock down : way7..2\n"); ++#elif defined(CONFIG_CACHE_L2CC_96KB) ++ /* 96KB, lock way7..3 */ ++ writel(0xf8, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_D); ++ writel(0xf8, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_I); ++ printk(KERN_INFO "CNS3XXX L2 cache lock down : way7..3\n"); ++#elif defined(CONFIG_CACHE_L2CC_128KB) ++ /* 128KB, lock way7..4 */ ++ writel(0xf0, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_D); ++ writel(0xf0, cns3xxx_l2_base + L2CC_LOCKDOWN_0_WAY_I); ++ printk(KERN_INFO "CNS3XXX L2 cache lock down : way7..4\n"); ++#endif ++ ++ /* enable L2CC */ ++ writel(1, cns3xxx_l2_base + L2CC_CTRL); ++ ++ outer_cache.inv_range = cns3xxx_l2_inv_range; ++ outer_cache.clean_range = cns3xxx_l2_clean_range; ++ outer_cache.flush_range = cns3xxx_l2_flush_range; ++ ++ printk("done.\n"); ++} +--- a/arch/arm/mm/cache-l2x0.c ++++ b/arch/arm/mm/cache-l2x0.c +@@ -109,6 +109,25 @@ void __init l2x0_init(void __iomem *base + + l2x0_inv_all(); + ++ /* lockdown required ways for different effective size of the L2 cache */ ++#ifdef CONFIG_CACHE_L2X0_128KB ++ /* 128KB, lock way7..1 */ ++ writel(0xfe, l2x0_base + L2X0_LOCKDOWN_WAY_D); ++ writel(0xfe, l2x0_base + L2X0_LOCKDOWN_WAY_I); ++#elif defined(CONFIG_CACHE_L2X0_256KB) ++ /* 256KB, lock way7..2 */ ++ writel(0xfc, l2x0_base + L2X0_LOCKDOWN_WAY_D); ++ writel(0xfc, l2x0_base + L2X0_LOCKDOWN_WAY_I); ++#elif defined(CONFIG_CACHE_L2X0_512KB) ++ /* 512KB, lock way7..3 */ ++ writel(0xf8, 
l2x0_base + L2X0_LOCKDOWN_WAY_D); ++ writel(0xf8, l2x0_base + L2X0_LOCKDOWN_WAY_I); ++#elif defined(CONFIG_CACHE_L2X0_1MB) ++ /* 1MB, lock way7..4 */ ++ writel(0xf0, l2x0_base + L2X0_LOCKDOWN_WAY_D); ++ writel(0xf0, l2x0_base + L2X0_LOCKDOWN_WAY_I); ++#endif ++ + /* enable L2X0 */ + writel(1, l2x0_base + L2X0_CTRL); + +--- a/arch/arm/mm/dma-mapping.c ++++ b/arch/arm/mm/dma-mapping.c +@@ -29,7 +29,8 @@ + #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" + #endif + +-#define CONSISTENT_END (0xffe00000) ++//#define CONSISTENT_END (0xffe00000) ++#define CONSISTENT_END (0xf2000000) + #define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + + #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) +@@ -208,7 +209,7 @@ __dma_alloc(struct device *dev, size_t s + { + void *ptr = page_address(page); + memset(ptr, 0, size); +- dmac_flush_range(ptr, ptr + size); ++ smp_dma_flush_range(ptr, ptr + size); + outer_flush_range(__pa(ptr), __pa(ptr) + size); + } + +@@ -498,15 +499,15 @@ void dma_cache_maint(const void *start, + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ +- inner_op = dmac_inv_range; ++ inner_op = smp_dma_inv_range; + outer_op = outer_inv_range; + break; + case DMA_TO_DEVICE: /* writeback only */ +- inner_op = dmac_clean_range; ++ inner_op = smp_dma_clean_range; + outer_op = outer_clean_range; + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ +- inner_op = dmac_flush_range; ++ inner_op = smp_dma_flush_range; + outer_op = outer_flush_range; + break; + default: +@@ -528,15 +529,15 @@ static void dma_cache_maint_contiguous(s + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ +- inner_op = dmac_inv_range; ++ inner_op = smp_dma_inv_range; + outer_op = outer_inv_range; + break; + case DMA_TO_DEVICE: /* writeback only */ +- inner_op = dmac_clean_range; ++ inner_op = smp_dma_clean_range; + outer_op = outer_clean_range; + break; + case DMA_BIDIRECTIONAL: /* writeback and 
invalidate */ +- inner_op = dmac_flush_range; ++ inner_op = smp_dma_flush_range; + outer_op = outer_flush_range; + break; + default: +--- a/arch/arm/mm/Kconfig ++++ b/arch/arm/mm/Kconfig +@@ -391,7 +391,7 @@ config CPU_FEROCEON_OLD_ID + + # ARMv6 + config CPU_V6 +- bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX ++ bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_CNS3XXX || MACH_REALVIEW_PBX + select CPU_32v6 + select CPU_ABRT_EV6 + select CPU_PABRT_NOIFAR +@@ -516,6 +516,16 @@ config CPU_CACHE_VIPT + config CPU_CACHE_FA + bool + ++config CPU_NO_CACHE_BCAST ++ bool ++ depends on SMP ++ default y if CPU_V6 ++ ++config CPU_NO_CACHE_BCAST_DEBUG ++ bool ++ depends on SMP ++ default y if CPU_V6 ++ + if MMU + # The copy-page model + config CPU_COPY_V3 +@@ -759,11 +769,84 @@ config CACHE_L2X0 + bool "Enable the L2x0 outer cache controller" + depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX +- default y ++ default n + select OUTER_CACHE + help + This option enables the L2x0 PrimeCell. + ++choice ++ prompt "L2 Cache Size" ++ depends on CACHE_L2X0 ++ default CACHE_L2X0_1MB ++ ++config CACHE_L2X0_128KB ++ bool "128KB" ++ help ++ 16KB/way, 8-way, evmon/parity/share enabled ++ ++config CACHE_L2X0_256KB ++ bool "256KB" ++ help ++ 32KB/way, 8-way, evmon/parity/share enabled ++ ++config CACHE_L2X0_512KB ++ bool "512KB" ++ help ++ 64KB/way, 8-way, evmon/parity/share enabled ++ ++config CACHE_L2X0_1MB ++ bool "1MB" ++ help ++ 128KB/way, 8-way, evmon/parity/share enabled ++endchoice ++ ++config CACHE_L2CC ++ bool "Enable the L2 outer cache controller" ++ depends on ARCH_CNS3XXX ++ default n ++ select OUTER_CACHE ++ help ++ This option enables the L2 cache controller. 
++ ++choice ++ prompt "L2 Cache Size" ++ depends on CACHE_L2CC ++ default CACHE_L2CC_256KB ++ ++config CACHE_L2CC_32KB ++ bool "32KB" ++ help ++ 4KB/way, 8-way, evmon/share enabled ++ ++config CACHE_L2CC_64KB ++ bool "64KB" ++ help ++ 8KB/way, 8-way, evmon/share enabled ++ ++config CACHE_L2CC_96KB ++ bool "96KB" ++ help ++ 12KB/way, 8-way, evmon/share enabled ++ ++config CACHE_L2CC_128KB ++ bool "128KB" ++ help ++ 16KB/way, 8-way, evmon/share enabled ++ ++config CACHE_L2CC_256KB ++ bool "256KB" ++ help ++ 32KB/way, 8-way, evmon/share enabled ++ ++endchoice ++ ++config CACHE_L2_I_PREFETCH ++ bool "Enable the L2 instruction prefetching" ++ depends on CACHE_L2CC ++ default y ++ help ++ This option enables instruction prefetching. ++ + config CACHE_XSC3L2 + bool "Enable the L2 cache on XScale3" + depends on CPU_XSC3 +--- a/arch/arm/mm/Makefile ++++ b/arch/arm/mm/Makefile +@@ -82,5 +82,6 @@ obj-$(CONFIG_CPU_V7) += proc-v7.o + + obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o + obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o ++obj-$(CONFIG_CACHE_L2CC) += cache-l2cc.o + obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o + +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -687,7 +687,7 @@ __early_param("vmalloc=", early_vmalloc) + + static void __init sanity_check_meminfo(void) + { +- int i, j, highmem = 0; ++ int i, j; + + for (i = 0, j = 0; i < meminfo.nr_banks; i++) { + struct membank *bank = &meminfo.bank[j]; +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -2668,3 +2668,5 @@ + #define PCI_DEVICE_ID_RME_DIGI32 0x9896 + #define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897 + #define PCI_DEVICE_ID_RME_DIGI32_8 0x9898 ++ ++#define PCI_VENDOR_ID_CAVIUM 0x177d +--- a/arch/arm/tools/mach-types ++++ b/arch/arm/tools/mach-types +@@ -12,7 +12,7 @@ + # + # http://www.arm.linux.org.uk/developer/machines/?action=new + # +-# Last update: Sat Jun 20 22:28:39 2009 ++# Last update: Wed Jun 9 02:11:30 2010 + # + # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number + # +@@ -928,7 
+928,7 @@ palmt5 MACH_PALMT5 PALMT5 917 + palmtc MACH_PALMTC PALMTC 918 + omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919 + mxc30030evb MACH_MXC30030EVB MXC30030EVB 920 +-rea_2d MACH_REA_2D REA_2D 921 ++rea_cpu2 MACH_REA_2D REA_2D 921 + eti3e524 MACH_TI3E524 TI3E524 922 + ateb9200 MACH_ATEB9200 ATEB9200 923 + auckland MACH_AUCKLAND AUCKLAND 924 +@@ -1319,7 +1319,7 @@ mistral MACH_MISTRAL MISTRAL 1315 + msm MACH_MSM MSM 1316 + ct5910 MACH_CT5910 CT5910 1317 + ct5912 MACH_CT5912 CT5912 1318 +-hynet_ine MACH_HYNET_INE HYNET_INE 1319 ++argonst_foundation MACH_HYNET_INE HYNET_INE 1319 + hynet_app MACH_HYNET_APP HYNET_APP 1320 + msm7200 MACH_MSM7200 MSM7200 1321 + msm7600 MACH_MSM7600 MSM7600 1322 +@@ -1638,7 +1638,7 @@ mx35evb MACH_MX35EVB MX35EVB 1643 + aml_m8050 MACH_AML_M8050 AML_M8050 1644 + mx35_3ds MACH_MX35_3DS MX35_3DS 1645 + mars MACH_MARS MARS 1646 +-ntosd_644xa MACH_NTOSD_644XA NTOSD_644XA 1647 ++neuros_osd2 MACH_NEUROS_OSD2 NEUROS_OSD2 1647 + badger MACH_BADGER BADGER 1648 + trizeps4wl MACH_TRIZEPS4WL TRIZEPS4WL 1649 + trizeps5 MACH_TRIZEPS5 TRIZEPS5 1650 +@@ -1654,7 +1654,7 @@ vf10xx MACH_VF10XX VF10XX 1659 + zoran43xx MACH_ZORAN43XX ZORAN43XX 1660 + sonix926 MACH_SONIX926 SONIX926 1661 + celestialsemi MACH_CELESTIALSEMI CELESTIALSEMI 1662 +-cc9m2443 MACH_CC9M2443 CC9M2443 1663 ++cc9m2443js MACH_CC9M2443JS CC9M2443JS 1663 + tw5334 MACH_TW5334 TW5334 1664 + omap_htcartemis MACH_HTCARTEMIS HTCARTEMIS 1665 + nal_hlite MACH_NAL_HLITE NAL_HLITE 1666 +@@ -1769,14 +1769,15 @@ mx31cicada MACH_MX31CICADA MX31CICADA + mi424wr MACH_MI424WR MI424WR 1778 + axs_ultrax MACH_AXS_ULTRAX AXS_ULTRAX 1779 + at572d940deb MACH_AT572D940DEB AT572D940DEB 1780 +-davinci_da8xx_evm MACH_DAVINCI_DA8XX_EVM DAVINCI_DA8XX_EVM 1781 ++davinci_da830_evm MACH_DAVINCI_DA830_EVM DAVINCI_DA830_EVM 1781 + ep9302 MACH_EP9302 EP9302 1782 + at572d940hfek MACH_AT572D940HFEB AT572D940HFEB 1783 + cybook3 MACH_CYBOOK3 CYBOOK3 1784 + wdg002 MACH_WDG002 WDG002 1785 + sg560adsl MACH_SG560ADSL SG560ADSL 
1786 + nextio_n2800_ica MACH_NEXTIO_N2800_ICA NEXTIO_N2800_ICA 1787 +-marvell_newdb MACH_MARVELL_NEWDB MARVELL_NEWDB 1789 ++dove_db MACH_DOVE_DB DOVE_DB 1788 ++dove_avng MACH_MARVELL_NEWDB MARVELL_NEWDB 1789 + vandihud MACH_VANDIHUD VANDIHUD 1790 + magx_e8 MACH_MAGX_E8 MAGX_E8 1791 + magx_z6 MACH_MAGX_Z6 MAGX_Z6 1792 +@@ -1802,7 +1803,7 @@ ccw9p9215js MACH_CCW9P9215JS CCW9P9215J + rd88f5181l_ge MACH_RD88F5181L_GE RD88F5181L_GE 1812 + sifmain MACH_SIFMAIN SIFMAIN 1813 + sam9_l9261 MACH_SAM9_L9261 SAM9_L9261 1814 +-cc9m2443js MACH_CC9M2443JS CC9M2443JS 1815 ++cc9m2443 MACH_CC9M2443 CC9M2443 1815 + xaria300 MACH_XARIA300 XARIA300 1816 + it9200 MACH_IT9200 IT9200 1817 + rd88f5181l_fxo MACH_RD88F5181L_FXO RD88F5181L_FXO 1818 +@@ -1962,7 +1963,7 @@ ethernut5 MACH_ETHERNUT5 ETHERNUT5 19 + arm11 MACH_ARM11 ARM11 1972 + cpuat9260 MACH_CPUAT9260 CPUAT9260 1973 + cpupxa255 MACH_CPUPXA255 CPUPXA255 1974 +-cpuimx27 MACH_CPUIMX27 CPUIMX27 1975 ++eukrea_cpuimx27 MACH_CPUIMX27 CPUIMX27 1975 + cheflux MACH_CHEFLUX CHEFLUX 1976 + eb_cpux9k2 MACH_EB_CPUX9K2 EB_CPUX9K2 1977 + opcotec MACH_OPCOTEC OPCOTEC 1978 +@@ -2249,14 +2250,14 @@ omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_ + darwin MACH_DARWIN DARWIN 2262 + oratiscomu MACH_ORATISCOMU ORATISCOMU 2263 + rtsbc20 MACH_RTSBC20 RTSBC20 2264 +-i780 MACH_I780 I780 2265 ++sgh_i780 MACH_I780 I780 2265 + gemini324 MACH_GEMINI324 GEMINI324 2266 + oratislan MACH_ORATISLAN ORATISLAN 2267 + oratisalog MACH_ORATISALOG ORATISALOG 2268 + oratismadi MACH_ORATISMADI ORATISMADI 2269 + oratisot16 MACH_ORATISOT16 ORATISOT16 2270 + oratisdesk MACH_ORATISDESK ORATISDESK 2271 +-v2p_ca9 MACH_V2P_CA9 V2P_CA9 2272 ++vexpress MACH_VEXPRESS VEXPRESS 2272 + sintexo MACH_SINTEXO SINTEXO 2273 + cm3389 MACH_CM3389 CM3389 2274 + omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275 +@@ -2280,3 +2281,615 @@ htcrhodium MACH_HTCRHODIUM HTCRHODIUM + htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293 + matrix504 MACH_MATRIX504 MATRIX504 2294 + mrfsa MACH_MRFSA MRFSA 2295 ++sc_p270 MACH_SC_P270 SC_P270 
2296 ++atlas5_evb MACH_ATLAS5_EVB ATLAS5_EVB 2297 ++pelco_lobox MACH_PELCO_LOBOX PELCO_LOBOX 2298 ++dilax_pcu200 MACH_DILAX_PCU200 DILAX_PCU200 2299 ++leonardo MACH_LEONARDO LEONARDO 2300 ++zoran_approach7 MACH_ZORAN_APPROACH7 ZORAN_APPROACH7 2301 ++dp6xx MACH_DP6XX DP6XX 2302 ++bcm2153_vesper MACH_BCM2153_VESPER BCM2153_VESPER 2303 ++mahimahi MACH_MAHIMAHI MAHIMAHI 2304 ++clickc MACH_CLICKC CLICKC 2305 ++zb_gateway MACH_ZB_GATEWAY ZB_GATEWAY 2306 ++tazcard MACH_TAZCARD TAZCARD 2307 ++tazdev MACH_TAZDEV TAZDEV 2308 ++annax_cb_arm MACH_ANNAX_CB_ARM ANNAX_CB_ARM 2309 ++annax_dm3 MACH_ANNAX_DM3 ANNAX_DM3 2310 ++cerebric MACH_CEREBRIC CEREBRIC 2311 ++orca MACH_ORCA ORCA 2312 ++pc9260 MACH_PC9260 PC9260 2313 ++ems285a MACH_EMS285A EMS285A 2314 ++gec2410 MACH_GEC2410 GEC2410 2315 ++gec2440 MACH_GEC2440 GEC2440 2316 ++mw903 MACH_ARCH_MW903 ARCH_MW903 2317 ++mw2440 MACH_MW2440 MW2440 2318 ++ecac2378 MACH_ECAC2378 ECAC2378 2319 ++tazkiosk MACH_TAZKIOSK TAZKIOSK 2320 ++whiterabbit_mch MACH_WHITERABBIT_MCH WHITERABBIT_MCH 2321 ++sbox9263 MACH_SBOX9263 SBOX9263 2322 ++oreo_camera MACH_OREO OREO 2323 ++smdk6442 MACH_SMDK6442 SMDK6442 2324 ++openrd_base MACH_OPENRD_BASE OPENRD_BASE 2325 ++incredible MACH_INCREDIBLE INCREDIBLE 2326 ++incrediblec MACH_INCREDIBLEC INCREDIBLEC 2327 ++heroct MACH_HEROCT HEROCT 2328 ++mmnet1000 MACH_MMNET1000 MMNET1000 2329 ++devkit8000 MACH_DEVKIT8000 DEVKIT8000 2330 ++devkit9000 MACH_DEVKIT9000 DEVKIT9000 2331 ++mx31txtr MACH_MX31TXTR MX31TXTR 2332 ++u380 MACH_U380 U380 2333 ++oamp3_hualu MACH_HUALU_BOARD HUALU_BOARD 2334 ++npcmx50 MACH_NPCMX50 NPCMX50 2335 ++mx51_lange51 MACH_MX51_LANGE51 MX51_LANGE51 2336 ++mx51_lange52 MACH_MX51_LANGE52 MX51_LANGE52 2337 ++riom MACH_RIOM RIOM 2338 ++comcas MACH_COMCAS COMCAS 2339 ++wsi_mx27 MACH_WSI_MX27 WSI_MX27 2340 ++cm_t35 MACH_CM_T35 CM_T35 2341 ++net2big MACH_NET2BIG NET2BIG 2342 ++motorola_a1600 MACH_MOTOROLA_A1600 MOTOROLA_A1600 2343 ++igep0020 MACH_IGEP0020 IGEP0020 2344 ++igep0010 MACH_IGEP0010 IGEP0010 
2345 ++mv6281gtwge2 MACH_MV6281GTWGE2 MV6281GTWGE2 2346 ++scat100 MACH_SCAT100 SCAT100 2347 ++sanmina MACH_SANMINA SANMINA 2348 ++momento MACH_MOMENTO MOMENTO 2349 ++nuc9xx MACH_NUC9XX NUC9XX 2350 ++nuc910evb MACH_NUC910EVB NUC910EVB 2351 ++nuc920evb MACH_NUC920EVB NUC920EVB 2352 ++nuc950evb MACH_NUC950EVB NUC950EVB 2353 ++nuc945evb MACH_NUC945EVB NUC945EVB 2354 ++nuc960evb MACH_NUC960EVB NUC960EVB 2355 ++nuc932evb MACH_NUC932EVB NUC932EVB 2356 ++nuc900 MACH_NUC900 NUC900 2357 ++sd1soc MACH_SD1SOC SD1SOC 2358 ++ln2440bc MACH_LN2440BC LN2440BC 2359 ++rsbc MACH_RSBC RSBC 2360 ++openrd_client MACH_OPENRD_CLIENT OPENRD_CLIENT 2361 ++hpipaq11x MACH_HPIPAQ11X HPIPAQ11X 2362 ++wayland MACH_WAYLAND WAYLAND 2363 ++acnbsx102 MACH_ACNBSX102 ACNBSX102 2364 ++hwat91 MACH_HWAT91 HWAT91 2365 ++at91sam9263cs MACH_AT91SAM9263CS AT91SAM9263CS 2366 ++csb732 MACH_CSB732 CSB732 2367 ++u8500 MACH_U8500 U8500 2368 ++huqiu MACH_HUQIU HUQIU 2369 ++mx51_kunlun MACH_MX51_KUNLUN MX51_KUNLUN 2370 ++pmt1g MACH_PMT1G PMT1G 2371 ++htcelf MACH_HTCELF HTCELF 2372 ++armadillo420 MACH_ARMADILLO420 ARMADILLO420 2373 ++armadillo440 MACH_ARMADILLO440 ARMADILLO440 2374 ++u_chip_dual_arm MACH_U_CHIP_DUAL_ARM U_CHIP_DUAL_ARM 2375 ++csr_bdb3 MACH_CSR_BDB3 CSR_BDB3 2376 ++dolby_cat1018 MACH_DOLBY_CAT1018 DOLBY_CAT1018 2377 ++hy9307 MACH_HY9307 HY9307 2378 ++aspire_easystore MACH_A_ES A_ES 2379 ++davinci_irif MACH_DAVINCI_IRIF DAVINCI_IRIF 2380 ++agama9263 MACH_AGAMA9263 AGAMA9263 2381 ++marvell_jasper MACH_MARVELL_JASPER MARVELL_JASPER 2382 ++flint MACH_FLINT FLINT 2383 ++tavorevb3 MACH_TAVOREVB3 TAVOREVB3 2384 ++sch_m490 MACH_SCH_M490 SCH_M490 2386 ++rbl01 MACH_RBL01 RBL01 2387 ++omnifi MACH_OMNIFI OMNIFI 2388 ++otavalo MACH_OTAVALO OTAVALO 2389 ++siena MACH_SIENNA SIENNA 2390 ++htc_excalibur_s620 MACH_HTC_EXCALIBUR_S620 HTC_EXCALIBUR_S620 2391 ++htc_opal MACH_HTC_OPAL HTC_OPAL 2392 ++touchbook MACH_TOUCHBOOK TOUCHBOOK 2393 ++latte MACH_LATTE LATTE 2394 ++xa200 MACH_XA200 XA200 2395 ++nimrod MACH_NIMROD 
NIMROD 2396 ++cc9p9215_3g MACH_CC9P9215_3G CC9P9215_3G 2397 ++cc9p9215_3gjs MACH_CC9P9215_3GJS CC9P9215_3GJS 2398 ++tk71 MACH_TK71 TK71 2399 ++comham3525 MACH_COMHAM3525 COMHAM3525 2400 ++mx31erebus MACH_MX31EREBUS MX31EREBUS 2401 ++mcardmx27 MACH_MCARDMX27 MCARDMX27 2402 ++paradise MACH_PARADISE PARADISE 2403 ++tide MACH_TIDE TIDE 2404 ++wzl2440 MACH_WZL2440 WZL2440 2405 ++sdrdemo MACH_SDRDEMO SDRDEMO 2406 ++ethercan2 MACH_ETHERCAN2 ETHERCAN2 2407 ++ecmimg20 MACH_ECMIMG20 ECMIMG20 2408 ++omap_dragon MACH_OMAP_DRAGON OMAP_DRAGON 2409 ++halo MACH_HALO HALO 2410 ++huangshan MACH_HUANGSHAN HUANGSHAN 2411 ++vl_ma2sc MACH_VL_MA2SC VL_MA2SC 2412 ++raumfeld_rc MACH_RAUMFELD_RC RAUMFELD_RC 2413 ++raumfeld_connector MACH_RAUMFELD_CONNECTOR RAUMFELD_CONNECTOR 2414 ++raumfeld_speaker MACH_RAUMFELD_SPEAKER RAUMFELD_SPEAKER 2415 ++multibus_master MACH_MULTIBUS_MASTER MULTIBUS_MASTER 2416 ++multibus_pbk MACH_MULTIBUS_PBK MULTIBUS_PBK 2417 ++tnetv107x MACH_TNETV107X TNETV107X 2418 ++snake MACH_SNAKE SNAKE 2419 ++cwmx27 MACH_CWMX27 CWMX27 2420 ++sch_m480 MACH_SCH_M480 SCH_M480 2421 ++platypus MACH_PLATYPUS PLATYPUS 2422 ++pss2 MACH_PSS2 PSS2 2423 ++davinci_apm150 MACH_DAVINCI_APM150 DAVINCI_APM150 2424 ++str9100 MACH_STR9100 STR9100 2425 ++net5big MACH_NET5BIG NET5BIG 2426 ++seabed9263 MACH_SEABED9263 SEABED9263 2427 ++mx51_m2id MACH_MX51_M2ID MX51_M2ID 2428 ++octvocplus_eb MACH_OCTVOCPLUS_EB OCTVOCPLUS_EB 2429 ++klk_firefox MACH_KLK_FIREFOX KLK_FIREFOX 2430 ++klk_wirma_module MACH_KLK_WIRMA_MODULE KLK_WIRMA_MODULE 2431 ++klk_wirma_mmi MACH_KLK_WIRMA_MMI KLK_WIRMA_MMI 2432 ++supersonic MACH_SUPERSONIC SUPERSONIC 2433 ++liberty MACH_LIBERTY LIBERTY 2434 ++mh355 MACH_MH355 MH355 2435 ++pc7802 MACH_PC7802 PC7802 2436 ++gnet_sgc MACH_GNET_SGC GNET_SGC 2437 ++einstein15 MACH_EINSTEIN15 EINSTEIN15 2438 ++cmpd MACH_CMPD CMPD 2439 ++davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440 ++lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441 ++ea313x MACH_EA313X EA313X 2442 ++fwbd_39064 
MACH_FWBD_39064 FWBD_39064 2443 ++fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444 ++pelco_moe MACH_PELCO_MOE PELCO_MOE 2445 ++minimix27 MACH_MINIMIX27 MINIMIX27 2446 ++omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447 ++passionc MACH_PASSIONC PASSIONC 2448 ++mx27amata MACH_MX27AMATA MX27AMATA 2449 ++bgat1 MACH_BGAT1 BGAT1 2450 ++buzz MACH_BUZZ BUZZ 2451 ++mb9g20 MACH_MB9G20 MB9G20 2452 ++yushan MACH_YUSHAN YUSHAN 2453 ++lizard MACH_LIZARD LIZARD 2454 ++omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455 ++smdkv210 MACH_SMDKV210 SMDKV210 2456 ++bravo MACH_BRAVO BRAVO 2457 ++siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458 ++siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459 ++sm3k MACH_SM3K SM3K 2460 ++acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461 ++sst61vc010_dev MACH_SST61VC010_DEV SST61VC010_DEV 2462 ++glittertind MACH_GLITTERTIND GLITTERTIND 2463 ++omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464 ++omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465 ++cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466 ++torino_s MACH_TORINO_S TORINO_S 2467 ++havana MACH_HAVANA HAVANA 2468 ++beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469 ++vanguard MACH_VANGUARD VANGUARD 2470 ++s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471 ++cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472 ++aster MACH_ASTER ASTER 2473 ++voguesv210 MACH_VOGUESV210 VOGUESV210 2474 ++acm500x MACH_ACM500X ACM500X 2475 ++km9260 MACH_KM9260 KM9260 2476 ++nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477 ++ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478 ++smartq7 MACH_SMARTQ7 SMARTQ7 2479 ++at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480 ++asusp527 MACH_ASUSP527 ASUSP527 2481 ++at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482 ++topasa900 MACH_TOPASA900 TOPASA900 2483 ++electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484 ++mx51grb MACH_MX51GRB MX51GRB 2485 ++xea300 MACH_XEA300 XEA300 2486 ++htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487 ++lima MACH_LIMA LIMA 2488 ++csb740 MACH_CSB740 CSB740 2489 ++usb_s8815 MACH_USB_S8815 
USB_S8815 2490 ++watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491 ++milkyway MACH_MILKYWAY MILKYWAY 2492 ++g4evm MACH_G4EVM G4EVM 2493 ++picomod6 MACH_PICOMOD6 PICOMOD6 2494 ++omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495 ++ip6000 MACH_IP6000 IP6000 2496 ++ip6010 MACH_IP6010 IP6010 2497 ++utm400 MACH_UTM400 UTM400 2498 ++omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499 ++wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500 ++sx560 MACH_SX560 SX560 2501 ++ts41x MACH_TS41X TS41X 2502 ++elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503 ++rhobot MACH_RHOBOT RHOBOT 2504 ++mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505 ++ls9260 MACH_LS9260 LS9260 2506 ++shank MACH_SHANK SHANK 2507 ++qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508 ++at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509 ++hiram MACH_HIRAM HIRAM 2510 ++phy3250 MACH_PHY3250 PHY3250 2511 ++ea3250 MACH_EA3250 EA3250 2512 ++fdi3250 MACH_FDI3250 FDI3250 2513 ++htcwhitestone MACH_WHITESTONE WHITESTONE 2514 ++at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515 ++ccmx51 MACH_CCMX51 CCMX51 2516 ++ccmx51js MACH_CCMX51JS CCMX51JS 2517 ++ccwmx51 MACH_CCWMX51 CCWMX51 2518 ++ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519 ++mini6410 MACH_MINI6410 MINI6410 2520 ++tiny6410 MACH_TINY6410 TINY6410 2521 ++nano6410 MACH_NANO6410 NANO6410 2522 ++at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523 ++htcleo MACH_HTCLEO HTCLEO 2524 ++avp13 MACH_AVP13 AVP13 2525 ++xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526 ++vpnext MACH_VPNEXT VPNEXT 2527 ++swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528 ++tx51 MACH_TX51 TX51 2529 ++dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530 ++mx28evk MACH_MX28EVK MX28EVK 2531 ++phoenix260 MACH_PHOENIX260 PHOENIX260 2532 ++uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533 ++smartq5 MACH_SMARTQ5 SMARTQ5 2534 ++all3078 MACH_ALL3078 ALL3078 2535 ++ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536 ++siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537 ++epb5000 MACH_EPB5000 EPB5000 2538 
++hy9263 MACH_HY9263 HY9263 2539 ++acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540 ++acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541 ++acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542 ++acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543 ++acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544 ++bonnell MACH_BONNELL BONNELL 2545 ++oht_mx27 MACH_OHT_MX27 OHT_MX27 2546 ++htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547 ++davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548 ++c3ax03 MACH_C3AX03 C3AX03 2549 ++mxt_td60 MACH_MXT_TD60 MXT_TD60 2550 ++esyx MACH_ESYX ESYX 2551 ++dove_db2 MACH_DOVE_DB2 DOVE_DB2 2552 ++bulldog MACH_BULLDOG BULLDOG 2553 ++derell_me2000 MACH_DERELL_ME2000 DERELL_ME2000 2554 ++bcmring_base MACH_BCMRING_BASE BCMRING_BASE 2555 ++bcmring_evm MACH_BCMRING_EVM BCMRING_EVM 2556 ++bcmring_evm_jazz MACH_BCMRING_EVM_JAZZ BCMRING_EVM_JAZZ 2557 ++bcmring_sp MACH_BCMRING_SP BCMRING_SP 2558 ++bcmring_sv MACH_BCMRING_SV BCMRING_SV 2559 ++bcmring_sv_jazz MACH_BCMRING_SV_JAZZ BCMRING_SV_JAZZ 2560 ++bcmring_tablet MACH_BCMRING_TABLET BCMRING_TABLET 2561 ++bcmring_vp MACH_BCMRING_VP BCMRING_VP 2562 ++bcmring_evm_seikor MACH_BCMRING_EVM_SEIKOR BCMRING_EVM_SEIKOR 2563 ++bcmring_sp_wqvga MACH_BCMRING_SP_WQVGA BCMRING_SP_WQVGA 2564 ++bcmring_custom MACH_BCMRING_CUSTOM BCMRING_CUSTOM 2565 ++acer_s200 MACH_ACER_S200 ACER_S200 2566 ++bt270 MACH_BT270 BT270 2567 ++iseo MACH_ISEO ISEO 2568 ++cezanne MACH_CEZANNE CEZANNE 2569 ++lucca MACH_LUCCA LUCCA 2570 ++supersmart MACH_SUPERSMART SUPERSMART 2571 ++arm11_board MACH_CS_MISANO CS_MISANO 2572 ++magnolia2 MACH_MAGNOLIA2 MAGNOLIA2 2573 ++emxx MACH_EMXX EMXX 2574 ++outlaw MACH_OUTLAW OUTLAW 2575 ++riot_bei2 MACH_RIOT_BEI2 RIOT_BEI2 2576 ++riot_vox MACH_RIOT_VOX RIOT_VOX 2577 ++riot_x37 MACH_RIOT_X37 RIOT_X37 2578 ++mega25mx MACH_MEGA25MX MEGA25MX 2579 ++benzina2 MACH_BENZINA2 BENZINA2 2580 ++ignite MACH_IGNITE IGNITE 2581 ++foggia MACH_FOGGIA FOGGIA 2582 ++arezzo MACH_AREZZO AREZZO 2583 
++leica_skywalker MACH_LEICA_SKYWALKER LEICA_SKYWALKER 2584 ++jacinto2_jamr MACH_JACINTO2_JAMR JACINTO2_JAMR 2585 ++gts_nova MACH_GTS_NOVA GTS_NOVA 2586 ++p3600 MACH_P3600 P3600 2587 ++dlt2 MACH_DLT2 DLT2 2588 ++df3120 MACH_DF3120 DF3120 2589 ++ecucore_9g20 MACH_ECUCORE_9G20 ECUCORE_9G20 2590 ++nautel_lpc3240 MACH_NAUTEL_LPC3240 NAUTEL_LPC3240 2591 ++glacier MACH_GLACIER GLACIER 2592 ++phrazer_bulldog MACH_PHRAZER_BULLDOG PHRAZER_BULLDOG 2593 ++omap3_bulldog MACH_OMAP3_BULLDOG OMAP3_BULLDOG 2594 ++pca101 MACH_PCA101 PCA101 2595 ++buzzc MACH_BUZZC BUZZC 2596 ++sasie2 MACH_SASIE2 SASIE2 2597 ++davinci_dm6467_cio MACH_DAVINCI_CIO DAVINCI_CIO 2598 ++smartmeter_dl MACH_SMARTMETER_DL SMARTMETER_DL 2599 ++wzl6410 MACH_WZL6410 WZL6410 2600 ++wzl6410m MACH_WZL6410M WZL6410M 2601 ++wzl6410f MACH_WZL6410F WZL6410F 2602 ++wzl6410i MACH_WZL6410I WZL6410I 2603 ++spacecom1 MACH_SPACECOM1 SPACECOM1 2604 ++pingu920 MACH_PINGU920 PINGU920 2605 ++bravoc MACH_BRAVOC BRAVOC 2606 ++cybo2440 MACH_CYBO2440 CYBO2440 2607 ++vdssw MACH_VDSSW VDSSW 2608 ++romulus MACH_ROMULUS ROMULUS 2609 ++omap_magic MACH_OMAP_MAGIC OMAP_MAGIC 2610 ++eltd100 MACH_ELTD100 ELTD100 2611 ++capc7117 MACH_CAPC7117 CAPC7117 2612 ++swan MACH_SWAN SWAN 2613 ++veu MACH_VEU VEU 2614 ++rm2 MACH_RM2 RM2 2615 ++tt2100 MACH_TT2100 TT2100 2616 ++venice MACH_VENICE VENICE 2617 ++pc7323 MACH_PC7323 PC7323 2618 ++masp MACH_MASP MASP 2619 ++fujitsu_tvstbsoc0 MACH_FUJITSU_TVSTBSOC FUJITSU_TVSTBSOC 2620 ++fujitsu_tvstbsoc1 MACH_FUJITSU_TVSTBSOC1 FUJITSU_TVSTBSOC1 2621 ++lexikon MACH_LEXIKON LEXIKON 2622 ++mini2440v2 MACH_MINI2440V2 MINI2440V2 2623 ++icontrol MACH_ICONTROL ICONTROL 2624 ++sheevad MACH_SHEEVAD SHEEVAD 2625 ++qsd8x50a_st1_1 MACH_QSD8X50A_ST1_1 QSD8X50A_ST1_1 2626 ++qsd8x50a_st1_5 MACH_QSD8X50A_ST1_5 QSD8X50A_ST1_5 2627 ++bee MACH_BEE BEE 2628 ++mx23evk MACH_MX23EVK MX23EVK 2629 ++ap4evb MACH_AP4EVB AP4EVB 2630 ++stockholm MACH_STOCKHOLM STOCKHOLM 2631 ++lpc_h3131 MACH_LPC_H3131 LPC_H3131 2632 ++stingray 
MACH_STINGRAY STINGRAY 2633 ++kraken MACH_KRAKEN KRAKEN 2634 ++gw2388 MACH_GW2388 GW2388 2635 ++jadecpu MACH_JADECPU JADECPU 2636 ++carlisle MACH_CARLISLE CARLISLE 2637 ++lux_sf9 MACH_LUX_SFT9 LUX_SFT9 2638 ++nemid_tb MACH_NEMID_TB NEMID_TB 2639 ++terrier MACH_TERRIER TERRIER 2640 ++turbot MACH_TURBOT TURBOT 2641 ++sanddab MACH_SANDDAB SANDDAB 2642 ++mx35_cicada MACH_MX35_CICADA MX35_CICADA 2643 ++ghi2703d MACH_GHI2703D GHI2703D 2644 ++lux_sfx9 MACH_LUX_SFX9 LUX_SFX9 2645 ++lux_sf9g MACH_LUX_SF9G LUX_SF9G 2646 ++lux_edk9 MACH_LUX_EDK9 LUX_EDK9 2647 ++hw90240 MACH_HW90240 HW90240 2648 ++dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649 ++mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650 ++scat110 MACH_SCAT110 SCAT110 2651 ++acer_a1 MACH_ACER_A1 ACER_A1 2652 ++cmcontrol MACH_CMCONTROL CMCONTROL 2653 ++pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654 ++rfp43 MACH_RFP43 RFP43 2655 ++sk86r0301 MACH_SK86R0301 SK86R0301 2656 ++ctpxa MACH_CTPXA CTPXA 2657 ++epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658 ++guruplug MACH_GURUPLUG GURUPLUG 2659 ++spear310 MACH_SPEAR310 SPEAR310 2660 ++spear320 MACH_SPEAR320 SPEAR320 2661 ++robotx MACH_ROBOTX ROBOTX 2662 ++lsxhl MACH_LSXHL LSXHL 2663 ++smartlite MACH_SMARTLITE SMARTLITE 2664 ++cws2 MACH_CWS2 CWS2 2665 ++m619 MACH_M619 M619 2666 ++smartview MACH_SMARTVIEW SMARTVIEW 2667 ++lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668 ++kizbox MACH_KIZBOX KIZBOX 2669 ++htccharmer MACH_HTCCHARMER HTCCHARMER 2670 ++guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671 ++pm9g45 MACH_PM9G45 PM9G45 2672 ++htcpanther MACH_HTCPANTHER HTCPANTHER 2673 ++htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674 ++reb01 MACH_REB01 REB01 2675 ++aquila MACH_AQUILA AQUILA 2676 ++spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677 ++sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678 ++msm7x30_surf MACH_MSM7X30_SURF MSM7X30_SURF 2679 ++micro2440 MACH_MICRO2440 MICRO2440 2680 ++am2440 MACH_AM2440 AM2440 2681 ++tq2440 MACH_TQ2440 TQ2440 2682 ++lpc2478oem MACH_LPC2478OEM 
LPC2478OEM 2683 ++ak880x MACH_AK880X AK880X 2684 ++cobra3530 MACH_COBRA3530 COBRA3530 2685 ++pmppb MACH_PMPPB PMPPB 2686 ++u6715 MACH_U6715 U6715 2687 ++axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688 ++g30_dvb MACH_G30_DVB G30_DVB 2689 ++vc088x MACH_VC088X VC088X 2690 ++mioa702 MACH_MIOA702 MIOA702 2691 ++hpmin MACH_HPMIN HPMIN 2692 ++ak880xak MACH_AK880XAK AK880XAK 2693 ++arm926tomap850 MACH_ARM926TOMAP850 ARM926TOMAP850 2694 ++lkevm MACH_LKEVM LKEVM 2695 ++mw6410 MACH_MW6410 MW6410 2696 ++terastation_wxl MACH_TERASTATION_WXL TERASTATION_WXL 2697 ++cpu8000e MACH_CPU8000E CPU8000E 2698 ++catania MACH_CATANIA CATANIA 2699 ++tokyo MACH_TOKYO TOKYO 2700 ++msm7201a_surf MACH_MSM7201A_SURF MSM7201A_SURF 2701 ++msm7201a_ffa MACH_MSM7201A_FFA MSM7201A_FFA 2702 ++msm7x25_surf MACH_MSM7X25_SURF MSM7X25_SURF 2703 ++msm7x25_ffa MACH_MSM7X25_FFA MSM7X25_FFA 2704 ++msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705 ++msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706 ++msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707 ++qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708 ++qsd8x50_comet MACH_QSD8X50_COMET QSD8X50_COMET 2709 ++qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710 ++qsd8x50a_surf MACH_QSD8X50A_SURF QSD8X50A_SURF 2711 ++qsd8x50a_ffa MACH_QSD8X50A_FFA QSD8X50A_FFA 2712 ++adx_xgcp10 MACH_ADX_XGCP10 ADX_XGCP10 2713 ++mcgwumts2a MACH_MCGWUMTS2A MCGWUMTS2A 2714 ++mobikt MACH_MOBIKT MOBIKT 2715 ++mx53_evk MACH_MX53_EVK MX53_EVK 2716 ++igep0030 MACH_IGEP0030 IGEP0030 2717 ++axell_h40_h50_ctrl MACH_AXELL_H40_H50_CTRL AXELL_H40_H50_CTRL 2718 ++dtcommod MACH_DTCOMMOD DTCOMMOD 2719 ++gould MACH_GOULD GOULD 2720 ++siberia MACH_SIBERIA SIBERIA 2721 ++sbc3530 MACH_SBC3530 SBC3530 2722 ++qarm MACH_QARM QARM 2723 ++mips MACH_MIPS MIPS 2724 ++mx27grb MACH_MX27GRB MX27GRB 2725 ++sbc8100 MACH_SBC8100 SBC8100 2726 ++saarb MACH_SAARB SAARB 2727 ++omap3mini MACH_OMAP3MINI OMAP3MINI 2728 ++cnmbook7se MACH_CNMBOOK7SE CNMBOOK7SE 2729 ++catan MACH_CATAN CATAN 2730 ++harmony MACH_HARMONY HARMONY 2731 
++tonga MACH_TONGA TONGA 2732 ++cybook_orizon MACH_CYBOOK_ORIZON CYBOOK_ORIZON 2733 ++htcrhodiumcdma MACH_HTCRHODIUMCDMA HTCRHODIUMCDMA 2734 ++epc_g45 MACH_EPC_G45 EPC_G45 2735 ++epc_lpc3250 MACH_EPC_LPC3250 EPC_LPC3250 2736 ++mxc91341evb MACH_MXC91341EVB MXC91341EVB 2737 ++rtw1000 MACH_RTW1000 RTW1000 2738 ++bobcat MACH_BOBCAT BOBCAT 2739 ++trizeps6 MACH_TRIZEPS6 TRIZEPS6 2740 ++msm7x30_fluid MACH_MSM7X30_FLUID MSM7X30_FLUID 2741 ++nedap9263 MACH_NEDAP9263 NEDAP9263 2742 ++netgear_ms2110 MACH_NETGEAR_MS2110 NETGEAR_MS2110 2743 ++bmx MACH_BMX BMX 2744 ++netstream MACH_NETSTREAM NETSTREAM 2745 ++vpnext_rcu MACH_VPNEXT_RCU VPNEXT_RCU 2746 ++vpnext_mpu MACH_VPNEXT_MPU VPNEXT_MPU 2747 ++bcmring_tablet_v1 MACH_BCMRING_TABLET_V1 BCMRING_TABLET_V1 2748 ++sgarm10 MACH_SGARM10 SGARM10 2749 ++cm_t3517 MACH_CM_T3517 CM_T3517 2750 ++omap3_cps MACH_OMAP3_CPS OMAP3_CPS 2751 ++axar1500_receiver MACH_AXAR1500_RECEIVER AXAR1500_RECEIVER 2752 ++wbd222 MACH_WBD222 WBD222 2753 ++mt65xx MACH_MT65XX MT65XX 2754 ++msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755 ++msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756 ++vmc300 MACH_VMC300 VMC300 2757 ++tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758 ++nanos MACH_NANOS NANOS 2759 ++stamp9g10 MACH_STAMP9G10 STAMP9G10 2760 ++stamp9g45 MACH_STAMP9G45 STAMP9G45 2761 ++h6053 MACH_H6053 H6053 2762 ++smint01 MACH_SMINT01 SMINT01 2763 ++prtlvt2 MACH_PRTLVT2 PRTLVT2 2764 ++ap420 MACH_AP420 AP420 2765 ++htcclio MACH_HTCSHIFT HTCSHIFT 2766 ++davinci_dm365_fc MACH_DAVINCI_DM365_FC DAVINCI_DM365_FC 2767 ++msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768 ++msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769 ++esl_vamana MACH_ESL_VAMANA ESL_VAMANA 2770 ++sbc35 MACH_SBC35 SBC35 2771 ++mpx6446 MACH_MPX6446 MPX6446 2772 ++oreo_controller MACH_OREO_CONTROLLER OREO_CONTROLLER 2773 ++kopin_models MACH_KOPIN_MODELS KOPIN_MODELS 2774 ++ttc_vision2 MACH_TTC_VISION2 TTC_VISION2 2775 ++cns3420vb MACH_CNS3420VB CNS3420VB 2776 ++lpc_evo MACH_LPC2 LPC2 2777 ++olympus MACH_OLYMPUS 
OLYMPUS 2778 ++vortex MACH_VORTEX VORTEX 2779 ++s5pc200 MACH_S5PC200 S5PC200 2780 ++ecucore_9263 MACH_ECUCORE_9263 ECUCORE_9263 2781 ++smdkc200 MACH_SMDKC200 SMDKC200 2782 ++emsiso_sx27 MACH_EMSISO_SX27 EMSISO_SX27 2783 ++apx_som9g45_ek MACH_APX_SOM9G45_EK APX_SOM9G45_EK 2784 ++songshan MACH_SONGSHAN SONGSHAN 2785 ++tianshan MACH_TIANSHAN TIANSHAN 2786 ++vpx500 MACH_VPX500 VPX500 2787 ++am3517sam MACH_AM3517SAM AM3517SAM 2788 ++skat91_sim508 MACH_SKAT91_SIM508 SKAT91_SIM508 2789 ++skat91_s3e MACH_SKAT91_S3E SKAT91_S3E 2790 ++omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791 ++df7220 MACH_DF7220 DF7220 2792 ++nemini MACH_NEMINI NEMINI 2793 ++t8200 MACH_T8200 T8200 2794 ++apf51 MACH_APF51 APF51 2795 ++dr_rc_unit MACH_DR_RC_UNIT DR_RC_UNIT 2796 ++bordeaux MACH_BORDEAUX BORDEAUX 2797 ++catania_b MACH_CATANIA_B CATANIA_B 2798 ++mx51_ocean MACH_MX51_OCEAN MX51_OCEAN 2799 ++ti8168evm MACH_TI8168EVM TI8168EVM 2800 ++neocoreomap MACH_NEOCOREOMAP NEOCOREOMAP 2801 ++withings_wbp MACH_WITHINGS_WBP WITHINGS_WBP 2802 ++dbps MACH_DBPS DBPS 2803 ++at91sam9261 MACH_SBC9261 SBC9261 2804 ++pcbfp0001 MACH_PCBFP0001 PCBFP0001 2805 ++speedy MACH_SPEEDY SPEEDY 2806 ++chrysaor MACH_CHRYSAOR CHRYSAOR 2807 ++tango MACH_TANGO TANGO 2808 ++synology_dsx11 MACH_SYNOLOGY_DSX11 SYNOLOGY_DSX11 2809 ++hanlin_v3ext MACH_HANLIN_V3EXT HANLIN_V3EXT 2810 ++hanlin_v5 MACH_HANLIN_V5 HANLIN_V5 2811 ++hanlin_v3plus MACH_HANLIN_V3PLUS HANLIN_V3PLUS 2812 ++iriver_story MACH_IRIVER_STORY IRIVER_STORY 2813 ++irex_iliad MACH_IREX_ILIAD IREX_ILIAD 2814 ++irex_dr1000 MACH_IREX_DR1000 IREX_DR1000 2815 ++teton_bga MACH_TETON_BGA TETON_BGA 2816 ++snapper9g45 MACH_SNAPPER9G45 SNAPPER9G45 2817 ++tam3517 MACH_TAM3517 TAM3517 2818 ++pdc100 MACH_PDC100 PDC100 2819 ++eukrea_cpuimx25sd MACH_EUKREA_CPUIMX25 EUKREA_CPUIMX25 2820 ++eukrea_cpuimx35sd MACH_EUKREA_CPUIMX35 EUKREA_CPUIMX35 2821 ++eukrea_cpuimx51sd MACH_EUKREA_CPUIMX51SD EUKREA_CPUIMX51SD 2822 ++eukrea_cpuimx51 MACH_EUKREA_CPUIMX51 EUKREA_CPUIMX51 2823 ++p565 MACH_P565 
P565 2824 ++acer_a4 MACH_ACER_A4 ACER_A4 2825 ++davinci_dm368_bip MACH_DAVINCI_DM368_BIP DAVINCI_DM368_BIP 2826 ++eshare MACH_ESHARE ESHARE 2827 ++hw_omapl138_europa MACH_HW_OMAPL138_EUROPA HW_OMAPL138_EUROPA 2828 ++wlbargn MACH_WLBARGN WLBARGN 2829 ++bm170 MACH_BM170 BM170 2830 ++netspace_mini_v2 MACH_NETSPACE_MINI_V2 NETSPACE_MINI_V2 2831 ++netspace_plug_v2 MACH_NETSPACE_PLUG_V2 NETSPACE_PLUG_V2 2832 ++siemens_l1 MACH_SIEMENS_L1 SIEMENS_L1 2833 ++elv_lcu1 MACH_ELV_LCU1 ELV_LCU1 2834 ++mcu1 MACH_MCU1 MCU1 2835 ++omap3_tao3530 MACH_OMAP3_TAO3530 OMAP3_TAO3530 2836 ++omap3_pcutouch MACH_OMAP3_PCUTOUCH OMAP3_PCUTOUCH 2837 ++smdkc210 MACH_SMDKC210 SMDKC210 2838 ++omap3_braillo MACH_OMAP3_BRAILLO OMAP3_BRAILLO 2839 ++spyplug MACH_SPYPLUG SPYPLUG 2840 ++ginger MACH_GINGER GINGER 2841 ++tny_t3530 MACH_TNY_T3530 TNY_T3530 2842 ++pca102 MACH_PCA102 PCA102 2843 ++spade MACH_SPADE SPADE 2844 ++mxc25_topaz MACH_MXC25_TOPAZ MXC25_TOPAZ 2845 ++t5325 MACH_T5325 T5325 2846 ++gw2361 MACH_GW2361 GW2361 2847 ++elog MACH_ELOG ELOG 2848 ++income MACH_INCOME INCOME 2849 ++bcm589x MACH_BCM589X BCM589X 2850 ++etna MACH_ETNA ETNA 2851 ++hawks MACH_HAWKS HAWKS 2852 ++meson MACH_MESON MESON 2853 ++xsbase255 MACH_XSBASE255 XSBASE255 2854 ++pvm2030 MACH_PVM2030 PVM2030 2855 ++mioa502 MACH_MIOA502 MIOA502 2856 ++vvbox_sdorig2 MACH_VVBOX_SDORIG2 VVBOX_SDORIG2 2857 ++vvbox_sdlite2 MACH_VVBOX_SDLITE2 VVBOX_SDLITE2 2858 ++vvbox_sdpro4 MACH_VVBOX_SDPRO4 VVBOX_SDPRO4 2859 ++htc_spv_m700 MACH_HTC_SPV_M700 HTC_SPV_M700 2860 ++mx257sx MACH_MX257SX MX257SX 2861 ++goni MACH_GONI GONI 2862 ++msm8x55_svlte_ffa MACH_MSM8X55_SVLTE_FFA MSM8X55_SVLTE_FFA 2863 ++msm8x55_svlte_surf MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF 2864 ++quickstep MACH_QUICKSTEP QUICKSTEP 2865 ++dmw96 MACH_DMW96 DMW96 2866 ++hammerhead MACH_HAMMERHEAD HAMMERHEAD 2867 ++trident MACH_TRIDENT TRIDENT 2868 ++lightning MACH_LIGHTNING LIGHTNING 2869 ++iconnect MACH_ICONNECT ICONNECT 2870 ++autobot MACH_AUTOBOT AUTOBOT 2871 ++coconut 
MACH_COCONUT COCONUT 2872 ++durian MACH_DURIAN DURIAN 2873 ++cayenne MACH_CAYENNE CAYENNE 2874 ++fuji MACH_FUJI FUJI 2875 ++synology_6282 MACH_SYNOLOGY_6282 SYNOLOGY_6282 2876 ++em1sy MACH_EM1SY EM1SY 2877 ++m502 MACH_M502 M502 2878 ++matrix518 MACH_MATRIX518 MATRIX518 2879 ++tiny_gurnard MACH_TINY_GURNARD TINY_GURNARD 2880 ++spear1310 MACH_SPEAR1310 SPEAR1310 2881 ++bv07 MACH_BV07 BV07 2882 ++mxt_td61 MACH_MXT_TD61 MXT_TD61 2883 ++openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884 ++devixp MACH_DEVIXP DEVIXP 2885 ++miccpt MACH_MICCPT MICCPT 2886 ++mic256 MACH_MIC256 MIC256 2887 ++as1167 MACH_AS1167 AS1167 2888 ++omap3_ibiza MACH_OMAP3_IBIZA OMAP3_IBIZA 2889 ++u5500 MACH_U5500 U5500 2890 ++davinci_picto MACH_DAVINCI_PICTO DAVINCI_PICTO 2891 ++mecha MACH_MECHA MECHA 2892 ++bubba3 MACH_BUBBA3 BUBBA3 2893 ++pupitre MACH_PUPITRE PUPITRE 2894 ++tegra_harmony MACH_TEGRA_HARMONY TEGRA_HARMONY 2895 ++tegra_vogue MACH_TEGRA_VOGUE TEGRA_VOGUE 2896 ++tegra_e1165 MACH_TEGRA_E1165 TEGRA_E1165 2897 ++simplenet MACH_SIMPLENET SIMPLENET 2898 ++ec4350tbm MACH_EC4350TBM EC4350TBM 2899 ++pec_tc MACH_PEC_TC PEC_TC 2900 ++pec_hc2 MACH_PEC_HC2 PEC_HC2 2901 ++esl_mobilis_a MACH_ESL_MOBILIS_A ESL_MOBILIS_A 2902 ++esl_mobilis_b MACH_ESL_MOBILIS_B ESL_MOBILIS_B 2903 ++esl_wave_a MACH_ESL_WAVE_A ESL_WAVE_A 2904 ++esl_wave_b MACH_ESL_WAVE_B ESL_WAVE_B 2905 ++unisense_mmm MACH_UNISENSE_MMM UNISENSE_MMM 2906 ++blueshark MACH_BLUESHARK BLUESHARK 2907 ++e10 MACH_E10 E10 2908 +--- a/arch/arm/include/asm/thread_info.h ++++ b/arch/arm/include/asm/thread_info.h +@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct t + extern void iwmmxt_task_release(struct thread_info *); + extern void iwmmxt_task_switch(struct thread_info *); + +-extern void vfp_sync_state(struct thread_info *thread); ++extern void vfp_sync_hwstate(struct thread_info *); ++extern void vfp_flush_hwstate(struct thread_info *); + + #endif + +--- a/arch/arm/kernel/ptrace.c ++++ b/arch/arm/kernel/ptrace.c +@@ -663,7 +663,7 
@@ static int ptrace_getvfpregs(struct task + union vfp_state *vfp = &thread->vfpstate; + struct user_vfp __user *ufp = data; + +- vfp_sync_state(thread); ++ vfp_sync_hwstate(thread); + + /* copy the floating point registers */ + if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, +@@ -686,7 +686,7 @@ static int ptrace_setvfpregs(struct task + union vfp_state *vfp = &thread->vfpstate; + struct user_vfp __user *ufp = data; + +- vfp_sync_state(thread); ++ vfp_sync_hwstate(thread); + + /* copy the floating point registers */ + if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, +@@ -697,6 +697,8 @@ static int ptrace_setvfpregs(struct task + if (get_user(vfp->hard.fpscr, &ufp->fpscr)) + return -EFAULT; + ++ vfp_flush_hwstate(thread); ++ + return 0; + } + #endif +--- a/arch/arm/vfp/entry.S ++++ b/arch/arm/vfp/entry.S +@@ -42,6 +42,7 @@ ENTRY(vfp_null_entry) + mov pc, lr + ENDPROC(vfp_null_entry) + ++ .align 2 + .LCvfp: + .word vfp_vector + +@@ -61,6 +62,7 @@ ENTRY(vfp_testing_entry) + mov pc, r9 @ we have handled the fault + ENDPROC(vfp_testing_entry) + ++ .align 2 + VFP_arch_address: + .word VFP_arch + +--- a/arch/arm/vfp/vfphw.S ++++ b/arch/arm/vfp/vfphw.S +@@ -209,40 +209,55 @@ ENDPROC(vfp_save_state) + last_VFP_context_address: + .word last_VFP_context + +-ENTRY(vfp_get_float) +- add pc, pc, r0, lsl #3 ++ .macro tbl_branch, base, tmp, shift ++#ifdef CONFIG_THUMB2_KERNEL ++ adr \tmp, 1f ++ add \tmp, \tmp, \base, lsl \shift ++ mov pc, \tmp ++#else ++ add pc, pc, \base, lsl \shift + mov r0, r0 ++#endif ++1: ++ .endm ++ ++ENTRY(vfp_get_float) ++ tbl_branch r0, r3, #3 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 ++1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 + mov pc, lr +- mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 ++ .org 1b + 8 ++1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 + mov pc, lr ++ .org 1b + 8 + .endr + ENDPROC(vfp_get_float) + + ENTRY(vfp_put_float) +- add pc, pc, r1, lsl #3 +- mov r0, r0 ++ tbl_branch r1, r3, #3 + 
.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 ++1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 + mov pc, lr +- mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 ++ .org 1b + 8 ++1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 + mov pc, lr ++ .org 1b + 8 + .endr + ENDPROC(vfp_put_float) + + ENTRY(vfp_get_double) +- add pc, pc, r0, lsl #3 +- mov r0, r0 ++ tbl_branch r0, r3, #3 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- fmrrd r0, r1, d\dr ++1: fmrrd r0, r1, d\dr + mov pc, lr ++ .org 1b + 8 + .endr + #ifdef CONFIG_VFPv3 + @ d16 - d31 registers + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr ++1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr + mov pc, lr ++ .org 1b + 8 + .endr + #endif + +@@ -253,17 +268,18 @@ ENTRY(vfp_get_double) + ENDPROC(vfp_get_double) + + ENTRY(vfp_put_double) +- add pc, pc, r2, lsl #3 +- mov r0, r0 ++ tbl_branch r2, r3, #3 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- fmdrr d\dr, r0, r1 ++1: fmdrr d\dr, r0, r1 + mov pc, lr ++ .org 1b + 8 + .endr + #ifdef CONFIG_VFPv3 + @ d16 - d31 registers + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 +- mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr ++1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr + mov pc, lr ++ .org 1b + 8 + .endr + #endif + ENDPROC(vfp_put_double) +--- a/arch/arm/vfp/vfpmodule.c ++++ b/arch/arm/vfp/vfpmodule.c +@@ -38,16 +38,75 @@ union vfp_state *last_VFP_context[NR_CPU + */ + unsigned int VFP_arch; + ++/* ++ * Per-thread VFP initialization. ++ */ ++static void vfp_thread_flush(struct thread_info *thread) ++{ ++ union vfp_state *vfp = &thread->vfpstate; ++ unsigned int cpu; ++ ++ memset(vfp, 0, sizeof(union vfp_state)); ++ ++ vfp->hard.fpexc = FPEXC_EN; ++ vfp->hard.fpscr = FPSCR_ROUND_NEAREST; ++ ++ /* ++ * Disable VFP to ensure we initialize it first. We must ensure ++ * that the modification of last_VFP_context[] and hardware disable ++ * are done for the same CPU and without preemption. 
++ */ ++ cpu = get_cpu(); ++ if (last_VFP_context[cpu] == vfp) ++ last_VFP_context[cpu] = NULL; ++ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); ++ put_cpu(); ++} ++ ++static void vfp_thread_exit(struct thread_info *thread) ++{ ++ /* release case: Per-thread VFP cleanup. */ ++ union vfp_state *vfp = &thread->vfpstate; ++ unsigned int cpu = get_cpu(); ++ ++ if (last_VFP_context[cpu] == vfp) ++ last_VFP_context[cpu] = NULL; ++ put_cpu(); ++} ++ ++/* ++ * When this function is called with the following 'cmd's, the following ++ * is true while this function is being run: ++ * THREAD_NOFTIFY_SWTICH: ++ * - the previously running thread will not be scheduled onto another CPU. ++ * - the next thread to be run (v) will not be running on another CPU. ++ * - thread->cpu is the local CPU number ++ * - not preemptible as we're called in the middle of a thread switch ++ * THREAD_NOTIFY_FLUSH: ++ * - the thread (v) will be running on the local CPU, so ++ * v === current_thread_info() ++ * - thread->cpu is the local CPU number at the time it is accessed, ++ * but may change at any time. ++ * - we could be preempted if tree preempt rcu is enabled, so ++ * it is unsafe to use thread->cpu. ++ * THREAD_NOTIFY_EXIT ++ * - the thread (v) will be running on the local CPU, so ++ * v === current_thread_info() ++ * - thread->cpu is the local CPU number at the time it is accessed, ++ * but may change at any time. ++ * - we could be preempted if tree preempt rcu is enabled, so ++ * it is unsafe to use thread->cpu. ++ */ + static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) + { + struct thread_info *thread = v; +- union vfp_state *vfp; +- __u32 cpu = thread->cpu; + + if (likely(cmd == THREAD_NOTIFY_SWITCH)) { + u32 fpexc = fmrx(FPEXC); + + #ifdef CONFIG_SMP ++ unsigned int cpu = thread->cpu; ++ + /* + * On SMP, if VFP is enabled, save the old state in + * case the thread migrates to a different CPU. 
The +@@ -74,25 +133,10 @@ static int vfp_notifier(struct notifier_ + return NOTIFY_DONE; + } + +- vfp = &thread->vfpstate; +- if (cmd == THREAD_NOTIFY_FLUSH) { +- /* +- * Per-thread VFP initialisation. +- */ +- memset(vfp, 0, sizeof(union vfp_state)); +- +- vfp->hard.fpexc = FPEXC_EN; +- vfp->hard.fpscr = FPSCR_ROUND_NEAREST; +- +- /* +- * Disable VFP to ensure we initialise it first. +- */ +- fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); +- } +- +- /* flush and release case: Per-thread VFP cleanup. */ +- if (last_VFP_context[cpu] == vfp) +- last_VFP_context[cpu] = NULL; ++ if (cmd == THREAD_NOTIFY_FLUSH) ++ vfp_thread_flush(thread); ++ else ++ vfp_thread_exit(thread); + + return NOTIFY_DONE; + } +@@ -153,10 +197,13 @@ static void vfp_raise_exceptions(u32 exc + } + + /* +- * Update the FPSCR with the additional exception flags. ++ * If any of the status flags are set, update the FPSCR. + * Comparison instructions always return at least one of + * these flags set. + */ ++ if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) ++ fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); ++ + fpscr |= exceptions; + + fmxr(FPSCR, fpscr); +@@ -381,54 +428,60 @@ static void vfp_pm_init(void) + static inline void vfp_pm_init(void) { } + #endif /* CONFIG_PM */ + +-/* +- * Synchronise the hardware VFP state of a thread other than current with the +- * saved one. This function is used by the ptrace mechanism. +- */ +-#ifdef CONFIG_SMP +-void vfp_sync_state(struct thread_info *thread) ++void vfp_sync_hwstate(struct thread_info *thread) + { ++ unsigned int cpu = get_cpu(); ++ + /* +- * On SMP systems, the VFP state is automatically saved at every +- * context switch. We mark the thread VFP state as belonging to a +- * non-existent CPU so that the saved one will be reloaded when +- * needed. ++ * If the thread we're interested in is the current owner of the ++ * hardware VFP state, then we need to save its state. 
+ */ +- thread->vfpstate.hard.cpu = NR_CPUS; ++ if (last_VFP_context[cpu] == &thread->vfpstate) { ++ u32 fpexc = fmrx(FPEXC); ++ ++ /* ++ * Save the last VFP state on this CPU. ++ */ ++ fmxr(FPEXC, fpexc | FPEXC_EN); ++ vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); ++ fmxr(FPEXC, fpexc); ++ } ++ ++ put_cpu(); + } +-#else +-void vfp_sync_state(struct thread_info *thread) ++ ++void vfp_flush_hwstate(struct thread_info *thread) + { + unsigned int cpu = get_cpu(); +- u32 fpexc = fmrx(FPEXC); + + /* +- * If VFP is enabled, the previous state was already saved and +- * last_VFP_context updated. ++ * If the thread we're interested in is the current owner of the ++ * hardware VFP state, then we need to save its state. + */ +- if (fpexc & FPEXC_EN) +- goto out; ++ if (last_VFP_context[cpu] == &thread->vfpstate) { ++ u32 fpexc = fmrx(FPEXC); + +- if (!last_VFP_context[cpu]) +- goto out; ++ fmxr(FPEXC, fpexc & ~FPEXC_EN); + +- /* +- * Save the last VFP state on this CPU. +- */ +- fmxr(FPEXC, fpexc | FPEXC_EN); +- vfp_save_state(last_VFP_context[cpu], fpexc); +- fmxr(FPEXC, fpexc); ++ /* ++ * Set the context to NULL to force a reload the next time ++ * the thread uses the VFP. ++ */ ++ last_VFP_context[cpu] = NULL; ++ } + ++#ifdef CONFIG_SMP + /* +- * Set the context to NULL to force a reload the next time the thread +- * uses the VFP. ++ * For SMP we still have to take care of the case where the thread ++ * migrates to another CPU and then back to the original CPU on which ++ * the last VFP user is still the same thread. Mark the thread VFP ++ * state as belonging to a non-existent CPU so that the saved one will ++ * be reloaded in the above case. 
+ */ +- last_VFP_context[cpu] = NULL; +- +-out: ++ thread->vfpstate.hard.cpu = NR_CPUS; ++#endif + put_cpu(); + } +-#endif + + #include + +@@ -481,7 +534,7 @@ static int __init vfp_init(void) + */ + elf_hwcap |= HWCAP_VFP; + #ifdef CONFIG_VFPv3 +- if (VFP_arch >= 3) { ++ if (VFP_arch >= 2) { + elf_hwcap |= HWCAP_VFPv3; + + /* +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/pcie.c +@@ -0,0 +1,360 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++DEFINE_SPINLOCK(pci_config_lock); ++ ++static int pcie_linked[2] = {0, 0}; // if 1, mean link ok. 
++ ++u32 cns3xxx_pcie0_irqs[2] = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, }; ++u32 cns3xxx_pcie1_irqs[2] = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, }; ++ ++static u32 access_base[2][3] = { ++ { CNS3XXX_PCIE0_HOST_BASE_VIRT, CNS3XXX_PCIE0_CFG0_BASE_VIRT, CNS3XXX_PCIE0_CFG1_BASE_VIRT}, ++ { CNS3XXX_PCIE1_HOST_BASE_VIRT, CNS3XXX_PCIE1_CFG0_BASE_VIRT, CNS3XXX_PCIE1_CFG1_BASE_VIRT}, ++}; ++ ++static int cns3xxx_pci_cfg_base(struct pci_bus *bus, ++ unsigned int devfn, int where) ++{ ++ int domain = pci_domain_nr(bus); ++ int slot = PCI_SLOT(devfn); ++ u32 base; ++ ++ if ((!pcie_linked[domain]) && (bus->number || slot)) ++ return 0; ++ ++ if (!(bus->number)) { ++ if (slot > 1) ++ return 0; ++ // CFG0 Type ++ base = access_base[domain][slot]; ++ } else { ++ // CFG1 Type ++ base = access_base[domain][2]; ++ } ++ base += (((bus->number & 0xf) << 20)| (devfn << 12) | (where & 0xfc)); ++ return base; ++} ++ ++static int cns3xxx_pci_read_config(struct pci_bus *bus, ++ unsigned int devfn, int where, int size, ++ u32 * val) ++{ ++ u32 v = 0xffffffff; ++ u32 base; ++ u32 mask = (0x1ull << (size * 8)) - 1; ++ int shift = (where % 4) * 8; ++ ++ base = cns3xxx_pci_cfg_base(bus, devfn, where); ++ if (!base) { ++ *val = 0xFFFFFFFF; ++ return PCIBIOS_SUCCESSFUL; ++ } ++ ++ v = __raw_readl(base); ++ if (bus->number == 0 && devfn == 0 && ++ (where & 0xffc) == PCI_CLASS_REVISION) { ++ /* RC's class is 0xb, but Linux PCI driver needs 0x604 for a PCIe bridge. 
*/ ++ /* So we must dedicate the class code to 0x604 here */ ++ v &= 0xff; ++ v |= (0x604 << 16); ++ } ++ ++ *val = (v >> shift) & mask; ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int cns3xxx_pci_write_config(struct pci_bus *bus, ++ unsigned int devfn, int where, int size, ++ u32 val) ++{ ++ u32 v; ++ u32 base; ++ u32 mask = (0x1ull << (size * 8)) - 1; ++ int shift = (where % 4) * 8; ++ ++ base = cns3xxx_pci_cfg_base(bus, devfn, where); ++ if (!base) ++ return PCIBIOS_SUCCESSFUL; ++ ++ v = __raw_readl(base); ++ v &= ~(mask << shift); ++ v |= (val & mask) << shift; ++ __raw_writel(v, base); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static struct pci_ops cns3xxx_pcie_ops = { ++ .read = cns3xxx_pci_read_config, ++ .write = cns3xxx_pci_write_config, ++}; ++ ++static struct resource cns3xxx_pcie0_io = { ++ .name = "PCIe0 I/O space", ++ .start = PCIE0_IO_SPACE_START, ++ .end = PCIE0_IO_SPACE_END, ++ .flags = IORESOURCE_IO, ++}; ++ ++static struct resource cns3xxx_pcie1_io = { ++ .name = "PCIe1 I/O space", ++ .start = PCIE1_IO_SPACE_START, ++ .end = PCIE1_IO_SPACE_END, ++ .flags = IORESOURCE_IO, ++}; ++ ++static struct resource cns3xxx_pcie0_mem = { ++ .name = "PCIe0 non-prefetchable", ++ .start = PCIE0_MEM_SPACE_START, ++ .end = PCIE0_MEM_SPACE_END, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct resource cns3xxx_pcie1_mem = { ++ .name = "PCIe1 non-prefetchable", ++ .start = PCIE1_MEM_SPACE_START, ++ .end = PCIE1_MEM_SPACE_END, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static int __init cns3xxx_pci_setup_resources(int nr, struct resource **resource) ++{ ++ if(nr==0){ ++ BUG_ON(request_resource(&iomem_resource, &cns3xxx_pcie0_io) || ++ request_resource(&iomem_resource, &cns3xxx_pcie0_mem)); ++ resource[0] = &cns3xxx_pcie0_io; ++ resource[1] = &cns3xxx_pcie0_mem; ++ }else{ ++ BUG_ON(request_resource(&iomem_resource, &cns3xxx_pcie1_io) || ++ request_resource(&iomem_resource, &cns3xxx_pcie1_mem)); ++ resource[0] = &cns3xxx_pcie1_io; ++ resource[1] = &cns3xxx_pcie1_mem; ++ 
} ++ return 0; ++} ++ ++int __init cns3xxx_pci_setup(int nr, struct pci_sys_data *sys) ++{ ++ BUG_ON(cns3xxx_pci_setup_resources(sys->domain,sys->resource)); ++ return 1; ++} ++ ++struct pci_bus *cns3xxx_pci_scan_bus(int nr, struct pci_sys_data *sys) ++{ ++ struct pci_bus *ret; ++ ret = pci_scan_bus(sys->busnr, &cns3xxx_pcie_ops, sys); ++ pci_assign_unassigned_resources(); ++ return ret; ++} ++ ++/* ++ * CNS3XXX PCIe device don't support hotplugin, and we will check the link at start up. ++ * ++ */ ++static void cns3xxx_pcie_check_link(int port) ++{ ++ ++ u32 reg; ++ u32 time; ++ ++ time = jiffies; /* set the start time for the receive */ ++ while (1) { ++ reg = __raw_readl( port == 0 ? CNS3XXX_PCIE0_PM_DEBUG : CNS3XXX_PCIE1_PM_DEBUG); /* check link up */ ++ reg = __raw_readl( port == 0 ? CNS3XXX_PCIE0_PM_DEBUG : CNS3XXX_PCIE1_PM_DEBUG); ++ if (reg & 0x1) { ++ pcie_linked[port]++; ++ break; ++ } else if (time_after(jiffies, (unsigned long)(time + 50))) { ++ break; ++ } ++ } ++ ++} ++ ++static void cns3xxx_pcie_hw_init(int port){ ++ struct pci_bus bus; ++ struct pci_sys_data sd; ++ u32 devfn = 0; ++ u8 pri_bus, sec_bus, sub_bus; ++ u8 cp, u8tmp; ++ u16 u16tmp,pos,dc; ++ u32 mem_base, host_base, io_base, cfg0_base; ++ ++ bus.number = 0; ++ bus.ops = &cns3xxx_pcie_ops; ++ sd.domain = port; ++ bus.sysdata = &sd; ++ ++ mem_base = ( port == 0 ? CNS3XXX_PCIE0_MEM_BASE : CNS3XXX_PCIE1_MEM_BASE ); ++ mem_base = mem_base >> 16; ++ ++ io_base = ( port == 0 ? CNS3XXX_PCIE0_IO_BASE : CNS3XXX_PCIE1_IO_BASE ); ++ io_base = io_base >> 16; ++ ++ host_base = ( port == 0 ? CNS3XXX_PCIE0_HOST_BASE_VIRT : CNS3XXX_PCIE1_HOST_BASE_VIRT ); ++ host_base = ( host_base -1 ) >> 16; ++ ++ cfg0_base = ( port == 0 ? 
CNS3XXX_PCIE0_CFG0_BASE_VIRT : CNS3XXX_PCIE1_CFG0_BASE_VIRT ); ++ cfg0_base = ( cfg0_base -1 ) >> 16; ++ ++ pci_bus_write_config_byte(&bus, devfn, PCI_PRIMARY_BUS, 0); ++ pci_bus_write_config_byte(&bus, devfn, PCI_SECONDARY_BUS, 1); ++ pci_bus_write_config_byte(&bus, devfn, PCI_SUBORDINATE_BUS, 1); ++ ++ pci_bus_read_config_byte(&bus, devfn, PCI_PRIMARY_BUS, &pri_bus); ++ pci_bus_read_config_byte(&bus, devfn, PCI_SECONDARY_BUS, &sec_bus); ++ pci_bus_read_config_byte(&bus, devfn, PCI_SUBORDINATE_BUS, &sub_bus); ++ ++ pci_bus_write_config_word(&bus, devfn, PCI_MEMORY_BASE, mem_base); ++ pci_bus_write_config_word(&bus, devfn, PCI_MEMORY_LIMIT, host_base); ++ pci_bus_write_config_word(&bus, devfn, PCI_IO_BASE_UPPER16, io_base); ++ pci_bus_write_config_word(&bus, devfn, PCI_IO_LIMIT_UPPER16, cfg0_base); ++ ++ pci_bus_read_config_byte(&bus, devfn, PCI_CAPABILITY_LIST, &cp); ++ while (cp != 0) { ++ pci_bus_read_config_byte(&bus, devfn, cp, &u8tmp); ++ // Read Next ID ++ pci_bus_read_config_word(&bus, devfn, cp, &u16tmp); ++ cp = (u16tmp & 0xFF00) >> 8; ++ } ++ ++ /* Modify device's Max_Read_Request size */ ++ devfn = PCI_DEVFN(1,0); ++ if (!pcie_linked[port]) ++ return; ++ ++ pci_bus_read_config_byte(&bus, devfn, PCI_CAPABILITY_LIST, &cp); ++ while (cp != 0) { ++ pci_bus_read_config_byte(&bus, devfn, cp, &u8tmp); ++ // Read Next ID ++ pci_bus_read_config_word(&bus, devfn, cp, &u16tmp); ++ cp = (u16tmp & 0xFF00) >> 8; ++ } ++ ++ /* Set Device Max_Read_Request_Size to 128 byte */ ++ pos = pci_bus_find_capability(&bus, devfn, PCI_CAP_ID_EXP); ++ pci_bus_read_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, &dc); ++ dc &= ~(0x3 << 12); /* Clear Device Control Register [14:12] */ ++ pci_bus_write_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, dc); ++ pci_bus_read_config_word(&bus, devfn, pos + PCI_EXP_DEVCTL, &dc); ++ ++ if (!port) { ++ /* Disable PCIe0 Interrupt Mask INTA to INTD */ ++ __raw_writel(~0x3FFF, CNS3XXX_MISC_BASE_VIRT + 0x978); ++ } else { ++ /* Disable PCIe1 
Interrupt Mask INTA to INTD */ ++ __raw_writel(~0x3FFF, CNS3XXX_MISC_BASE_VIRT + 0xA78); ++ } ++} ++ ++ ++void __init cns3xxx_pcie0_preinit(void) ++{ ++ cns3xxx_pcie_check_link(0); ++ cns3xxx_pcie_hw_init(0); ++} ++ ++void __init cns3xxx_pcie1_preinit(void) ++{ ++ cns3xxx_pcie_check_link(1); ++ cns3xxx_pcie_hw_init(1); ++} ++ ++/* ++ * map the specified device/slot/pin to an IRQ. Different backplanes may need to modify this. ++ */ ++ ++static int __init cns3xxx_pcie0_map_irq(struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ return cns3xxx_pcie0_irqs[slot]; ++} ++ ++static int __init cns3xxx_pcie1_map_irq(struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ return cns3xxx_pcie1_irqs[slot]; ++} ++ ++static struct hw_pci cns3xxx_pcie[2] __initdata = { ++ { ++ .swizzle = pci_std_swizzle, ++ .map_irq = cns3xxx_pcie0_map_irq, ++ .nr_controllers = 1, ++ .nr_domains = 0, ++ .setup = cns3xxx_pci_setup, ++ .scan = cns3xxx_pci_scan_bus, ++ .preinit = cns3xxx_pcie0_preinit, ++ }, ++ { ++ .swizzle = pci_std_swizzle, ++ .map_irq = cns3xxx_pcie1_map_irq, ++ .nr_controllers = 1, ++ .nr_domains = 1, ++ .setup = cns3xxx_pci_setup, ++ .scan = cns3xxx_pci_scan_bus, ++ .preinit = cns3xxx_pcie1_preinit, ++ } ++}; ++ ++static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr, ++ struct pt_regs *regs) ++{ ++ if (fsr & (1 << 10)) ++ regs->ARM_pc += 4; ++ return 0; ++} ++ ++//extern void pci_common_init(struct hw_pci *); ++int cns3xxx_pcie_init(u8 ports) ++{ ++ hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, "imprecise external abort"); ++ ++ if (ports & 0x1) ++ pci_common_init(&cns3xxx_pcie[0]); ++ if (ports & 0x2) ++ pci_common_init(&cns3xxx_pcie[1]); ++ ++ return 0; ++} ++ ++//device_initcall(cns3xxx_pcie_init); diff --git a/target/linux/cns3xxx/patches-2.6.31/101-laguna_support.patch b/target/linux/cns3xxx/patches-2.6.31/101-laguna_support.patch new file mode 100644 index 0000000000..c14c30d380 --- /dev/null +++ 
b/target/linux/cns3xxx/patches-2.6.31/101-laguna_support.patch @@ -0,0 +1,341 @@ +--- /dev/null ++++ b/drivers/hwmon/gsp.c +@@ -0,0 +1,310 @@ ++/* ++ * A hwmon driver for the Gateworks System Peripheral ++ * Copyright (C) 2009 Gateworks Corporation ++ * ++ * Author: Chris Lang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, ++ * as published by the Free Software Foundation - version 2. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++ ++#define DRV_VERSION "0.2" ++ ++enum chips { gsp }; ++ ++/* AD7418 registers */ ++#define GSP_REG_TEMP_IN 0x00 ++#define GSP_REG_VIN 0x02 ++#define GSP_REG_3P3 0x05 ++#define GSP_REG_BAT 0x08 ++#define GSP_REG_5P0 0x0b ++#define GSP_REG_CORE 0x0e ++#define GSP_REG_CPU1 0x11 ++#define GSP_REG_CPU2 0x14 ++#define GSP_REG_DRAM 0x17 ++#define GSP_REG_EXT_BAT 0x1a ++#define GSP_REG_IO1 0x1d ++#define GSP_REG_IO2 0x20 ++#define GSP_REG_PCIE 0x23 ++#define GSP_REG_CURRENT 0x26 ++#define GSP_FAN_0 0x2C ++#define GSP_FAN_1 0x2E ++#define GSP_FAN_2 0x30 ++#define GSP_FAN_3 0x32 ++#define GSP_FAN_4 0x34 ++#define GSP_FAN_5 0x36 ++ ++struct gsp_sensor_info { ++ const char* name; ++ int reg; ++}; ++ ++static const struct gsp_sensor_info gsp_sensors[] = { ++ {"temp", GSP_REG_TEMP_IN}, ++ {"vin", GSP_REG_VIN}, ++ {"3p3", GSP_REG_3P3}, ++ {"bat", GSP_REG_BAT}, ++ {"5p0", GSP_REG_5P0}, ++ {"core", GSP_REG_CORE}, ++ {"cpu1", GSP_REG_CPU1}, ++ {"cpu2", GSP_REG_CPU2}, ++ {"dram", GSP_REG_DRAM}, ++ {"ext_bat", GSP_REG_EXT_BAT}, ++ {"io1", GSP_REG_IO1}, ++ {"io2", GSP_REG_IO2}, ++ {"pci2", GSP_REG_PCIE}, ++ {"current", GSP_REG_CURRENT}, ++ {"fan_point0", GSP_FAN_0}, ++ {"fan_point1", GSP_FAN_1}, ++ {"fan_point2", GSP_FAN_2}, ++ {"fan_point3", GSP_FAN_3}, ++ {"fan_point4", GSP_FAN_4}, ++ {"fan_point5", GSP_FAN_5}, ++}; ++ ++struct gsp_data { ++ struct device *hwmon_dev; ++ struct attribute_group attrs; ++ enum chips type; ++}; ++ ++static int 
gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id); ++static int gsp_remove(struct i2c_client *client); ++ ++static const struct i2c_device_id gsp_id[] = { ++ { "gsp", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, gsp_id); ++ ++static struct i2c_driver gsp_driver = { ++ .driver = { ++ .name = "gsp", ++ }, ++ .probe = gsp_probe, ++ .remove = gsp_remove, ++ .id_table = gsp_id, ++}; ++ ++/* All registers are word-sized, except for the configuration registers. ++ * AD7418 uses a high-byte first convention. Do NOT use those functions to ++ * access the configuration registers CONF and CONF2, as they are byte-sized. ++ */ ++static inline int gsp_read(struct i2c_client *client, u8 reg) ++{ ++ unsigned int adc = 0; ++ if (reg == GSP_REG_TEMP_IN || reg > GSP_REG_CURRENT) ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ return adc; ++ } ++ else ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ adc |= i2c_smbus_read_byte_data(client, reg + 2) << 16; ++ return adc; ++ } ++} ++ ++static inline int gsp_write(struct i2c_client *client, u8 reg, u16 value) ++{ ++ i2c_smbus_write_byte_data(client, reg, value & 0xff); ++ i2c_smbus_write_byte_data(client, reg + 1, ((value >> 8) & 0xff)); ++ return 1; ++} ++ ++static ssize_t show_adc(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ return sprintf(buf, "%d\n", gsp_read(client, gsp_sensors[attr->index].reg)); ++} ++ ++static ssize_t show_label(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ ++ return sprintf(buf, "%s\n", gsp_sensors[attr->index].name); ++} ++ ++static ssize_t store_fan(struct device *dev, ++ struct device_attribute *devattr, const 
char *buf, size_t count) ++{ ++ u16 val; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ val = simple_strtoul(buf, NULL, 10); ++ gsp_write(client, gsp_sensors[attr->index].reg, val); ++ return count; ++} ++ ++static SENSOR_DEVICE_ATTR(temp0_input, S_IRUGO, show_adc, NULL, 0); ++static SENSOR_DEVICE_ATTR(temp0_label, S_IRUGO, show_label, NULL, 0); ++ ++static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_adc, NULL, 1); ++static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, 1); ++static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 2); ++static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_label, NULL, 2); ++static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 3); ++static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_label, NULL, 3); ++static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 4); ++static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 4); ++static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 5); ++static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL, 5); ++static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_adc, NULL, 6); ++static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL, 6); ++static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_adc, NULL, 7); ++static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL, 7); ++static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_adc, NULL, 8); ++static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 8); ++static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_adc, NULL, 9); ++static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 9); ++static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_adc, NULL, 10); ++static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 10); ++static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_adc, NULL, 11); ++static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_label, NULL, 11); ++static SENSOR_DEVICE_ATTR(in11_input, 
S_IRUGO, show_adc, NULL, 12); ++static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_label, NULL, 12); ++static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_adc, NULL, 13); ++static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_label, NULL, 13); ++ ++static SENSOR_DEVICE_ATTR(fan0_point0, S_IRUGO | S_IWUSR, show_adc, store_fan, 14); ++static SENSOR_DEVICE_ATTR(fan0_point1, S_IRUGO | S_IWUSR, show_adc, store_fan, 15); ++static SENSOR_DEVICE_ATTR(fan0_point2, S_IRUGO | S_IWUSR, show_adc, store_fan, 16); ++static SENSOR_DEVICE_ATTR(fan0_point3, S_IRUGO | S_IWUSR, show_adc, store_fan, 17); ++static SENSOR_DEVICE_ATTR(fan0_point4, S_IRUGO | S_IWUSR, show_adc, store_fan, 18); ++static SENSOR_DEVICE_ATTR(fan0_point5, S_IRUGO | S_IWUSR, show_adc, store_fan, 19); ++ ++ ++ ++static struct attribute *gsp_attributes[] = { ++ &sensor_dev_attr_temp0_input.dev_attr.attr, ++ &sensor_dev_attr_in0_input.dev_attr.attr, ++ &sensor_dev_attr_in1_input.dev_attr.attr, ++ &sensor_dev_attr_in2_input.dev_attr.attr, ++ &sensor_dev_attr_in3_input.dev_attr.attr, ++ &sensor_dev_attr_in4_input.dev_attr.attr, ++ &sensor_dev_attr_in5_input.dev_attr.attr, ++ &sensor_dev_attr_in6_input.dev_attr.attr, ++ &sensor_dev_attr_in7_input.dev_attr.attr, ++ &sensor_dev_attr_in8_input.dev_attr.attr, ++ &sensor_dev_attr_in9_input.dev_attr.attr, ++ &sensor_dev_attr_in10_input.dev_attr.attr, ++ &sensor_dev_attr_in11_input.dev_attr.attr, ++ &sensor_dev_attr_in12_input.dev_attr.attr, ++ ++ &sensor_dev_attr_temp0_label.dev_attr.attr, ++ &sensor_dev_attr_in0_label.dev_attr.attr, ++ &sensor_dev_attr_in1_label.dev_attr.attr, ++ &sensor_dev_attr_in2_label.dev_attr.attr, ++ &sensor_dev_attr_in3_label.dev_attr.attr, ++ &sensor_dev_attr_in4_label.dev_attr.attr, ++ &sensor_dev_attr_in5_label.dev_attr.attr, ++ &sensor_dev_attr_in6_label.dev_attr.attr, ++ &sensor_dev_attr_in7_label.dev_attr.attr, ++ &sensor_dev_attr_in8_label.dev_attr.attr, ++ &sensor_dev_attr_in9_label.dev_attr.attr, ++ 
&sensor_dev_attr_in10_label.dev_attr.attr, ++ &sensor_dev_attr_in11_label.dev_attr.attr, ++ &sensor_dev_attr_in12_label.dev_attr.attr, ++ ++ &sensor_dev_attr_fan0_point0.dev_attr.attr, ++ &sensor_dev_attr_fan0_point1.dev_attr.attr, ++ &sensor_dev_attr_fan0_point2.dev_attr.attr, ++ &sensor_dev_attr_fan0_point3.dev_attr.attr, ++ &sensor_dev_attr_fan0_point4.dev_attr.attr, ++ &sensor_dev_attr_fan0_point5.dev_attr.attr, ++ ++ NULL ++}; ++ ++ ++static int gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct i2c_adapter *adapter = client->adapter; ++ struct gsp_data *data; ++ int err; ++ ++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_WORD_DATA)) { ++ err = -EOPNOTSUPP; ++ goto exit; ++ } ++ ++ if (!(data = kzalloc(sizeof(struct gsp_data), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ i2c_set_clientdata(client, data); ++ ++ data->type = id->driver_data; ++ ++ switch (data->type) { ++ case 0: ++ data->attrs.attrs = gsp_attributes; ++ break; ++ } ++ ++ dev_info(&client->dev, "%s chip found\n", client->name); ++ ++ /* Register sysfs hooks */ ++ if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) ++ goto exit_free; ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ return 0; ++ ++exit_remove: ++ sysfs_remove_group(&client->dev.kobj, &data->attrs); ++exit_free: ++ kfree(data); ++exit: ++ return err; ++} ++ ++static int gsp_remove(struct i2c_client *client) ++{ ++ struct gsp_data *data = i2c_get_clientdata(client); ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &data->attrs); ++ kfree(data); ++ return 0; ++} ++ ++static int __init gsp_init(void) ++{ ++ return i2c_add_driver(&gsp_driver); ++} ++ ++static void __exit gsp_exit(void) ++{ ++ i2c_del_driver(&gsp_driver); ++} ++ ++MODULE_AUTHOR("Chris Lang "); ++MODULE_DESCRIPTION("GSP HWMON 
driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_VERSION); ++ ++module_init(gsp_init); ++module_exit(gsp_exit); +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -57,6 +57,15 @@ config SENSORS_ABITUGURU3 + This driver can also be built as a module. If so, the module + will be called abituguru3. + ++config SENSORS_GSP ++ tristate "Gateworks System Peripheral" ++ depends on I2C && EXPERIMENTAL ++ help ++ If you say yes here you get support for the Gateworks System Peripherals. ++ ++ This driver can also be built as a module. If so, the module ++ will be called gsp. ++ + config SENSORS_AD7414 + tristate "Analog Devices AD7414" + depends on I2C && EXPERIMENTAL +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -15,6 +15,7 @@ obj-$(CONFIG_SENSORS_W83791D) += w83791d + + obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o + obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o ++obj-$(CONFIG_SENSORS_GSP) += gsp.o + obj-$(CONFIG_SENSORS_AD7414) += ad7414.o + obj-$(CONFIG_SENSORS_AD7418) += ad7418.o + obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o diff --git a/target/linux/cns3xxx/patches-2.6.31/102-cns3xxx_ata_support.patch b/target/linux/cns3xxx/patches-2.6.31/102-cns3xxx_ata_support.patch new file mode 100644 index 0000000000..5c7156dd2b --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/102-cns3xxx_ata_support.patch @@ -0,0 +1,3350 @@ +--- /dev/null ++++ b/drivers/ata/cns3xxx_ahci.c +@@ -0,0 +1,3281 @@ ++/* ++ * ahci.c - AHCI SATA support ++ * ++ * Maintained by: Jeff Garzik ++ * Please ALWAYS copy linux-ide@vger.kernel.org ++ * on emails. ++ * ++ * Copyright 2004-2005 Red Hat, Inc. ++ * ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2, or (at your option) ++ * any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; see the file COPYING. If not, write to ++ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ * ++ * libata documentation is available via 'make {ps|pdf}docs', ++ * as Documentation/DocBook/libata.* ++ * ++ * AHCI hardware documentation: ++ * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf ++ * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf ++ * ++ */ ++/* ++ * Cavium CNS3XXX notice ++ * This driver is copy from ahci, and this driver only modify memory access function. ++ * Let the driver support non-PCI device ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_NAME "cns3xxx_ahci" ++#define DRV_VERSION "3.0" ++ ++#define MISC_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT+offset))) ++#define CNS3XXX_MISC_REGISTER MISC_REG_VALUE(0x514) ++#define AHCI_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_SATA2_BASE_VIRT+offset))) ++#define CNS3XXX_AHCI_HOSTCTL_REG AHCI_REG_VALUE(0x04) ++ ++/* Enclosure Management Control */ ++#define EM_CTRL_MSG_TYPE 0x000f0000 ++ ++/* Enclosure Management LED Message Type */ ++#define EM_MSG_LED_HBA_PORT 0x0000000f ++#define EM_MSG_LED_PMP_SLOT 0x0000ff00 ++#define EM_MSG_LED_VALUE 0xffff0000 ++#define EM_MSG_LED_VALUE_ACTIVITY 0x00070000 ++#define EM_MSG_LED_VALUE_OFF 0xfff80000 ++#define EM_MSG_LED_VALUE_ON 0x00010000 ++ ++/* PHY Misc Define */ ++#define MISC_SATA_POWER_MODE MISC_MEM_MAP_VALUE(0x310) ++#define MISC_SATA_CORE_ID MISC_MEM_MAP_VALUE(0x600) 
++#define MISC_SATA_PORT0_PHY_CFG MISC_MEM_MAP_VALUE(0x604) ++#define MISC_SATA_PORT1_PHY_CFG MISC_MEM_MAP_VALUE(0x608) ++#define MISC_SATA_PORT0_PHY_TST MISC_MEM_MAP_VALUE(0x60C) ++#define MISC_SATA_PORT1_PHY_TST MISC_MEM_MAP_VALUE(0x610) ++ ++ ++static int ahci_skip_host_reset; ++static int ahci_ignore_sss; ++ ++module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); ++MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); ++ ++module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); ++MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); ++ ++static int ahci_enable_alpm(struct ata_port *ap, ++ enum link_pm policy); ++static void ahci_disable_alpm(struct ata_port *ap); ++static ssize_t ahci_led_show(struct ata_port *ap, char *buf); ++static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, ++ size_t size); ++static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, ++ ssize_t size); ++ ++enum { ++ AHCI_PCI_BAR = 5, ++ AHCI_MAX_PORTS = 32, ++ AHCI_MAX_SG = 168, /* hardware max is 64K */ ++ AHCI_DMA_BOUNDARY = 0xffffffff, ++ AHCI_MAX_CMDS = 32, ++ AHCI_CMD_SZ = 32, ++ AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ, ++ AHCI_RX_FIS_SZ = 256, ++ AHCI_CMD_TBL_CDB = 0x40, ++ AHCI_CMD_TBL_HDR_SZ = 0x80, ++ AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16), ++ AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS, ++ AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + ++ AHCI_RX_FIS_SZ, ++ AHCI_IRQ_ON_SG = (1 << 31), ++ AHCI_CMD_ATAPI = (1 << 5), ++ AHCI_CMD_WRITE = (1 << 6), ++ AHCI_CMD_PREFETCH = (1 << 7), ++ AHCI_CMD_RESET = (1 << 8), ++ AHCI_CMD_CLR_BUSY = (1 << 10), ++ ++ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ ++ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ ++ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ ++ ++ board_ahci = 0, ++ board_ahci_vt8251 = 1, ++ board_ahci_ign_iferr = 2, ++ board_ahci_sb600 = 3, ++ 
board_ahci_mv = 4, ++ board_ahci_sb700 = 5, /* for SB700 and SB800 */ ++ board_ahci_mcp65 = 6, ++ board_ahci_nopmp = 7, ++ board_ahci_yesncq = 8, ++ ++ /* global controller registers */ ++ HOST_CAP = 0x00, /* host capabilities */ ++ HOST_CTL = 0x04, /* global host control */ ++ HOST_IRQ_STAT = 0x08, /* interrupt status */ ++ HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */ ++ HOST_VERSION = 0x10, /* AHCI spec. version compliancy */ ++ HOST_EM_LOC = 0x1c, /* Enclosure Management location */ ++ HOST_EM_CTL = 0x20, /* Enclosure Management Control */ ++ ++ /* HOST_CTL bits */ ++ HOST_RESET = (1 << 0), /* reset controller; self-clear */ ++ HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ ++ HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ ++ ++ /* HOST_CAP bits */ ++ HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */ ++ HOST_CAP_SSC = (1 << 14), /* Slumber capable */ ++ HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */ ++ HOST_CAP_CLO = (1 << 24), /* Command List Override support */ ++ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */ ++ HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */ ++ HOST_CAP_SNTF = (1 << 29), /* SNotification register */ ++ HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */ ++ HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */ ++ ++ /* registers for each SATA port */ ++ PORT_LST_ADDR = 0x00, /* command list DMA addr */ ++ PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */ ++ PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */ ++ PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */ ++ PORT_IRQ_STAT = 0x10, /* interrupt status */ ++ PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */ ++ PORT_CMD = 0x18, /* port command */ ++ PORT_TFDATA = 0x20, /* taskfile data */ ++ PORT_SIG = 0x24, /* device TF signature */ ++ PORT_CMD_ISSUE = 0x38, /* command issue */ ++ PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */ ++ PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */ ++ PORT_SCR_ERR = 0x30, /* SATA phy 
register: SError */ ++ PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */ ++ PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */ ++ ++ /* PORT_IRQ_{STAT,MASK} bits */ ++ PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */ ++ PORT_IRQ_TF_ERR = (1 << 30), /* task file error */ ++ PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */ ++ PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */ ++ PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */ ++ PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */ ++ PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */ ++ PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */ ++ ++ PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */ ++ PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */ ++ PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */ ++ PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */ ++ PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */ ++ PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */ ++ PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */ ++ PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */ ++ PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */ ++ ++ PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR | ++ PORT_IRQ_IF_ERR | ++ PORT_IRQ_CONNECT | ++ PORT_IRQ_PHYRDY | ++ PORT_IRQ_UNK_FIS | ++ PORT_IRQ_BAD_PMP, ++ PORT_IRQ_ERROR = PORT_IRQ_FREEZE | ++ PORT_IRQ_TF_ERR | ++ PORT_IRQ_HBUS_DATA_ERR, ++ DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | ++ PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | ++ PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS, ++ ++ /* PORT_CMD bits */ ++ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */ ++ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */ ++ PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */ ++ PORT_CMD_PMP = (1 << 17), /* PMP attached */ ++ PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */ ++ PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */ ++ PORT_CMD_FIS_RX = (1 
<< 4), /* Enable FIS receive DMA engine */ ++ PORT_CMD_CLO = (1 << 3), /* Command list override */ ++ PORT_CMD_POWER_ON = (1 << 2), /* Power up device */ ++ PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */ ++ PORT_CMD_START = (1 << 0), /* Enable port DMA engine */ ++ ++ PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */ ++ PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */ ++ PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */ ++ PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */ ++ ++ /* hpriv->flags bits */ ++ AHCI_HFLAG_NO_NCQ = (1 << 0), ++ AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */ ++ AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */ ++ AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */ ++ AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ ++ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ ++ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ ++ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ ++ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ ++ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ ++ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ ++ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as ++ link offline */ ++ ++ /* ap->flags bits */ ++ ++ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ++ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ++ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | ++ ATA_FLAG_IPM, ++ ++ ICH_MAP = 0x90, /* ICH MAP register */ ++ ++ /* em constants */ ++ EM_MAX_SLOTS = 8, ++ EM_MAX_RETRY = 5, ++ ++ /* em_ctl bits */ ++ EM_CTL_RST = (1 << 9), /* Reset */ ++ EM_CTL_TM = (1 << 8), /* Transmit Message */ ++ EM_CTL_ALHD = (1 << 26), /* Activity LED */ ++ ++ /* CNS3XXX define */ ++ HOST_TIMER1MS = 0xe0, /* Timer 1ms register */ ++}; ++ ++struct ahci_cmd_hdr { ++ __le32 opts; ++ __le32 status; ++ __le32 tbl_addr; ++ __le32 tbl_addr_hi; ++ __le32 reserved[4]; ++}; ++ ++struct ahci_sg { ++ __le32 addr; ++ __le32 addr_hi; ++ __le32 
reserved; ++ __le32 flags_size; ++}; ++ ++struct ahci_em_priv { ++ enum sw_activity blink_policy; ++ struct timer_list timer; ++ unsigned long saved_activity; ++ unsigned long activity; ++ unsigned long led_state; ++}; ++ ++struct ahci_host_priv { ++ unsigned int flags; /* AHCI_HFLAG_* */ ++ u32 cap; /* cap to use */ ++ u32 port_map; /* port map to use */ ++ u32 saved_cap; /* saved initial cap */ ++ u32 saved_port_map; /* saved initial port_map */ ++ u32 em_loc; /* enclosure management location */ ++}; ++ ++struct ahci_port_priv { ++ struct ata_link *active_link; ++ struct ahci_cmd_hdr *cmd_slot; ++ dma_addr_t cmd_slot_dma; ++ void *cmd_tbl; ++ dma_addr_t cmd_tbl_dma; ++ void *rx_fis; ++ dma_addr_t rx_fis_dma; ++ /* for NCQ spurious interrupt analysis */ ++ unsigned int ncq_saw_d2h:1; ++ unsigned int ncq_saw_dmas:1; ++ unsigned int ncq_saw_sdb:1; ++ u32 intr_mask; /* interrupts to enable */ ++ /* enclosure management info per PM slot */ ++ struct ahci_em_priv em_priv[EM_MAX_SLOTS]; ++}; ++ ++static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); ++static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); ++#if 0 ++static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); ++#else ++static int ahci_probe(struct platform_device *pdev); ++static int ahci_remove(struct platform_device *pdev); ++#endif ++static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); ++static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); ++static int ahci_port_start(struct ata_port *ap); ++static void ahci_port_stop(struct ata_port *ap); ++static void ahci_qc_prep(struct ata_queued_cmd *qc); ++static void ahci_freeze(struct ata_port *ap); ++static void ahci_thaw(struct ata_port *ap); ++static void ahci_pmp_attach(struct ata_port *ap); ++static void ahci_pmp_detach(struct ata_port *ap); ++static int ahci_softreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline); ++static int 
ahci_sb600_softreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline); ++static int ahci_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline); ++static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline); ++#if 0 ++static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline); ++#endif ++static void ahci_postreset(struct ata_link *link, unsigned int *class); ++static void ahci_error_handler(struct ata_port *ap); ++static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); ++static int ahci_port_resume(struct ata_port *ap); ++static void ahci_dev_config(struct ata_device *dev); ++static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, ++ u32 opts); ++#ifdef CONFIG_PM ++static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); ++static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); ++static int ahci_pci_device_resume(struct pci_dev *pdev); ++#endif ++static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); ++static ssize_t ahci_activity_store(struct ata_device *dev, ++ enum sw_activity val); ++static void ahci_init_sw_activity(struct ata_link *link); ++ ++static struct device_attribute *ahci_shost_attrs[] = { ++ &dev_attr_link_power_management_policy, ++ &dev_attr_em_message_type, ++ &dev_attr_em_message, ++ NULL ++}; ++ ++static struct device_attribute *ahci_sdev_attrs[] = { ++ &dev_attr_sw_activity, ++ &dev_attr_unload_heads, ++ NULL ++}; ++ ++static struct scsi_host_template ahci_sht = { ++ ATA_NCQ_SHT(DRV_NAME), ++ .can_queue = AHCI_MAX_CMDS - 1, ++ .sg_tablesize = AHCI_MAX_SG, ++ .dma_boundary = AHCI_DMA_BOUNDARY, ++ .shost_attrs = ahci_shost_attrs, ++ .sdev_attrs = ahci_sdev_attrs, ++}; ++ ++static struct ata_port_operations ahci_ops = { ++ .inherits = &sata_pmp_port_ops, ++ ++ .qc_defer = sata_pmp_qc_defer_cmd_switch, ++ .qc_prep = ahci_qc_prep, ++ 
.qc_issue = ahci_qc_issue, ++ .qc_fill_rtf = ahci_qc_fill_rtf, ++ ++ .freeze = ahci_freeze, ++ .thaw = ahci_thaw, ++ .softreset = ahci_softreset, ++ .hardreset = ahci_hardreset, ++ .postreset = ahci_postreset, ++ .pmp_softreset = ahci_softreset, ++ .error_handler = ahci_error_handler, ++ .post_internal_cmd = ahci_post_internal_cmd, ++ .dev_config = ahci_dev_config, ++ ++ .scr_read = ahci_scr_read, ++ .scr_write = ahci_scr_write, ++ .pmp_attach = ahci_pmp_attach, ++ .pmp_detach = ahci_pmp_detach, ++ ++ .enable_pm = ahci_enable_alpm, ++ .disable_pm = ahci_disable_alpm, ++ .em_show = ahci_led_show, ++ .em_store = ahci_led_store, ++ .sw_activity_show = ahci_activity_show, ++ .sw_activity_store = ahci_activity_store, ++#ifdef CONFIG_PM ++ .port_suspend = ahci_port_suspend, ++ .port_resume = ahci_port_resume, ++#endif ++ .port_start = ahci_port_start, ++ .port_stop = ahci_port_stop, ++}; ++ ++static struct ata_port_operations ahci_vt8251_ops = { ++ .inherits = &ahci_ops, ++ .hardreset = ahci_vt8251_hardreset, ++}; ++ ++#if 0 ++static struct ata_port_operations ahci_p5wdh_ops = { ++ .inherits = &ahci_ops, ++ .hardreset = ahci_p5wdh_hardreset, ++}; ++#endif ++ ++static struct ata_port_operations ahci_sb600_ops = { ++ .inherits = &ahci_ops, ++ .softreset = ahci_sb600_softreset, ++ .pmp_softreset = ahci_sb600_softreset, ++}; ++ ++#define AHCI_HFLAGS(flags) .private_data = (void *)(flags) ++ ++static const struct ata_port_info ahci_port_info[] = { ++ [board_ahci] = ++ { ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++ [board_ahci_vt8251] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_vt8251_ops, ++ }, ++ [board_ahci_ign_iferr] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++ 
[board_ahci_sb600] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | ++ AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_sb600_ops, ++ }, ++ [board_ahci_mv] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI | ++ AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP), ++ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ++ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++ [board_ahci_sb700] = /* for SB700 and SB800 */ ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_sb600_ops, ++ }, ++ [board_ahci_mcp65] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++ [board_ahci_nopmp] = ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_NO_PMP), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++ /* board_ahci_yesncq */ ++ { ++ AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, ++}; ++ ++static const struct pci_device_id ahci_pci_tbl[] = { ++ /* Intel */ ++ { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */ ++ { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */ ++ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */ ++ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */ ++ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */ ++ { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */ ++ { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */ ++ { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */ ++ { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ ++ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ ++ { PCI_VDEVICE(INTEL, 
0x2821), board_ahci }, /* ICH8 */ ++ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ ++ { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ ++ { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ ++ { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ ++ { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */ ++ { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */ ++ { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */ ++ { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */ ++ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */ ++ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */ ++ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */ ++ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */ ++ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */ ++ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */ ++ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */ ++ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */ ++ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ ++ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ ++ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ ++ ++ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ ++ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, ++ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, ++ ++ /* ATI */ ++ { PCI_VDEVICE(ATI, 
0x4380), board_ahci_sb600 }, /* ATI SB600 */ ++ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */ ++ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */ ++ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */ ++ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */ ++ { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */ ++ { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ ++ ++ /* VIA */ ++ { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ ++ { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ ++ ++ /* NVIDIA */ ++ { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ ++ { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ ++ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ ++ { 
PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */ 
++ { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */ ++ { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */ ++ ++ /* SiS */ ++ { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ ++ { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */ ++ { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */ ++ ++ /* Marvell */ ++ { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ ++ { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ ++ ++ /* Promise */ ++ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ ++ ++ /* Generic, PCI class code for AHCI */ ++ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, ++ PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, ++ ++ { } /* terminate list */ ++}; ++ ++ ++#if 0 ++static struct pci_driver ahci_pci_driver = { ++ .name = DRV_NAME, ++ .id_table = ahci_pci_tbl, ++ .probe = ahci_init_one, ++ .remove = ata_pci_remove_one, ++#ifdef CONFIG_PM ++ .suspend = ahci_pci_device_suspend, ++ .resume = ahci_pci_device_resume, ++#endif ++}; ++#else ++static struct platform_driver ahci_driver = { ++ .probe = ahci_probe, ++ .remove = __devexit_p(ahci_remove), ++ .driver = { ++ .name = 
DRV_NAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++#endif ++ ++static int ahci_em_messages = 1; ++module_param(ahci_em_messages, int, 0444); ++/* add other LED protocol types when they become supported */ ++MODULE_PARM_DESC(ahci_em_messages, ++ "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED"); ++ ++#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE) ++static int marvell_enable; ++#else ++static int marvell_enable = 1; ++#endif ++module_param(marvell_enable, int, 0644); ++MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)"); ++ ++ ++static inline int ahci_nr_ports(u32 cap) ++{ ++ return (cap & 0x1f) + 1; ++} ++ ++static inline void __iomem *__ahci_port_base(struct ata_host *host, ++ unsigned int port_no) ++{ ++#if 0 ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ ++ return mmio + 0x100 + (port_no * 0x80); ++} ++ ++static inline void __iomem *ahci_port_base(struct ata_port *ap) ++{ ++ return __ahci_port_base(ap->host, ap->port_no); ++} ++ ++static void ahci_enable_ahci(void __iomem *mmio) ++{ ++ int i; ++ u32 tmp; ++ ++ /* turn on AHCI_EN */ ++ tmp = readl(mmio + HOST_CTL); ++ if (tmp & HOST_AHCI_EN) ++ return; ++ ++ /* Some controllers need AHCI_EN to be written multiple times. ++ * Try a few times before giving up. ++ */ ++ for (i = 0; i < 5; i++) { ++ tmp |= HOST_AHCI_EN; ++ writel(tmp, mmio + HOST_CTL); ++ tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ ++ if (tmp & HOST_AHCI_EN) ++ return; ++ msleep(10); ++ } ++ ++ WARN_ON(1); ++} ++ ++/** ++ * ahci_save_initial_config - Save and fixup initial config values ++ * @pdev: target PCI device ++ * @hpriv: host private area to store config values ++ * ++ * Some registers containing configuration info might be setup by ++ * BIOS and might be cleared on reset. 
This function saves the ++ * initial values of those registers into @hpriv such that they ++ * can be restored after controller reset. ++ * ++ * If inconsistent, config values are fixed up by this function. ++ * ++ * LOCKING: ++ * None. ++ */ ++#if 0 ++static void ahci_save_initial_config(struct pci_dev *pdev, ++ struct ahci_host_priv *hpriv) ++#else ++static void ahci_save_initial_config(struct platform_device *pdev, ++ struct ahci_host_priv *hpriv, ++ u8 * base) ++#endif ++{ ++#if 0 ++ void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)base; ++#endif ++ u32 cap, port_map; ++ int i; ++#if 0 ++ int mv; ++#endif ++ ++ /* make sure AHCI mode is enabled before accessing CAP */ ++ ahci_enable_ahci(mmio); ++ ++ /* Values prefixed with saved_ are written back to host after ++ * reset. Values without are used for driver operation. ++ */ ++ hpriv->saved_cap = cap = readl(mmio + HOST_CAP); ++ hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); ++ ++ /* some chips have errata preventing 64bit use */ ++ if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { ++ dev_printk(KERN_INFO, &pdev->dev, ++ "controller can't do 64bit DMA, forcing 32bit\n"); ++ cap &= ~HOST_CAP_64; ++ } ++ ++ if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { ++ dev_printk(KERN_INFO, &pdev->dev, ++ "controller can't do NCQ, turning off CAP_NCQ\n"); ++ cap &= ~HOST_CAP_NCQ; ++ } ++ ++ if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { ++ dev_printk(KERN_INFO, &pdev->dev, ++ "controller can do NCQ, turning on CAP_NCQ\n"); ++ cap |= HOST_CAP_NCQ; ++ } ++ ++ if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { ++ dev_printk(KERN_INFO, &pdev->dev, ++ "controller can't do PMP, turning off CAP_PMP\n"); ++ cap &= ~HOST_CAP_PMP; ++ } ++#if 0 ++ if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 && ++ port_map != 1) { ++ dev_printk(KERN_INFO, &pdev->dev, ++ "JMB361 has 
only one port, port_map 0x%x -> 0x%x\n", ++ port_map, 1); ++ port_map = 1; ++ } ++ ++ /* ++ * Temporary Marvell 6145 hack: PATA port presence ++ * is asserted through the standard AHCI port ++ * presence register, as bit 4 (counting from 0) ++ */ ++ if (hpriv->flags & AHCI_HFLAG_MV_PATA) { ++ if (pdev->device == 0x6121) ++ mv = 0x3; ++ else ++ mv = 0xf; ++ dev_printk(KERN_ERR, &pdev->dev, ++ "MV_AHCI HACK: port_map %x -> %x\n", ++ port_map, ++ port_map & mv); ++ dev_printk(KERN_ERR, &pdev->dev, ++ "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); ++ ++ port_map &= mv; ++ } ++#endif ++ ++ /* cross check port_map and cap.n_ports */ ++ if (port_map) { ++ int map_ports = 0; ++ ++ for (i = 0; i < AHCI_MAX_PORTS; i++) ++ if (port_map & (1 << i)) ++ map_ports++; ++ ++ /* If PI has more ports than n_ports, whine, clear ++ * port_map and let it be generated from n_ports. ++ */ ++ if (map_ports > ahci_nr_ports(cap)) { ++ dev_printk(KERN_WARNING, &pdev->dev, ++ "implemented port map (0x%x) contains more " ++ "ports than nr_ports (%u), using nr_ports\n", ++ port_map, ahci_nr_ports(cap)); ++ port_map = 0; ++ } ++ } ++ ++ /* fabricate port_map from cap.nr_ports */ ++ if (!port_map) { ++ port_map = (1 << ahci_nr_ports(cap)) - 1; ++ dev_printk(KERN_WARNING, &pdev->dev, ++ "forcing PORTS_IMPL to 0x%x\n", port_map); ++ ++ /* write the fixed up value to the PI register */ ++ hpriv->saved_port_map = port_map; ++ } ++ ++ /* record values to use during operation */ ++ hpriv->cap = cap; ++ hpriv->port_map = port_map; ++} ++ ++/** ++ * ahci_restore_initial_config - Restore initial config ++ * @host: target ATA host ++ * ++ * Restore initial config stored by ahci_save_initial_config(). ++ * ++ * LOCKING: ++ * None. 
++ */ ++static void ahci_restore_initial_config(struct ata_host *host) ++{ ++ struct ahci_host_priv *hpriv = host->private_data; ++#if 0 ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ ++ writel(hpriv->saved_cap, mmio + HOST_CAP); ++ writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); ++ (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ ++} ++ ++static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) ++{ ++ static const int offset[] = { ++ [SCR_STATUS] = PORT_SCR_STAT, ++ [SCR_CONTROL] = PORT_SCR_CTL, ++ [SCR_ERROR] = PORT_SCR_ERR, ++ [SCR_ACTIVE] = PORT_SCR_ACT, ++ [SCR_NOTIFICATION] = PORT_SCR_NTF, ++ }; ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ ++ if (sc_reg < ARRAY_SIZE(offset) && ++ (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) ++ return offset[sc_reg]; ++ return 0; ++} ++ ++static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) ++{ ++ void __iomem *port_mmio = ahci_port_base(link->ap); ++ int offset = ahci_scr_offset(link->ap, sc_reg); ++ ++ if (offset) { ++ *val = readl(port_mmio + offset); ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) ++{ ++ void __iomem *port_mmio = ahci_port_base(link->ap); ++ int offset = ahci_scr_offset(link->ap, sc_reg); ++ ++ if (offset) { ++ writel(val, port_mmio + offset); ++ return 0; ++ } ++ return -EINVAL; ++} ++ ++static void ahci_start_engine(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 tmp; ++ ++ /* start DMA */ ++ tmp = readl(port_mmio + PORT_CMD); ++ tmp |= PORT_CMD_START; ++ writel(tmp, port_mmio + PORT_CMD); ++ readl(port_mmio + PORT_CMD); /* flush */ ++} ++ ++static int ahci_stop_engine(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 tmp; ++ ++ tmp = readl(port_mmio + PORT_CMD); ++ ++ /* check if the HBA is 
idle */ ++ if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) ++ return 0; ++ ++ /* setting HBA to idle */ ++ tmp &= ~PORT_CMD_START; ++ writel(tmp, port_mmio + PORT_CMD); ++ ++ /* wait for engine to stop. This could be as long as 500 msec */ ++ tmp = ata_wait_register(port_mmio + PORT_CMD, ++ PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); ++ if (tmp & PORT_CMD_LIST_ON) ++ return -EIO; ++ ++ return 0; ++} ++ ++static void ahci_start_fis_rx(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ struct ahci_port_priv *pp = ap->private_data; ++ u32 tmp; ++ ++ /* set FIS registers */ ++ if (hpriv->cap & HOST_CAP_64) ++ writel((pp->cmd_slot_dma >> 16) >> 16, ++ port_mmio + PORT_LST_ADDR_HI); ++ writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR); ++ ++ if (hpriv->cap & HOST_CAP_64) ++ writel((pp->rx_fis_dma >> 16) >> 16, ++ port_mmio + PORT_FIS_ADDR_HI); ++ writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR); ++ ++ /* enable FIS reception */ ++ tmp = readl(port_mmio + PORT_CMD); ++ tmp |= PORT_CMD_FIS_RX; ++ writel(tmp, port_mmio + PORT_CMD); ++ ++ /* flush */ ++ readl(port_mmio + PORT_CMD); ++} ++ ++static int ahci_stop_fis_rx(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 tmp; ++ ++ /* disable FIS reception */ ++ tmp = readl(port_mmio + PORT_CMD); ++ tmp &= ~PORT_CMD_FIS_RX; ++ writel(tmp, port_mmio + PORT_CMD); ++ ++ /* wait for completion, spec says 500ms, give it 1000 */ ++ tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, ++ PORT_CMD_FIS_ON, 10, 1000); ++ if (tmp & PORT_CMD_FIS_ON) ++ return -EBUSY; ++ ++ return 0; ++} ++ ++static void ahci_power_up(struct ata_port *ap) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 cmd; ++ ++ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; ++ ++ /* spin up device */ ++ if (hpriv->cap & HOST_CAP_SSS) { 
++ cmd |= PORT_CMD_SPIN_UP; ++ writel(cmd, port_mmio + PORT_CMD); ++ } ++ ++ /* wake up link */ ++ writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); ++} ++ ++static void ahci_disable_alpm(struct ata_port *ap) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 cmd; ++ struct ahci_port_priv *pp = ap->private_data; ++ ++ /* IPM bits should be disabled by libata-core */ ++ /* get the existing command bits */ ++ cmd = readl(port_mmio + PORT_CMD); ++ ++ /* disable ALPM and ASP */ ++ cmd &= ~PORT_CMD_ASP; ++ cmd &= ~PORT_CMD_ALPE; ++ ++ /* force the interface back to active */ ++ cmd |= PORT_CMD_ICC_ACTIVE; ++ ++ /* write out new cmd value */ ++ writel(cmd, port_mmio + PORT_CMD); ++ cmd = readl(port_mmio + PORT_CMD); ++ ++ /* wait 10ms to be sure we've come out of any low power state */ ++ msleep(10); ++ ++ /* clear out any PhyRdy stuff from interrupt status */ ++ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); ++ ++ /* go ahead and clean out PhyRdy Change from Serror too */ ++ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); ++ ++ /* ++ * Clear flag to indicate that we should ignore all PhyRdy ++ * state changes ++ */ ++ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; ++ ++ /* ++ * Enable interrupts on Phy Ready. 
++ */ ++ pp->intr_mask |= PORT_IRQ_PHYRDY; ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); ++ ++ /* ++ * don't change the link pm policy - we can be called ++ * just to turn of link pm temporarily ++ */ ++} ++ ++static int ahci_enable_alpm(struct ata_port *ap, ++ enum link_pm policy) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 cmd; ++ struct ahci_port_priv *pp = ap->private_data; ++ u32 asp; ++ ++ /* Make sure the host is capable of link power management */ ++ if (!(hpriv->cap & HOST_CAP_ALPM)) ++ return -EINVAL; ++ ++ switch (policy) { ++ case MAX_PERFORMANCE: ++ case NOT_AVAILABLE: ++ /* ++ * if we came here with NOT_AVAILABLE, ++ * it just means this is the first time we ++ * have tried to enable - default to max performance, ++ * and let the user go to lower power modes on request. ++ */ ++ ahci_disable_alpm(ap); ++ return 0; ++ case MIN_POWER: ++ /* configure HBA to enter SLUMBER */ ++ asp = PORT_CMD_ASP; ++ break; ++ case MEDIUM_POWER: ++ /* configure HBA to enter PARTIAL */ ++ asp = 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* ++ * Disable interrupts on Phy Ready. This keeps us from ++ * getting woken up due to spurious phy ready interrupts ++ * TBD - Hot plug should be done via polling now, is ++ * that even supported? 
++ */ ++ pp->intr_mask &= ~PORT_IRQ_PHYRDY; ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); ++ ++ /* ++ * Set a flag to indicate that we should ignore all PhyRdy ++ * state changes since these can happen now whenever we ++ * change link state ++ */ ++ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; ++ ++ /* get the existing command bits */ ++ cmd = readl(port_mmio + PORT_CMD); ++ ++ /* ++ * Set ASP based on Policy ++ */ ++ cmd |= asp; ++ ++ /* ++ * Setting this bit will instruct the HBA to aggressively ++ * enter a lower power link state when it's appropriate and ++ * based on the value set above for ASP ++ */ ++ cmd |= PORT_CMD_ALPE; ++ ++ /* write out new cmd value */ ++ writel(cmd, port_mmio + PORT_CMD); ++ cmd = readl(port_mmio + PORT_CMD); ++ ++ /* IPM bits should be set by libata-core */ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static void ahci_power_down(struct ata_port *ap) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 cmd, scontrol; ++ ++ if (!(hpriv->cap & HOST_CAP_SSS)) ++ return; ++ ++ /* put device into listen mode, first set PxSCTL.DET to 0 */ ++ scontrol = readl(port_mmio + PORT_SCR_CTL); ++ scontrol &= ~0xf; ++ writel(scontrol, port_mmio + PORT_SCR_CTL); ++ ++ /* then set PxCMD.SUD to 0 */ ++ cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK; ++ cmd &= ~PORT_CMD_SPIN_UP; ++ writel(cmd, port_mmio + PORT_CMD); ++} ++#endif ++ ++static void ahci_start_port(struct ata_port *ap) ++{ ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ata_link *link; ++ struct ahci_em_priv *emp; ++ ssize_t rc; ++ int i; ++ ++ /* enable FIS reception */ ++ ahci_start_fis_rx(ap); ++ ++ /* enable DMA */ ++ ahci_start_engine(ap); ++ ++ /* turn on LEDs */ ++ if (ap->flags & ATA_FLAG_EM) { ++ ata_for_each_link(link, ap, EDGE) { ++ emp = &pp->em_priv[link->pmp]; ++ ++ /* EM Transmit bit maybe busy during init */ ++ for (i = 0; i < EM_MAX_RETRY; i++) { ++ rc = ahci_transmit_led_message(ap, ++ 
emp->led_state, ++ 4); ++ if (rc == -EBUSY) ++ msleep(1); ++ else ++ break; ++ } ++ } ++ } ++ ++ if (ap->flags & ATA_FLAG_SW_ACTIVITY) ++ ata_for_each_link(link, ap, EDGE) ++ ahci_init_sw_activity(link); ++ ++} ++ ++static int ahci_deinit_port(struct ata_port *ap, const char **emsg) ++{ ++ int rc; ++ ++ /* disable DMA */ ++ rc = ahci_stop_engine(ap); ++ if (rc) { ++ *emsg = "failed to stop engine"; ++ return rc; ++ } ++ ++ /* disable FIS reception */ ++ rc = ahci_stop_fis_rx(ap); ++ if (rc) { ++ *emsg = "failed stop FIS RX"; ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static int ahci_reset_controller(struct ata_host *host) ++{ ++#if 0 ++ struct pci_dev *pdev = to_pci_dev(host->dev); ++ struct ahci_host_priv *hpriv = host->private_data; ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ u32 tmp; ++ ++ /* we must be in AHCI mode, before using anything ++ * AHCI-specific, such as HOST_RESET. ++ */ ++ ahci_enable_ahci(mmio); ++ ++ /* global controller reset */ ++ if (!ahci_skip_host_reset) { ++ tmp = readl(mmio + HOST_CTL); ++ if ((tmp & HOST_RESET) == 0) { ++ writel(tmp | HOST_RESET, mmio + HOST_CTL); ++ readl(mmio + HOST_CTL); /* flush */ ++ } ++ ++ /* ++ * to perform host reset, OS should set HOST_RESET ++ * and poll until this bit is read to be "0". ++ * reset must complete within 1 second, or ++ * the hardware should be considered fried. ++ */ ++ tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, ++ HOST_RESET, 10, 1000); ++ ++ if (tmp & HOST_RESET) { ++ dev_printk(KERN_ERR, host->dev, ++ "controller reset failed (0x%x)\n", tmp); ++ return -EIO; ++ } ++ ++ /* turn on AHCI mode */ ++ ahci_enable_ahci(mmio); ++ ++ /* Some registers might be cleared on reset. Restore ++ * initial values. 
++ */ ++ ahci_restore_initial_config(host); ++ } else ++ dev_printk(KERN_INFO, host->dev, ++ "skipping global host reset\n"); ++ ++#if 0 ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL) { ++ u16 tmp16; ++ ++ /* configure PCS */ ++ pci_read_config_word(pdev, 0x92, &tmp16); ++ if ((tmp16 & hpriv->port_map) != hpriv->port_map) { ++ tmp16 |= hpriv->port_map; ++ pci_write_config_word(pdev, 0x92, tmp16); ++ } ++ } ++#endif ++ ++ return 0; ++} ++ ++static void ahci_sw_activity(struct ata_link *link) ++{ ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; ++ ++ if (!(link->flags & ATA_LFLAG_SW_ACTIVITY)) ++ return; ++ ++ emp->activity++; ++ if (!timer_pending(&emp->timer)) ++ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); ++} ++ ++static void ahci_sw_activity_blink(unsigned long arg) ++{ ++ struct ata_link *link = (struct ata_link *)arg; ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; ++ unsigned long led_message = emp->led_state; ++ u32 activity_led_state; ++ unsigned long flags; ++ ++ led_message &= EM_MSG_LED_VALUE; ++ led_message |= ap->port_no | (link->pmp << 8); ++ ++ /* check to see if we've had activity. If so, ++ * toggle state of LED and reset timer. If not, ++ * turn LED to desired idle state. 
++ */ ++ spin_lock_irqsave(ap->lock, flags); ++ if (emp->saved_activity != emp->activity) { ++ emp->saved_activity = emp->activity; ++ /* get the current LED state */ ++ activity_led_state = led_message & EM_MSG_LED_VALUE_ON; ++ ++ if (activity_led_state) ++ activity_led_state = 0; ++ else ++ activity_led_state = 1; ++ ++ /* clear old state */ ++ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; ++ ++ /* toggle state */ ++ led_message |= (activity_led_state << 16); ++ mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100)); ++ } else { ++ /* switch to idle */ ++ led_message &= ~EM_MSG_LED_VALUE_ACTIVITY; ++ if (emp->blink_policy == BLINK_OFF) ++ led_message |= (1 << 16); ++ } ++ spin_unlock_irqrestore(ap->lock, flags); ++ ahci_transmit_led_message(ap, led_message, 4); ++} ++ ++static void ahci_init_sw_activity(struct ata_link *link) ++{ ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; ++ ++ /* init activity stats, setup timer */ ++ emp->saved_activity = emp->activity = 0; ++ setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); ++ ++ /* check our blink policy and set flag for link if it's enabled */ ++ if (emp->blink_policy) ++ link->flags |= ATA_LFLAG_SW_ACTIVITY; ++} ++ ++static int ahci_reset_em(struct ata_host *host) ++{ ++#if 0 ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ u32 em_ctl; ++ ++ em_ctl = readl(mmio + HOST_EM_CTL); ++ if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST)) ++ return -EINVAL; ++ ++ writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL); ++ return 0; ++} ++ ++static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, ++ ssize_t size) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ struct ahci_port_priv *pp = ap->private_data; ++#if 0 ++ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void 
__iomem *)ap->host->iomap;//[AHCI_BAR]; ++#endif ++ u32 em_ctl; ++ u32 message[] = {0, 0}; ++ unsigned long flags; ++ int pmp; ++ struct ahci_em_priv *emp; ++ ++ /* get the slot number from the message */ ++ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; ++ if (pmp < EM_MAX_SLOTS) ++ emp = &pp->em_priv[pmp]; ++ else ++ return -EINVAL; ++ ++ spin_lock_irqsave(ap->lock, flags); ++ ++ /* ++ * if we are still busy transmitting a previous message, ++ * do not allow ++ */ ++ em_ctl = readl(mmio + HOST_EM_CTL); ++ if (em_ctl & EM_CTL_TM) { ++ spin_unlock_irqrestore(ap->lock, flags); ++ return -EBUSY; ++ } ++ ++ /* ++ * create message header - this is all zero except for ++ * the message size, which is 4 bytes. ++ */ ++ message[0] |= (4 << 8); ++ ++ /* ignore 0:4 of byte zero, fill in port info yourself */ ++ message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no); ++ ++ /* write message to EM_LOC */ ++ writel(message[0], mmio + hpriv->em_loc); ++ writel(message[1], mmio + hpriv->em_loc+4); ++ ++ /* save off new led state for port/slot */ ++ emp->led_state = state; ++ ++ /* ++ * tell hardware to transmit the message ++ */ ++ writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); ++ ++ spin_unlock_irqrestore(ap->lock, flags); ++ return size; ++} ++ ++static ssize_t ahci_led_show(struct ata_port *ap, char *buf) ++{ ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ata_link *link; ++ struct ahci_em_priv *emp; ++ int rc = 0; ++ ++ ata_for_each_link(link, ap, EDGE) { ++ emp = &pp->em_priv[link->pmp]; ++ rc += sprintf(buf, "%lx\n", emp->led_state); ++ } ++ return rc; ++} ++ ++static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, ++ size_t size) ++{ ++ int state; ++ int pmp; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp; ++ ++ state = simple_strtoul(buf, NULL, 0); ++ ++ /* get the slot number from the message */ ++ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8; ++ if (pmp < EM_MAX_SLOTS) ++ emp = &pp->em_priv[pmp]; ++ else ++ return 
-EINVAL; ++ ++ /* mask off the activity bits if we are in sw_activity ++ * mode, user should turn off sw_activity before setting ++ * activity led through em_message ++ */ ++ if (emp->blink_policy) ++ state &= ~EM_MSG_LED_VALUE_ACTIVITY; ++ ++ return ahci_transmit_led_message(ap, state, size); ++} ++ ++static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val) ++{ ++ struct ata_link *link = dev->link; ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; ++ u32 port_led_state = emp->led_state; ++ ++ /* save the desired Activity LED behavior */ ++ if (val == OFF) { ++ /* clear LFLAG */ ++ link->flags &= ~(ATA_LFLAG_SW_ACTIVITY); ++ ++ /* set the LED to OFF */ ++ port_led_state &= EM_MSG_LED_VALUE_OFF; ++ port_led_state |= (ap->port_no | (link->pmp << 8)); ++ ahci_transmit_led_message(ap, port_led_state, 4); ++ } else { ++ link->flags |= ATA_LFLAG_SW_ACTIVITY; ++ if (val == BLINK_OFF) { ++ /* set LED to ON for idle */ ++ port_led_state &= EM_MSG_LED_VALUE_OFF; ++ port_led_state |= (ap->port_no | (link->pmp << 8)); ++ port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */ ++ ahci_transmit_led_message(ap, port_led_state, 4); ++ } ++ } ++ emp->blink_policy = val; ++ return 0; ++} ++ ++static ssize_t ahci_activity_show(struct ata_device *dev, char *buf) ++{ ++ struct ata_link *link = dev->link; ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; ++ ++ /* display the saved value of activity behavior for this ++ * disk. 
++ */ ++ return sprintf(buf, "%d\n", emp->blink_policy); ++} ++ ++#if 0 ++static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap, ++ int port_no, void __iomem *mmio, ++ void __iomem *port_mmio) ++#else ++static void ahci_port_init(struct platform_device *pdev, struct ata_port *ap, ++ int port_no, void __iomem *mmio, ++ void __iomem *port_mmio) ++#endif ++{ ++ const char *emsg = NULL; ++ int rc; ++ u32 tmp; ++ ++ /* make sure port is not active */ ++ rc = ahci_deinit_port(ap, &emsg); ++ if (rc) ++ dev_printk(KERN_WARNING, &pdev->dev, ++ "%s (%d)\n", emsg, rc); ++ ++ /* clear SError */ ++ tmp = readl(port_mmio + PORT_SCR_ERR); ++ VPRINTK("PORT_SCR_ERR 0x%x\n", tmp); ++ writel(tmp, port_mmio + PORT_SCR_ERR); ++ ++ /* clear port IRQ */ ++ tmp = readl(port_mmio + PORT_IRQ_STAT); ++ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); ++ if (tmp) ++ writel(tmp, port_mmio + PORT_IRQ_STAT); ++ ++ writel(1 << port_no, mmio + HOST_IRQ_STAT); ++} ++ ++static void ahci_init_controller(struct ata_host *host) ++{ ++ struct ahci_host_priv *hpriv = host->private_data; ++#if 0 ++ struct pci_dev *pdev = to_pci_dev(host->dev); ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ struct platform_device *pdev = to_platform_device(host->dev); ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ int i; ++ void __iomem *port_mmio; ++ u32 tmp; ++ int mv; ++ ++ if (hpriv->flags & AHCI_HFLAG_MV_PATA) { ++#if 0 ++ if (pdev->device == 0x6121) ++ mv = 2; ++ else ++ mv = 4; ++#else ++ mv = 0; ++#endif ++ port_mmio = __ahci_port_base(host, mv); ++ ++ writel(0, port_mmio + PORT_IRQ_MASK); ++ ++ /* clear port IRQ */ ++ tmp = readl(port_mmio + PORT_IRQ_STAT); ++ VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp); ++ if (tmp) ++ writel(tmp, port_mmio + PORT_IRQ_STAT); ++ } ++ ++ /* set Timer 1ms, hclk = 200Mhz */ ++ /* FIXME: Add auto detect function */ ++ printk("CPU clock : %d \n", cns3xxx_cpu_clock()); ++ tmp = readl(mmio + HOST_TIMER1MS); ++ printk("*** Timer 1ms: %d(0x%x) 
***\n",tmp,tmp); ++ writel(cns3xxx_cpu_clock()*500, mmio + HOST_TIMER1MS); ++ tmp = readl(mmio + HOST_TIMER1MS); ++ printk("*** Set to: %d(0x%x) ***\n",tmp, tmp); ++ ++ ++ ++ for (i = 0; i < host->n_ports; i++) { ++ struct ata_port *ap = host->ports[i]; ++ ++ port_mmio = ahci_port_base(ap); ++ if (ata_port_is_dummy(ap)) ++ continue; ++ ++ ahci_port_init(pdev, ap, i, mmio, port_mmio); ++ } ++ ++ tmp = readl(mmio + HOST_CTL); ++ VPRINTK("HOST_CTL 0x%x\n", tmp); ++ writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL); ++ tmp = readl(mmio + HOST_CTL); ++ VPRINTK("HOST_CTL 0x%x\n", tmp); ++} ++ ++static void ahci_dev_config(struct ata_device *dev) ++{ ++ struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; ++ ++ if (hpriv->flags & AHCI_HFLAG_SECT255) { ++ dev->max_sectors = 255; ++ ata_dev_printk(dev, KERN_INFO, ++ "SB600 AHCI: limiting to 255 sectors per cmd\n"); ++ } ++} ++ ++static unsigned int ahci_dev_classify(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ata_taskfile tf; ++ u32 tmp; ++ ++ tmp = readl(port_mmio + PORT_SIG); ++ tf.lbah = (tmp >> 24) & 0xff; ++ tf.lbam = (tmp >> 16) & 0xff; ++ tf.lbal = (tmp >> 8) & 0xff; ++ tf.nsect = (tmp) & 0xff; ++ ++ return ata_dev_classify(&tf); ++} ++ ++static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, ++ u32 opts) ++{ ++ dma_addr_t cmd_tbl_dma; ++ ++ cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ; ++ ++#if 0 ++ pp->cmd_slot[tag].opts = cpu_to_le32(opts); ++#else ++ pp->cmd_slot[tag].opts = opts; ++#endif ++ pp->cmd_slot[tag].status = 0; ++#if 0 ++ pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff); ++ pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); ++#else ++ pp->cmd_slot[tag].tbl_addr = cmd_tbl_dma & 0xffffffff; ++ pp->cmd_slot[tag].tbl_addr_hi = (cmd_tbl_dma >> 16) >> 16; ++#endif ++} ++ ++static int ahci_kick_engine(struct ata_port *ap, int force_restart) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ 
struct ahci_host_priv *hpriv = ap->host->private_data; ++ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; ++ u32 tmp; ++ int busy, rc; ++ ++ /* do we need to kick the port? */ ++ busy = status & (ATA_BUSY | ATA_DRQ); ++ if (!busy && !force_restart) ++ return 0; ++ ++ /* stop engine */ ++ rc = ahci_stop_engine(ap); ++ if (rc) ++ goto out_restart; ++ ++ /* need to do CLO? */ ++ if (!busy) { ++ rc = 0; ++ goto out_restart; ++ } ++ ++ if (!(hpriv->cap & HOST_CAP_CLO)) { ++ rc = -EOPNOTSUPP; ++ goto out_restart; ++ } ++ ++ /* perform CLO */ ++ tmp = readl(port_mmio + PORT_CMD); ++ tmp |= PORT_CMD_CLO; ++ writel(tmp, port_mmio + PORT_CMD); ++ ++ rc = 0; ++ tmp = ata_wait_register(port_mmio + PORT_CMD, ++ PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); ++ if (tmp & PORT_CMD_CLO) ++ rc = -EIO; ++ ++ /* restart engine */ ++ out_restart: ++ ahci_start_engine(ap); ++ return rc; ++} ++ ++static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, ++ struct ata_taskfile *tf, int is_cmd, u16 flags, ++ unsigned long timeout_msec) ++{ ++ const u32 cmd_fis_len = 5; /* five dwords */ ++ struct ahci_port_priv *pp = ap->private_data; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u8 *fis = pp->cmd_tbl; ++ u32 tmp; ++ ++ /* prep the command */ ++ ata_tf_to_fis(tf, pmp, is_cmd, fis); ++ ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); ++ ++ /* issue & wait */ ++ writel(1, port_mmio + PORT_CMD_ISSUE); ++ ++ if (timeout_msec) { ++ tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, ++ 1, timeout_msec); ++ if (tmp & 0x1) { ++ ahci_kick_engine(ap, 1); ++ return -EBUSY; ++ } ++ } else ++ readl(port_mmio + PORT_CMD_ISSUE); /* flush */ ++ ++ return 0; ++} ++ ++static int ahci_do_softreset(struct ata_link *link, unsigned int *class, ++ int pmp, unsigned long deadline, ++ int (*check_ready)(struct ata_link *link)) ++{ ++ struct ata_port *ap = link->ap; ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ const char *reason = NULL; ++ unsigned long now, msecs; ++ 
struct ata_taskfile tf; ++ int rc; ++ ++ DPRINTK("ENTER\n"); ++ ++ /* prepare for SRST (AHCI-1.1 10.4.1) */ ++ rc = ahci_kick_engine(ap, 1); ++ if (rc && rc != -EOPNOTSUPP) ++ ata_link_printk(link, KERN_WARNING, ++ "failed to reset engine (errno=%d)\n", rc); ++ ++ ata_tf_init(link->device, &tf); ++ ++ /* issue the first D2H Register FIS */ ++ msecs = 0; ++ now = jiffies; ++ if (time_after(now, deadline)) ++ msecs = jiffies_to_msecs(deadline - now); ++ ++ tf.ctl |= ATA_SRST; ++ if (ahci_exec_polled_cmd(ap, pmp, &tf, 0, ++ AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) { ++ rc = -EIO; ++ reason = "1st FIS failed"; ++ goto fail; ++ } ++ ++ /* spec says at least 5us, but be generous and sleep for 1ms */ ++ msleep(1); ++ ++ /* issue the second D2H Register FIS */ ++ tf.ctl &= ~ATA_SRST; ++ ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0); ++ ++ /* wait for link to become ready */ ++ rc = ata_wait_after_reset(link, deadline, check_ready); ++ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) { ++ /* ++ * Workaround for cases where link online status can't ++ * be trusted. Treat device readiness timeout as link ++ * offline. 
++ */ ++ ata_link_printk(link, KERN_INFO, ++ "device not ready, treating as offline\n"); ++ *class = ATA_DEV_NONE; ++ } else if (rc) { ++ /* link occupied, -ENODEV too is an error */ ++ reason = "device not ready"; ++ goto fail; ++ } else ++ *class = ahci_dev_classify(ap); ++ ++ DPRINTK("EXIT, class=%u\n", *class); ++ return 0; ++ ++ fail: ++ ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason); ++ return rc; ++} ++ ++static int ahci_check_ready(struct ata_link *link) ++{ ++ void __iomem *port_mmio = ahci_port_base(link->ap); ++ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; ++ ++ return ata_check_ready(status); ++} ++ ++static int ahci_softreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ int pmp = sata_srst_pmp(link); ++ ++ DPRINTK("ENTER\n"); ++ ++ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); ++} ++ ++static int ahci_sb600_check_ready(struct ata_link *link) ++{ ++ void __iomem *port_mmio = ahci_port_base(link->ap); ++ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF; ++ u32 irq_status = readl(port_mmio + PORT_IRQ_STAT); ++ ++ /* ++ * There is no need to check TFDATA if BAD PMP is found due to HW bug, ++ * which can save timeout delay. ++ */ ++ if (irq_status & PORT_IRQ_BAD_PMP) ++ return -EIO; ++ ++ return ata_check_ready(status); ++} ++ ++static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ struct ata_port *ap = link->ap; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ int pmp = sata_srst_pmp(link); ++ int rc; ++ u32 irq_sts; ++ ++ DPRINTK("ENTER\n"); ++ ++ rc = ahci_do_softreset(link, class, pmp, deadline, ++ ahci_sb600_check_ready); ++ ++ /* ++ * Soft reset fails on some ATI chips with IPMS set when PMP ++ * is enabled but SATA HDD/ODD is connected to SATA port, ++ * do soft reset again to port 0. 
++ */ ++ if (rc == -EIO) { ++ irq_sts = readl(port_mmio + PORT_IRQ_STAT); ++ if (irq_sts & PORT_IRQ_BAD_PMP) { ++ ata_link_printk(link, KERN_WARNING, ++ "applying SB600 PMP SRST workaround " ++ "and retrying\n"); ++ rc = ahci_do_softreset(link, class, 0, deadline, ++ ahci_check_ready); ++ } ++ } ++ ++ return rc; ++} ++ ++static int ahci_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; ++ struct ata_taskfile tf; ++ bool online; ++ int rc; ++ ++ DPRINTK("ENTER\n"); ++ ++ ahci_stop_engine(ap); ++ ++ /* clear D2H reception area to properly wait for D2H FIS */ ++ ata_tf_init(link->device, &tf); ++ tf.command = 0x80; ++ ata_tf_to_fis(&tf, 0, 0, d2h_fis); ++ ++ rc = sata_link_hardreset(link, timing, deadline, &online, ++ ahci_check_ready); ++ ++ ahci_start_engine(ap); ++ ++ if (online) ++ *class = ahci_dev_classify(ap); ++ ++ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); ++ return rc; ++} ++ ++static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ struct ata_port *ap = link->ap; ++ bool online; ++ int rc; ++ ++ DPRINTK("ENTER\n"); ++ ++ ahci_stop_engine(ap); ++ ++ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), ++ deadline, &online, NULL); ++ ++ ahci_start_engine(ap); ++ ++ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); ++ ++ /* vt8251 doesn't clear BSY on signature FIS reception, ++ * request follow-up softreset. ++ */ ++ return online ? 
-EAGAIN : rc; ++} ++ ++#if 0 ++static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ struct ata_port *ap = link->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; ++ struct ata_taskfile tf; ++ bool online; ++ int rc; ++ ++ ahci_stop_engine(ap); ++ ++ /* clear D2H reception area to properly wait for D2H FIS */ ++ ata_tf_init(link->device, &tf); ++ tf.command = 0x80; ++ ata_tf_to_fis(&tf, 0, 0, d2h_fis); ++ ++ rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), ++ deadline, &online, NULL); ++ ++ ahci_start_engine(ap); ++ ++ /* The pseudo configuration device on SIMG4726 attached to ++ * ASUS P5W-DH Deluxe doesn't send signature FIS after ++ * hardreset if no device is attached to the first downstream ++ * port && the pseudo device locks up on SRST w/ PMP==0. To ++ * work around this, wait for !BSY only briefly. If BSY isn't ++ * cleared, perform CLO and proceed to IDENTIFY (achieved by ++ * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA). ++ * ++ * Wait for two seconds. Devices attached to downstream port ++ * which can't process the following IDENTIFY after this will ++ * have to be reset again. For most cases, this should ++ * suffice while making probing snappish enough. 
++ */ ++ if (online) { ++ rc = ata_wait_after_reset(link, jiffies + 2 * HZ, ++ ahci_check_ready); ++ if (rc) ++ ahci_kick_engine(ap, 0); ++ } ++ return rc; ++} ++#endif ++ ++static void ahci_postreset(struct ata_link *link, unsigned int *class) ++{ ++ struct ata_port *ap = link->ap; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 new_tmp, tmp; ++ ++ ata_std_postreset(link, class); ++ ++ /* Make sure port's ATAPI bit is set appropriately */ ++ new_tmp = tmp = readl(port_mmio + PORT_CMD); ++ if (*class == ATA_DEV_ATAPI) ++ new_tmp |= PORT_CMD_ATAPI; ++ else ++ new_tmp &= ~PORT_CMD_ATAPI; ++ if (new_tmp != tmp) { ++ writel(new_tmp, port_mmio + PORT_CMD); ++ readl(port_mmio + PORT_CMD); /* flush */ ++ } ++} ++ ++static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) ++{ ++ struct scatterlist *sg; ++ struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ; ++ unsigned int si; ++ ++ VPRINTK("ENTER\n"); ++ ++ /* ++ * Next, the S/G list. ++ */ ++ for_each_sg(qc->sg, sg, qc->n_elem, si) { ++ dma_addr_t addr = sg_dma_address(sg); ++ u32 sg_len = sg_dma_len(sg); ++ ++#if 0 ++ ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff); ++ ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16); ++ ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1); ++#else ++ ahci_sg[si].addr = addr & 0xffffffff; ++ ahci_sg[si].addr_hi = (addr >> 16) >> 16; ++ ahci_sg[si].flags_size = sg_len - 1; ++#endif ++ } ++ ++ return si; ++} ++ ++static void ahci_qc_prep(struct ata_queued_cmd *qc) ++{ ++ struct ata_port *ap = qc->ap; ++ struct ahci_port_priv *pp = ap->private_data; ++ int is_atapi = ata_is_atapi(qc->tf.protocol); ++ void *cmd_tbl; ++ u32 opts; ++ const u32 cmd_fis_len = 5; /* five dwords */ ++ unsigned int n_elem; ++ ++ /* ++ * Fill in command table information. First, the header, ++ * a SATA Register - Host to Device command FIS. 
++ */ ++ cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; ++ ++ ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); ++ if (is_atapi) { ++ memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32); ++ memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len); ++ } ++ ++ n_elem = 0; ++ if (qc->flags & ATA_QCFLAG_DMAMAP) ++ n_elem = ahci_fill_sg(qc, cmd_tbl); ++ ++ /* ++ * Fill in command slot information. ++ */ ++ opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12); ++ if (qc->tf.flags & ATA_TFLAG_WRITE) ++ opts |= AHCI_CMD_WRITE; ++ if (is_atapi) ++ opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ++ ++ ahci_fill_cmd_slot(pp, qc->tag, opts); ++} ++ ++static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) ++{ ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ata_eh_info *host_ehi = &ap->link.eh_info; ++ struct ata_link *link = NULL; ++ struct ata_queued_cmd *active_qc; ++ struct ata_eh_info *active_ehi; ++ u32 serror; ++ ++ /* determine active link */ ++ ata_for_each_link(link, ap, EDGE) ++ if (ata_link_active(link)) ++ break; ++ if (!link) ++ link = &ap->link; ++ ++ active_qc = ata_qc_from_tag(ap, link->active_tag); ++ active_ehi = &link->eh_info; ++ ++ /* record irq stat */ ++ ata_ehi_clear_desc(host_ehi); ++ ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); ++ ++ /* AHCI needs SError cleared; otherwise, it might lock up */ ++ ahci_scr_read(&ap->link, SCR_ERROR, &serror); ++ ahci_scr_write(&ap->link, SCR_ERROR, serror); ++ host_ehi->serror |= serror; ++ ++ /* some controllers set IRQ_IF_ERR on device errors, ignore it */ ++ if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) ++ irq_stat &= ~PORT_IRQ_IF_ERR; ++ ++ if (irq_stat & PORT_IRQ_TF_ERR) { ++ /* If qc is active, charge it; otherwise, the active ++ * link. There's no active qc on NCQ errors. It will ++ * be determined by EH by reading log page 10h. 
++ */ ++ if (active_qc) ++ active_qc->err_mask |= AC_ERR_DEV; ++ else ++ active_ehi->err_mask |= AC_ERR_DEV; ++ ++ if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) ++ host_ehi->serror &= ~SERR_INTERNAL; ++ } ++ ++ if (irq_stat & PORT_IRQ_UNK_FIS) { ++ u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); ++ ++ active_ehi->err_mask |= AC_ERR_HSM; ++ active_ehi->action |= ATA_EH_RESET; ++ ata_ehi_push_desc(active_ehi, ++ "unknown FIS %08x %08x %08x %08x" , ++ unk[0], unk[1], unk[2], unk[3]); ++ } ++ ++ if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { ++ active_ehi->err_mask |= AC_ERR_HSM; ++ active_ehi->action |= ATA_EH_RESET; ++ ata_ehi_push_desc(active_ehi, "incorrect PMP"); ++ } ++ ++ if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { ++ host_ehi->err_mask |= AC_ERR_HOST_BUS; ++ host_ehi->action |= ATA_EH_RESET; ++ ata_ehi_push_desc(host_ehi, "host bus error"); ++ } ++ ++ if (irq_stat & PORT_IRQ_IF_ERR) { ++ host_ehi->err_mask |= AC_ERR_ATA_BUS; ++ host_ehi->action |= ATA_EH_RESET; ++ ata_ehi_push_desc(host_ehi, "interface fatal error"); ++ } ++ ++ if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { ++ ata_ehi_hotplugged(host_ehi); ++ ata_ehi_push_desc(host_ehi, "%s", ++ irq_stat & PORT_IRQ_CONNECT ? 
++ "connection status changed" : "PHY RDY changed"); ++ } ++ ++ /* okay, let's hand over to EH */ ++ ++ if (irq_stat & PORT_IRQ_FREEZE) ++ ata_port_freeze(ap); ++ else ++ ata_port_abort(ap); ++} ++ ++static void ahci_port_intr(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ata_eh_info *ehi = &ap->link.eh_info; ++ struct ahci_port_priv *pp = ap->private_data; ++ struct ahci_host_priv *hpriv = ap->host->private_data; ++ int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); ++ u32 status, qc_active; ++ int rc; ++ ++ status = readl(port_mmio + PORT_IRQ_STAT); ++ writel(status, port_mmio + PORT_IRQ_STAT); ++ ++ /* ignore BAD_PMP while resetting */ ++ if (unlikely(resetting)) ++ status &= ~PORT_IRQ_BAD_PMP; ++ ++ /* If we are getting PhyRdy, this is ++ * just a power state change, we should ++ * clear out this, plus the PhyRdy/Comm ++ * Wake bits from Serror ++ */ ++ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && ++ (status & PORT_IRQ_PHYRDY)) { ++ status &= ~PORT_IRQ_PHYRDY; ++ ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); ++ } ++ ++ if (unlikely(status & PORT_IRQ_ERROR)) { ++ ahci_error_intr(ap, status); ++ return; ++ } ++ ++ if (status & PORT_IRQ_SDB_FIS) { ++ /* If SNotification is available, leave notification ++ * handling to sata_async_notification(). If not, ++ * emulate it by snooping SDB FIS RX area. ++ * ++ * Snooping FIS RX area is probably cheaper than ++ * poking SNotification but some constrollers which ++ * implement SNotification, ICH9 for example, don't ++ * store AN SDB FIS into receive area. ++ */ ++ if (hpriv->cap & HOST_CAP_SNTF) ++ sata_async_notification(ap); ++ else { ++ /* If the 'N' bit in word 0 of the FIS is set, ++ * we just received asynchronous notification. ++ * Tell libata about it. 
++ */ ++ const __le32 *f = pp->rx_fis + RX_FIS_SDB; ++#if 0 ++ u32 f0 = le32_to_cpu(f[0]); ++#else ++ u32 f0 = f[0]; ++#endif ++ ++ if (f0 & (1 << 15)) ++ sata_async_notification(ap); ++ } ++ } ++ ++ /* pp->active_link is valid iff any command is in flight */ ++ if (ap->qc_active && pp->active_link->sactive) ++ qc_active = readl(port_mmio + PORT_SCR_ACT); ++ else ++ qc_active = readl(port_mmio + PORT_CMD_ISSUE); ++ ++ rc = ata_qc_complete_multiple(ap, qc_active); ++ ++ /* while resetting, invalid completions are expected */ ++ if (unlikely(rc < 0 && !resetting)) { ++ ehi->err_mask |= AC_ERR_HSM; ++ ehi->action |= ATA_EH_RESET; ++ ata_port_freeze(ap); ++ } ++} ++ ++static irqreturn_t ahci_interrupt(int irq, void *dev_instance) ++{ ++ struct ata_host *host = dev_instance; ++ struct ahci_host_priv *hpriv; ++ unsigned int i, handled = 0; ++ void __iomem *mmio; ++ u32 irq_stat, irq_masked; ++ ++ VPRINTK("ENTER\n"); ++ ++ hpriv = host->private_data; ++#if 0 ++ mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ ++ /* sigh. 0xffffffff is a valid return from h/w */ ++ irq_stat = readl(mmio + HOST_IRQ_STAT); ++ if (!irq_stat) ++ return IRQ_NONE; ++ ++ irq_masked = irq_stat & hpriv->port_map; ++ ++ spin_lock(&host->lock); ++ ++ for (i = 0; i < host->n_ports; i++) { ++ struct ata_port *ap; ++ ++ if (!(irq_masked & (1 << i))) ++ continue; ++ ++ ap = host->ports[i]; ++ if (ap) { ++ ahci_port_intr(ap); ++ VPRINTK("port %u\n", i); ++ } else { ++ VPRINTK("port %u (no irq)\n", i); ++ if (ata_ratelimit()) ++ dev_printk(KERN_WARNING, host->dev, ++ "interrupt on disabled port %u\n", i); ++ } ++ ++ handled = 1; ++ } ++ ++ /* HOST_IRQ_STAT behaves as level triggered latch meaning that ++ * it should be cleared after all the port events are cleared; ++ * otherwise, it will raise a spurious interrupt after each ++ * valid one. Please read section 10.6.2 of ahci 1.1 for more ++ * information. 
++ * ++ * Also, use the unmasked value to clear interrupt as spurious ++ * pending event on a dummy port might cause screaming IRQ. ++ */ ++ writel(irq_stat, mmio + HOST_IRQ_STAT); ++ ++ spin_unlock(&host->lock); ++ ++ VPRINTK("EXIT\n"); ++ ++ return IRQ_RETVAL(handled); ++} ++ ++static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) ++{ ++ struct ata_port *ap = qc->ap; ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ahci_port_priv *pp = ap->private_data; ++ ++ /* Keep track of the currently active link. It will be used ++ * in completion path to determine whether NCQ phase is in ++ * progress. ++ */ ++ pp->active_link = qc->dev->link; ++ ++ if (qc->tf.protocol == ATA_PROT_NCQ) ++ writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); ++ writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); ++ ++ ahci_sw_activity(qc->dev->link); ++ ++ return 0; ++} ++ ++static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) ++{ ++ struct ahci_port_priv *pp = qc->ap->private_data; ++ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; ++ ++ ata_tf_from_fis(d2h_fis, &qc->result_tf); ++ return true; ++} ++ ++static void ahci_freeze(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ ++ /* turn IRQ off */ ++ writel(0, port_mmio + PORT_IRQ_MASK); ++} ++ ++static void ahci_thaw(struct ata_port *ap) ++{ ++#if 0 ++ void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)ap->host->iomap;//[AHCI_BAR]; ++#endif ++ void __iomem *port_mmio = ahci_port_base(ap); ++ u32 tmp; ++ struct ahci_port_priv *pp = ap->private_data; ++ ++ /* clear IRQ */ ++ tmp = readl(port_mmio + PORT_IRQ_STAT); ++ writel(tmp, port_mmio + PORT_IRQ_STAT); ++ writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); ++ ++ /* turn IRQ back on */ ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); ++} ++ ++static void ahci_error_handler(struct ata_port *ap) ++{ ++ if (!(ap->pflags & ATA_PFLAG_FROZEN)) { ++ /* restart engine */ ++ ahci_stop_engine(ap); ++ 
ahci_start_engine(ap); ++ } ++ ++ sata_pmp_error_handler(ap); ++} ++ ++static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) ++{ ++ struct ata_port *ap = qc->ap; ++ ++ /* make DMA engine forget about the failed command */ ++ if (qc->flags & ATA_QCFLAG_FAILED) ++ ahci_kick_engine(ap, 1); ++} ++ ++static void ahci_pmp_attach(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ahci_port_priv *pp = ap->private_data; ++ u32 cmd; ++ ++ cmd = readl(port_mmio + PORT_CMD); ++ cmd |= PORT_CMD_PMP; ++ writel(cmd, port_mmio + PORT_CMD); ++ ++ pp->intr_mask |= PORT_IRQ_BAD_PMP; ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); ++} ++ ++static void ahci_pmp_detach(struct ata_port *ap) ++{ ++ void __iomem *port_mmio = ahci_port_base(ap); ++ struct ahci_port_priv *pp = ap->private_data; ++ u32 cmd; ++ ++ cmd = readl(port_mmio + PORT_CMD); ++ cmd &= ~PORT_CMD_PMP; ++ writel(cmd, port_mmio + PORT_CMD); ++ ++ pp->intr_mask &= ~PORT_IRQ_BAD_PMP; ++ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); ++} ++ ++static int ahci_port_resume(struct ata_port *ap) ++{ ++ ahci_power_up(ap); ++ ahci_start_port(ap); ++ ++ if (sata_pmp_attached(ap)) ++ ahci_pmp_attach(ap); ++ else ++ ahci_pmp_detach(ap); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) ++{ ++ const char *emsg = NULL; ++ int rc; ++ ++ rc = ahci_deinit_port(ap, &emsg); ++ if (rc == 0) ++ ahci_power_down(ap); ++ else { ++ ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); ++ ahci_start_port(ap); ++ } ++ ++ return rc; ++} ++ ++static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) ++{ ++ struct ata_host *host = dev_get_drvdata(&pdev->dev); ++ struct ahci_host_priv *hpriv = host->private_data; ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++ u32 ctl; ++ ++ if (mesg.event & PM_EVENT_SUSPEND && ++ hpriv->flags & AHCI_HFLAG_NO_SUSPEND) { ++ dev_printk(KERN_ERR, &pdev->dev, ++ "BIOS update required for 
suspend/resume\n"); ++ return -EIO; ++ } ++ ++ if (mesg.event & PM_EVENT_SLEEP) { ++ /* AHCI spec rev1.1 section 8.3.3: ++ * Software must disable interrupts prior to requesting a ++ * transition of the HBA to D3 state. ++ */ ++ ctl = readl(mmio + HOST_CTL); ++ ctl &= ~HOST_IRQ_EN; ++ writel(ctl, mmio + HOST_CTL); ++ readl(mmio + HOST_CTL); /* flush */ ++ } ++ ++ return ata_pci_device_suspend(pdev, mesg); ++} ++ ++static int ahci_pci_device_resume(struct pci_dev *pdev) ++{ ++ struct ata_host *host = dev_get_drvdata(&pdev->dev); ++ int rc; ++ ++ rc = ata_pci_device_do_resume(pdev); ++ if (rc) ++ return rc; ++ ++ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { ++ rc = ahci_reset_controller(host); ++ if (rc) ++ return rc; ++ ++ ahci_init_controller(host); ++ } ++ ++ ata_host_resume(host); ++ ++ return 0; ++} ++#endif ++ ++static int ahci_port_start(struct ata_port *ap) ++{ ++ struct device *dev = ap->host->dev; ++ struct ahci_port_priv *pp; ++ void *mem; ++ dma_addr_t mem_dma; ++ ++ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); ++ if (!pp) ++ return -ENOMEM; ++ ++ mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, ++ GFP_KERNEL); ++ if (!mem) ++ return -ENOMEM; ++ memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); ++ ++ /* ++ * First item in chunk of DMA memory: 32-slot command table, ++ * 32 bytes each in size ++ */ ++ pp->cmd_slot = mem; ++ pp->cmd_slot_dma = mem_dma; ++ ++ mem += AHCI_CMD_SLOT_SZ; ++ mem_dma += AHCI_CMD_SLOT_SZ; ++ ++ /* ++ * Second item: Received-FIS area ++ */ ++ pp->rx_fis = mem; ++ pp->rx_fis_dma = mem_dma; ++ ++ mem += AHCI_RX_FIS_SZ; ++ mem_dma += AHCI_RX_FIS_SZ; ++ ++ /* ++ * Third item: data area for storing a single command ++ * and its scatter-gather table ++ */ ++ pp->cmd_tbl = mem; ++ pp->cmd_tbl_dma = mem_dma; ++ ++ /* ++ * Save off initial list of interrupts to be enabled. 
++ * This could be changed later ++ */ ++ pp->intr_mask = DEF_PORT_IRQ; ++ ++ ap->private_data = pp; ++ ++ /* engage engines, captain */ ++ return ahci_port_resume(ap); ++} ++ ++static void ahci_port_stop(struct ata_port *ap) ++{ ++ const char *emsg = NULL; ++ int rc; ++ ++ /* de-initialize port */ ++ rc = ahci_deinit_port(ap, &emsg); ++ if (rc) ++ ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); ++} ++ ++#if 0 ++static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) ++{ ++ int rc; ++ ++ if (using_dac && ++ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { ++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); ++ if (rc) { ++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); ++ if (rc) { ++ dev_printk(KERN_ERR, &pdev->dev, ++ "64-bit DMA enable failed\n"); ++ return rc; ++ } ++ } ++ } else { ++ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); ++ if (rc) { ++ dev_printk(KERN_ERR, &pdev->dev, ++ "32-bit DMA enable failed\n"); ++ return rc; ++ } ++ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); ++ if (rc) { ++ dev_printk(KERN_ERR, &pdev->dev, ++ "32-bit consistent DMA enable failed\n"); ++ return rc; ++ } ++ } ++ return 0; ++} ++#endif ++ ++static void ahci_print_info(struct ata_host *host) ++{ ++ struct ahci_host_priv *hpriv = host->private_data; ++#if 0 ++ struct pci_dev *pdev = to_pci_dev(host->dev); ++ void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; ++#else ++ struct platform_device *pdev = to_platform_device(host->dev); ++ void __iomem *mmio = (void __iomem *)host->iomap;//[AHCI_BAR]; ++#endif ++ u32 vers, cap, impl, speed; ++ const char *speed_s; ++#if 0 ++ u16 cc; ++#endif ++ const char *scc_s; ++ ++ vers = readl(mmio + HOST_VERSION); ++ cap = hpriv->cap; ++ impl = hpriv->port_map; ++ ++ speed = (cap >> 20) & 0xf; ++ if (speed == 1) ++ speed_s = "1.5"; ++ else if (speed == 2) ++ speed_s = "3"; ++ else if (speed == 3) ++ speed_s = "6"; ++ else ++ speed_s = "?"; ++ ++#if 0 ++ pci_read_config_word(pdev, 0x0a, &cc); ++ 
if (cc == PCI_CLASS_STORAGE_IDE) ++ scc_s = "IDE"; ++ else if (cc == PCI_CLASS_STORAGE_SATA) ++ scc_s = "SATA"; ++ else if (cc == PCI_CLASS_STORAGE_RAID) ++ scc_s = "RAID"; ++ else ++ scc_s = "unknown"; ++#else ++ scc_s = "SATA"; ++#endif ++ ++ dev_printk(KERN_INFO, &pdev->dev, ++ "AHCI %02x%02x.%02x%02x " ++ "%u slots %u ports %s Gbps 0x%x impl %s mode\n" ++ , ++ ++ (vers >> 24) & 0xff, ++ (vers >> 16) & 0xff, ++ (vers >> 8) & 0xff, ++ vers & 0xff, ++ ++ ((cap >> 8) & 0x1f) + 1, ++ (cap & 0x1f) + 1, ++ speed_s, ++ impl, ++ scc_s); ++ ++ dev_printk(KERN_INFO, &pdev->dev, ++ "flags: " ++ "%s%s%s%s%s%s%s" ++ "%s%s%s%s%s%s%s" ++ "%s\n" ++ , ++ ++ cap & (1 << 31) ? "64bit " : "", ++ cap & (1 << 30) ? "ncq " : "", ++ cap & (1 << 29) ? "sntf " : "", ++ cap & (1 << 28) ? "ilck " : "", ++ cap & (1 << 27) ? "stag " : "", ++ cap & (1 << 26) ? "pm " : "", ++ cap & (1 << 25) ? "led " : "", ++ ++ cap & (1 << 24) ? "clo " : "", ++ cap & (1 << 19) ? "nz " : "", ++ cap & (1 << 18) ? "only " : "", ++ cap & (1 << 17) ? "pmp " : "", ++ cap & (1 << 15) ? "pio " : "", ++ cap & (1 << 14) ? "slum " : "", ++ cap & (1 << 13) ? "part " : "", ++ cap & (1 << 6) ? "ems ": "" ++ ); ++} ++ ++#if 0 ++/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is ++ * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't ++ * support PMP and the 4726 either directly exports the device ++ * attached to the first downstream port or acts as a hardware storage ++ * controller and emulate a single ATA device (can be RAID 0/1 or some ++ * other configuration). ++ * ++ * When there's no device attached to the first downstream port of the ++ * 4726, "Config Disk" appears, which is a pseudo ATA device to ++ * configure the 4726. However, ATA emulation of the device is very ++ * lame. It doesn't send signature D2H Reg FIS after the initial ++ * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues. 
++ * ++ * The following function works around the problem by always using ++ * hardreset on the port and not depending on receiving signature FIS ++ * afterward. If signature FIS isn't received soon, ATA class is ++ * assumed without follow-up softreset. ++ */ ++static void ahci_p5wdh_workaround(struct ata_host *host) ++{ ++ static struct dmi_system_id sysids[] = { ++ { ++ .ident = "P5W DH Deluxe", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, ++ "ASUSTEK COMPUTER INC"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), ++ }, ++ }, ++ { } ++ }; ++ struct pci_dev *pdev = to_pci_dev(host->dev); ++ ++ if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) && ++ dmi_check_system(sysids)) { ++ struct ata_port *ap = host->ports[1]; ++ ++ dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH " ++ "Deluxe on-board SIMG4726 workaround\n"); ++ ++ ap->ops = &ahci_p5wdh_ops; ++ ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; ++ } ++} ++ ++/* ++ * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older ++ * BIOS. The oldest version known to be broken is 0901 and working is ++ * 1501 which was released on 2007-10-26. Force 32bit DMA on anything ++ * older than 1501. Please read bko#9412 for more info. ++ */ ++static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev) ++{ ++ static const struct dmi_system_id sysids[] = { ++ { ++ .ident = "ASUS M2A-VM", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, ++ "ASUSTeK Computer INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), ++ }, ++ }, ++ { } ++ }; ++ const char *cutoff_mmdd = "10/26"; ++ const char *date; ++ int year; ++ ++ if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || ++ !dmi_check_system(sysids)) ++ return false; ++ ++ /* ++ * Argh.... both version and date are free form strings. ++ * Let's hope they're using the same date format across ++ * different versions. 
++ */ ++ date = dmi_get_system_info(DMI_BIOS_DATE); ++ year = dmi_get_year(DMI_BIOS_DATE); ++ if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' && ++ (year > 2007 || ++ (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0))) ++ return false; ++ ++ dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, " ++ "forcing 32bit DMA, update BIOS\n"); ++ ++ return true; ++} ++ ++static bool ahci_broken_system_poweroff(struct pci_dev *pdev) ++{ ++ static const struct dmi_system_id broken_systems[] = { ++ { ++ .ident = "HP Compaq nx6310", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"), ++ }, ++ /* PCI slot number of the controller */ ++ .driver_data = (void *)0x1FUL, ++ }, ++ { ++ .ident = "HP Compaq 6720s", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"), ++ }, ++ /* PCI slot number of the controller */ ++ .driver_data = (void *)0x1FUL, ++ }, ++ ++ { } /* terminate list */ ++ }; ++ const struct dmi_system_id *dmi = dmi_first_match(broken_systems); ++ ++ if (dmi) { ++ unsigned long slot = (unsigned long)dmi->driver_data; ++ /* apply the quirk only to on-board controllers */ ++ return slot == PCI_SLOT(pdev->devfn); ++ } ++ ++ return false; ++} ++ ++static bool ahci_broken_suspend(struct pci_dev *pdev) ++{ ++ static const struct dmi_system_id sysids[] = { ++ /* ++ * On HP dv[4-6] and HDX18 with earlier BIOSen, link ++ * to the harddisk doesn't become online after ++ * resuming from STR. Warn and fail suspend. 
++ */ ++ { ++ .ident = "dv4", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, ++ "HP Pavilion dv4 Notebook PC"), ++ }, ++ .driver_data = "F.30", /* cutoff BIOS version */ ++ }, ++ { ++ .ident = "dv5", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, ++ "HP Pavilion dv5 Notebook PC"), ++ }, ++ .driver_data = "F.16", /* cutoff BIOS version */ ++ }, ++ { ++ .ident = "dv6", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, ++ "HP Pavilion dv6 Notebook PC"), ++ }, ++ .driver_data = "F.21", /* cutoff BIOS version */ ++ }, ++ { ++ .ident = "HDX18", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), ++ DMI_MATCH(DMI_PRODUCT_NAME, ++ "HP HDX18 Notebook PC"), ++ }, ++ .driver_data = "F.23", /* cutoff BIOS version */ ++ }, ++ { } /* terminate list */ ++ }; ++ const struct dmi_system_id *dmi = dmi_first_match(sysids); ++ const char *ver; ++ ++ if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) ++ return false; ++ ++ ver = dmi_get_system_info(DMI_BIOS_VERSION); ++ ++ return !ver || strcmp(ver, dmi->driver_data) < 0; ++} ++ ++static bool ahci_broken_online(struct pci_dev *pdev) ++{ ++#define ENCODE_BUSDEVFN(bus, slot, func) \ ++ (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) ++ static const struct dmi_system_id sysids[] = { ++ /* ++ * There are several gigabyte boards which use ++ * SIMG5723s configured as hardware RAID. Certain ++ * 5723 firmware revisions shipped there keep the link ++ * online but fail to answer properly to SRST or ++ * IDENTIFY when no device is attached downstream ++ * causing libata to retry quite a few times leading ++ * to excessive detection delay. ++ * ++ * As these firmwares respond to the second reset try ++ * with invalid device signature, considering unknown ++ * sig as offline works around the problem acceptably. 
++ */ ++ { ++ .ident = "EP45-DQ6", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, ++ "Gigabyte Technology Co., Ltd."), ++ DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), ++ }, ++ .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), ++ }, ++ { ++ .ident = "EP45-DS5", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, ++ "Gigabyte Technology Co., Ltd."), ++ DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), ++ }, ++ .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), ++ }, ++ { } /* terminate list */ ++ }; ++#undef ENCODE_BUSDEVFN ++ const struct dmi_system_id *dmi = dmi_first_match(sysids); ++ unsigned int val; ++ ++ if (!dmi) ++ return false; ++ ++ val = (unsigned long)dmi->driver_data; ++ ++ return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); ++} ++ ++#endif ++static int ahci_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct ata_host *host = dev_get_drvdata(dev); ++ ++ ata_host_detach(host); ++ return 0; ++} ++ ++#if 0 ++static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ++#else ++static int __init ahci_probe(struct platform_device *pdev) ++#endif ++{ ++ static int printed_version; ++#if 0 ++ unsigned int board_id = ent->driver_data; ++ struct ata_port_info pi = ahci_port_info[board_id]; ++#else ++ struct ata_port_info pi = ahci_port_info[board_ahci]; ++#endif ++ const struct ata_port_info *ppi[] = { &pi, NULL }; ++ struct device *dev = &pdev->dev; ++ struct ahci_host_priv *hpriv; ++ struct ata_host *host; ++ int n_ports, i, rc; ++ struct resource *res; ++ u8 *base = NULL; ++ ++ VPRINTK("ENTER\n"); ++ ++ WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS); ++ ++ if (!printed_version++) ++ dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); ++ ++#if 0 ++ /* The AHCI driver can only drive the SATA ports, the PATA driver ++ can drive them all so if both drivers are selected make sure ++ AHCI stays out of the way */ ++ if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) ++ return -ENODEV; ++ ++ /* acquire 
resources */ ++ rc = pcim_enable_device(pdev); ++ if (rc) ++ return rc; ++ ++ /* AHCI controllers often implement SFF compatible interface. ++ * Grab all PCI BARs just in case. ++ */ ++ rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); ++ if (rc == -EBUSY) ++ pcim_pin_device(pdev); ++ if (rc) ++ return rc; ++ ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL && ++ (pdev->device == 0x2652 || pdev->device == 0x2653)) { ++ u8 map; ++ ++ /* ICH6s share the same PCI ID for both piix and ahci ++ * modes. Enabling ahci mode while MAP indicates ++ * combined mode is a bad idea. Yield to ata_piix. ++ */ ++ pci_read_config_byte(pdev, ICH_MAP, &map); ++ if (map & 0x3) { ++ dev_printk(KERN_INFO, &pdev->dev, "controller is in " ++ "combined mode, can't enable AHCI mode\n"); ++ return -ENODEV; ++ } ++ } ++#endif ++ ++ hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); ++ if (!hpriv) ++ return -ENOMEM; ++ hpriv->flags |= (unsigned long)pi.private_data; ++ ++#if 0 ++ /* MCP65 revision A1 and A2 can't do MSI */ ++ if (board_id == board_ahci_mcp65 && ++ (pdev->revision == 0xa1 || pdev->revision == 0xa2)) ++ hpriv->flags |= AHCI_HFLAG_NO_MSI; ++ ++ /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */ ++ if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) ++ hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; ++ ++ /* apply ASUS M2A_VM quirk */ ++ if (ahci_asus_m2a_vm_32bit_only(pdev)) ++ hpriv->flags |= AHCI_HFLAG_32BIT_ONLY; ++ ++ if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) ++ pci_enable_msi(pdev); ++#endif ++ ++ /* Cavium CNS3XXX Initial */ ++ /* Get SATA register base address */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no reg addr\n"); ++ return -ENODEV; ++ } ++ ++ /* ioremap SATA registers */ ++ base = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1); ++ ++ if (!base) { ++ dev_err(&pdev->dev, "ioremap failed for 0x%x\n", res->start); ++ return -ENODEV; ++ } ++ ++#if 0 ++ /* reset 
PHY test chip */ ++ printk("*** Reset PHY ***\n"); ++ CNS3XXX_MISC_REGISTER |= 0xF; ++ mdelay(100); ++ ++ printk("%s %d, base:0x%x\n",__FUNCTION__,__LINE__,(u32)base); ++ ++ /* set PI first */ ++ printk("*** Manually set PI ***\n"); ++ writel(0x1, (void __iomem *)base + HOST_PORTS_IMPL); ++ printk("*** Now PI is: 0x%x ***\n",readl((void __iomem *)base + HOST_PORTS_IMPL)); ++#endif ++ ++ ++ ++ ++ /* save initial config */ ++#if 0 ++ ahci_save_initial_config(pdev, hpriv); ++#else ++ ahci_save_initial_config(pdev, hpriv, base); ++#endif ++ ++ /* prepare host */ ++ if (hpriv->cap & HOST_CAP_NCQ) ++ pi.flags |= ATA_FLAG_NCQ; ++ ++ if (hpriv->cap & HOST_CAP_PMP) ++ pi.flags |= ATA_FLAG_PMP; ++ ++ if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) { ++ u8 messages; ++#if 0 ++ void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; ++#else ++ void __iomem *mmio = (void __iomem *)base; ++#endif ++ u32 em_loc = readl(mmio + HOST_EM_LOC); ++ u32 em_ctl = readl(mmio + HOST_EM_CTL); ++ ++ messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; ++ ++ /* we only support LED message type right now */ ++ if ((messages & 0x01) && (ahci_em_messages == 1)) { ++ /* store em_loc */ ++ hpriv->em_loc = ((em_loc >> 16) * 4); ++ pi.flags |= ATA_FLAG_EM; ++ if (!(em_ctl & EM_CTL_ALHD)) ++ pi.flags |= ATA_FLAG_SW_ACTIVITY; ++ } ++ } ++ ++#if 0 ++ if (ahci_broken_system_poweroff(pdev)) { ++ pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; ++ dev_info(&pdev->dev, ++ "quirky BIOS, skipping spindown on poweroff\n"); ++ } ++ ++ if (ahci_broken_suspend(pdev)) { ++ hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; ++ dev_printk(KERN_WARNING, &pdev->dev, ++ "BIOS update required for suspend/resume\n"); ++ } ++ ++ if (ahci_broken_online(pdev)) { ++ hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE; ++ dev_info(&pdev->dev, ++ "online status unreliable, applying workaround\n"); ++ } ++#endif ++ ++ /* CAP.NP sometimes indicate the index of the last enabled ++ * port, at other times, that of the last possible port, so ++ * 
determining the maximum port number requires looking at ++ * both CAP.NP and port_map. ++ */ ++ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); ++ ++ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); ++ if (!host) ++ return -ENOMEM; ++#if 0 ++ host->iomap = pcim_iomap_table(pdev); ++#else ++ host->iomap = (void __iomem *)base; ++#endif ++ host->private_data = hpriv; ++ ++ if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) ++ host->flags |= ATA_HOST_PARALLEL_SCAN; ++ else ++ printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); ++ ++ if (pi.flags & ATA_FLAG_EM) ++ ahci_reset_em(host); ++ ++ for (i = 0; i < host->n_ports; i++) { ++ struct ata_port *ap = host->ports[i]; ++ ++#if 0 ++ ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); ++ ata_port_pbar_desc(ap, AHCI_PCI_BAR, ++ 0x100 + ap->port_no * 0x80, "port"); ++#else ++ ata_port_desc(ap, "%s %s%llu@0x%llx", "ahci bar", "m", ++ (long long)(res->end - res->start) + 1, (long long)res->start); ++ ata_port_desc(ap, "%s 0x%llx", "port", ++ (long long)res->start + 0x100 + ap->port_no * 0x80); ++#endif ++ ++ /* set initial link pm policy */ ++ ap->pm_policy = NOT_AVAILABLE; ++ ++ /* set enclosure management message type */ ++ if (ap->flags & ATA_FLAG_EM) ++ ap->em_message_type = ahci_em_messages; ++ ++ ++ /* disabled/not-implemented port */ ++ if (!(hpriv->port_map & (1 << i))) ++ ap->ops = &ata_dummy_port_ops; ++ } ++ ++#if 0 ++ /* apply workaround for ASUS P5W DH Deluxe mainboard */ ++ ahci_p5wdh_workaround(host); ++ ++ /* initialize adapter */ ++ rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); ++ if (rc) ++ return rc; ++#endif ++ ++ rc = ahci_reset_controller(host); ++ if (rc) ++ return rc; ++ ++ ahci_init_controller(host); ++ ahci_print_info(host); ++ ++#if 0 ++ pci_set_master(pdev); ++#endif ++ ++ ++ ++#if 0 ++ return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, ++ &ahci_sht); ++#else ++ /* Get SATA port interrupt number */ ++ res = 
platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no SATA irq\n"); ++ return -ENODEV; ++ } ++ ++ return ata_host_activate(host, res->start, ahci_interrupt, IRQF_SHARED, ++ &ahci_sht); ++ ++ ++#endif ++} ++ ++ ++#if defined(CONFIG_CNS3XXX_SILICON) || defined(CONFIG_SILICON) ++static void ahci_phy_init(void){ ++ ++ u32 u32tmp; ++ ++ ++ u32tmp = MISC_SATA_POWER_MODE; ++ u32tmp |= 0x1<< 16; // Disable SATA PHY 0 from SLUMBER Mode ++ u32tmp |= 0x1<< 17; // Disable SATA PHY 1 from SLUMBER Mode ++ MISC_SATA_POWER_MODE = u32tmp; ++ ++ /* Enable SATA PHY */ ++ cns3xxx_pwr_power_up(0x1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY0); ++ cns3xxx_pwr_power_up(0x1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_SATA_PHY1); ++ ++ /* Enable SATA Clock */ ++ cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_SATA); ++ ++ /* De-Asscer SATA Reset */ ++ u32tmp = PM_SOFT_RST_REG; ++ u32tmp |= 0x1 << PM_SOFT_RST_REG_OFFST_SATA; ++ PM_SOFT_RST_REG = u32tmp; ++} ++#endif ++ ++ ++ ++static int __init ahci_init(void) ++{ ++#if 0 ++ return pci_register_driver(&ahci_pci_driver); ++#else ++ printk("CNS3XXX AHCI SATA low-level driver\n"); ++#if defined(CONFIG_CNS3XXX_SILICON) || defined(CONFIG_SILICON) ++ ahci_phy_init(); ++#endif ++ return platform_driver_register(&ahci_driver); ++#endif ++} ++ ++static void __exit ahci_exit(void) ++{ ++#if 0 ++ pci_unregister_driver(&ahci_pci_driver); ++#else ++ platform_driver_unregister(&ahci_driver); ++#endif ++} ++ ++ ++MODULE_AUTHOR("Jeff Garzik"); ++MODULE_DESCRIPTION("AHCI SATA low-level driver"); ++MODULE_LICENSE("GPL"); ++#if 0 ++MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); ++#endif ++MODULE_VERSION(DRV_VERSION); ++ ++module_init(ahci_init); ++module_exit(ahci_exit); +--- a/drivers/ata/Kconfig ++++ b/drivers/ata/Kconfig +@@ -47,6 +47,14 @@ config SATA_PMP + This option adds support for SATA Port Multipliers + (the SATA version of an ethernet hub, or SAS expander). 
+ ++config SATA_CNS3XXX_AHCI ++ tristate "Cavium CNS3XXX AHCI SATA support" ++ help ++ This option enables support for AHCI Serial ATA support for Cavium CNS3XXX. ++ ++ If unsure, say N. ++ ++ + config SATA_AHCI + tristate "AHCI SATA support" + depends on PCI +--- a/drivers/ata/libata-scsi.c ++++ b/drivers/ata/libata-scsi.c +@@ -3096,12 +3096,22 @@ int ata_scsi_queuecmd(struct scsi_cmnd * + struct scsi_device *scsidev = cmd->device; + struct Scsi_Host *shost = scsidev->host; + int rc = 0; +- ++#ifdef CONFIG_SMP ++ u32 flags; ++ local_save_flags(flags); ++#endif + ap = ata_shost_to_port(shost); + + spin_unlock(shost->host_lock); ++#ifndef CONFIG_SMP + spin_lock(ap->lock); +- ++#else ++ while(!spin_trylock(ap->lock)){ ++ if(!irqs_disabled()) continue; ++ local_irq_enable(); ++ local_irq_restore(flags); ++ } ++#endif + ata_scsi_dump_cdb(ap, cmd); + + dev = ata_scsi_find_dev(ap, scsidev); +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_qu + do_write); + } + ++ if (!do_write) ++ flush_dcache_page(page); ++ + qc->curbytes += qc->sect_size; + qc->cursg_ofs += qc->sect_size; + +--- a/drivers/ata/Makefile ++++ b/drivers/ata/Makefile +@@ -1,6 +1,7 @@ + + obj-$(CONFIG_ATA) += libata.o + ++obj-$(CONFIG_SATA_CNS3XXX_AHCI) += cns3xxx_ahci.o + obj-$(CONFIG_SATA_AHCI) += ahci.o + obj-$(CONFIG_SATA_SVW) += sata_svw.o + obj-$(CONFIG_ATA_PIIX) += ata_piix.o diff --git a/target/linux/cns3xxx/patches-2.6.31/203-cns3xxx_i2c_support.patch b/target/linux/cns3xxx/patches-2.6.31/203-cns3xxx_i2c_support.patch new file mode 100644 index 0000000000..30452334d8 --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/203-cns3xxx_i2c_support.patch @@ -0,0 +1,416 @@ +--- /dev/null ++++ b/drivers/i2c/busses/i2c-cns3xxx.c +@@ -0,0 +1,388 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * We need the memory map ++ */ ++ ++#include ++ ++#define 
MISC_MEM_MAP_VALUE(reg_offset) (*((uint32_t volatile *)(CNS3XXX_MISC_BASE_VIRT + reg_offset))) ++#define MISC_IOCDB_CTRL MISC_MEM_MAP_VALUE(0x020) ++ ++#define I2C_MEM_MAP_ADDR(x) (CNS3XXX_SSP_BASE_VIRT + x) ++#define I2C_MEM_MAP_VALUE(x) (*((unsigned int volatile*)I2C_MEM_MAP_ADDR(x))) ++ ++#define I2C_CONTROLLER_REG I2C_MEM_MAP_VALUE(0x20) ++#define I2C_TIME_OUT_REG I2C_MEM_MAP_VALUE(0x24) ++#define I2C_SLAVE_ADDRESS_REG I2C_MEM_MAP_VALUE(0x28) ++#define I2C_WRITE_DATA_REG I2C_MEM_MAP_VALUE(0x2C) ++#define I2C_READ_DATA_REG I2C_MEM_MAP_VALUE(0x30) ++#define I2C_INTERRUPT_STATUS_REG I2C_MEM_MAP_VALUE(0x34) ++#define I2C_INTERRUPT_ENABLE_REG I2C_MEM_MAP_VALUE(0x38) ++#define I2C_TWI_OUT_DLY_REG I2C_MEM_MAP_VALUE(0x3C) ++ ++#define I2C_BUS_ERROR_FLAG (0x1) ++#define I2C_ACTION_DONE_FLAG (0x2) ++ ++#define CNS3xxx_I2C_ENABLE() (I2C_CONTROLLER_REG) |= ((unsigned int)0x1 << 31) ++#define CNS3xxx_I2C_DISABLE() (I2C_CONTROLLER_REG) &= ~((unsigned int)0x1 << 31) ++#define CNS3xxx_I2C_ENABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) |= 0x03 ++#define CNS3xxx_I2C_DISABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) &= 0xfc ++ ++#define TWI_TIMEOUT (10*HZ) ++#define I2C_100KHZ 100000 ++#define I2C_200KHZ 200000 ++#define I2C_300KHZ 300000 ++#define I2C_400KHZ 400000 ++ ++#define CNS3xxx_I2C_CLK I2C_100KHZ ++ ++#define STATE_DONE 1 ++#define STATE_ERROR 2 ++ ++struct cns3xxx_i2c { ++ void __iomem *base; ++ wait_queue_head_t wait; ++ struct i2c_adapter adap; ++ struct i2c_msg *msg; ++ int state; /* see STATE_ */ ++ int rd_wr_len; ++ u8 *buf; ++}; ++ ++static u32 cns3xxx_i2c_func(struct i2c_adapter *adap) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static int ++cns3xxx_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg) ++{ ++ struct cns3xxx_i2c *i2c = i2c_get_adapdata(adap); ++ int i, j; ++ u8 buf[1] = { 0 }; ++ ++ if (msg->len == 0) { ++ /* ++ * We are probably doing a probe for a device here, ++ * so set the length to one, and data to 0 ++ */ ++ msg->len = 1; ++ i2c->buf 
= buf; ++ } else { ++ i2c->buf = msg->buf; ++ } ++ ++ if (msg->flags & I2C_M_TEN) { ++ printk ++ ("%s:%d: Presently the driver does not handle extended addressing\n", ++ __FUNCTION__, __LINE__); ++ return -EINVAL; ++ } ++ i2c->msg = msg; ++ ++ for (i = 0; i < msg->len; i++) { ++ if (msg->len - i >= 4) ++ i2c->rd_wr_len = 3; ++ else ++ i2c->rd_wr_len = msg->len - i - 1; ++ ++ // Set Data Width and TWI_EN ++ I2C_CONTROLLER_REG = 0x80000000 | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); ++ ++ // Clear Write Reg ++ I2C_WRITE_DATA_REG = 0; ++ ++ // Set the slave address ++ I2C_SLAVE_ADDRESS_REG = (msg->addr << 1); ++ ++ // Are we Writing ++ if (!(msg->flags & I2C_M_RD)) { ++ I2C_CONTROLLER_REG |= (1 << 4); ++ if (i != 0) { ++ /* ++ * We need to set the address in the first byte. ++ * The base address is going to be in buf[0] and then ++ * it needs to be incremented by i - 1. ++ */ ++ i2c->buf--; ++ *i2c->buf = buf[0] + i - 1; ++ ++ if (i2c->rd_wr_len < 3) { ++ i += i2c->rd_wr_len; ++ i2c->rd_wr_len++; ++ I2C_CONTROLLER_REG = 0x80000000 | (1 << 4) | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); ++ } else { ++ i += i2c->rd_wr_len - 1; ++ } ++ } else { ++ i += i2c->rd_wr_len; ++ buf[0] = *i2c->buf; ++ } ++ for (j = 0; j <= i2c->rd_wr_len; j++) { ++ I2C_WRITE_DATA_REG |= ((*i2c->buf++) << (8 * j)); ++ } ++ } else { ++ i += i2c->rd_wr_len; ++ } ++ ++ // Start the Transfer ++ i2c->state = 0; // Clear out the State ++ I2C_CONTROLLER_REG |= (1 << 6); ++ ++ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) || ++ (i2c->state == STATE_DONE), TWI_TIMEOUT)) { ++ if (i2c->state == STATE_ERROR) { ++ return -EIO; ++ } ++ } else { ++ return -ETIMEDOUT; ++ } ++ } ++ return 0; ++} ++ ++static int ++cns3xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) ++{ ++ int i; ++ int ret; ++ for (i = 0; i < num; i++) ++ { ++ ret = cns3xxx_i2c_xfer_msg(adap, &msgs[i]); ++ if (ret < 0) { ++ return ret; ++ } ++ } ++ return num; ++} ++ ++ ++static struct i2c_algorithm 
cns3xxx_i2c_algo = { ++ .master_xfer = cns3xxx_i2c_xfer, ++ .functionality = cns3xxx_i2c_func, ++}; ++ ++static struct i2c_adapter cns3xxx_i2c_adapter = { ++ .owner = THIS_MODULE, ++ .algo = &cns3xxx_i2c_algo, ++ .algo_data = NULL, ++ .nr = 0, ++ .name = "CNS3xxx I2C 0", ++ .retries = 5, ++}; ++ ++static void cns3xxx_i2c_adapter_init(struct cns3xxx_i2c *i2c) ++{ ++ ++ /* Steps ++ * 1. Check if the power is enabled to the module (PMU_BASE + 0x010) ++ * 2. Enable the clock (Enabled by default (PMU doc ++ * but check clk status anyway PMU_BASE + 0X00C) ++ * 3. Configure the registers of i2c ++ */ ++ ++ // if (!CNS3xxx_I2C_POWER_ON()) ++// CNS3xxx_I2C_POWER_ENABLE(); ++ ++ // if (!CNS3xxx_I2C_CLOCK()) ++ // CNS3xxx_I2C_CLOCK_ENABLE(); ++ ++ cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ cns3xxx_pwr_power_up(0x1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ cns3xxx_pwr_soft_rst(0x1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ ++ /* Disable the I2C */ ++ I2C_CONTROLLER_REG = 0; /* Disabled the I2C */ ++ ++ //enable SCL and SDA which share pin with GPIOB_PIN_EN(0x18) ++ //GPIOB[12]: SCL ++ //GPIOB[13]: SDA ++ (*(u32*)(CNS3XXX_MISC_BASE_VIRT+0x18)) |= ((1<<12)|(1<<13)); ++ ++ MISC_IOCDB_CTRL &= ~0x300; ++ MISC_IOCDB_CTRL |= 0x300; //21mA... 
++ ++ /* Check the Reg Dump when testing */ ++ I2C_TIME_OUT_REG = ++ ((((((cns3xxx_cpu_clock()*(1000000/8)) / (2 * CNS3xxx_I2C_CLK)) - ++ 1) & 0x3FF) << 8) | (1 << 7) | 0x7F); ++ I2C_TWI_OUT_DLY_REG |= 0x3; ++ ++ /* Enable The Interrupt */ ++ CNS3xxx_I2C_ENABLE_INTR(); ++ ++ /* Clear Interrupt Status (0x2 | 0x1) */ ++ I2C_INTERRUPT_STATUS_REG |= (I2C_ACTION_DONE_FLAG | I2C_BUS_ERROR_FLAG); ++ ++ /* Enable the I2C Controller */ ++ CNS3xxx_I2C_ENABLE(); ++} ++ ++static irqreturn_t cns3xxx_i2c_isr(int irq, void *dev_id) ++{ ++ struct cns3xxx_i2c *i2c = dev_id; ++ int i; ++ uint32_t stat = I2C_INTERRUPT_STATUS_REG; ++ ++ /* Clear Interrupt */ ++ I2C_INTERRUPT_STATUS_REG |= 0x1; ++ ++ if (stat & I2C_BUS_ERROR_FLAG) { ++ i2c->state = STATE_ERROR; ++ } else { ++ if (i2c->msg->flags & I2C_M_RD) { ++ for (i = 0; i <= i2c->rd_wr_len; i++) ++ { ++ *i2c->buf++ = ((I2C_READ_DATA_REG >> (8 * i)) & 0xff); ++ } ++ } ++ i2c->state = STATE_DONE; ++ } ++ wake_up(&i2c->wait); ++ return IRQ_HANDLED; ++} ++ ++static int __devinit cns3xxx_i2c_probe(struct platform_device *pdev) ++{ ++ struct cns3xxx_i2c *i2c; ++ struct resource *res, *res2; ++ int ret; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ printk("%s: IORESOURCE_MEM not defined \n", __FUNCTION__); ++ return -ENODEV; ++ } ++ ++ res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res2) { ++ printk("%s: IORESOURCE_IRQ not defined \n", __FUNCTION__); ++ return -ENODEV; ++ } ++ ++ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); ++ if (!i2c) ++ return -ENOMEM; ++ ++ if (!request_mem_region(res->start, res->end - res->start + 1, ++ pdev->name)) { ++ dev_err(&pdev->dev, "Memory region busy\n"); ++ ret = -EBUSY; ++ goto request_mem_failed; ++ } ++ ++ i2c->base = ioremap(res->start, res->end - res->start + 1); ++ if (!i2c->base) { ++ dev_err(&pdev->dev, "Unable to map registers\n"); ++ ret = -EIO; ++ goto map_failed; ++ } ++ ++ cns3xxx_i2c_adapter_init(i2c); ++ ++ init_waitqueue_head(&i2c->wait); ++ ret 
= request_irq(res2->start, cns3xxx_i2c_isr, 0, pdev->name, i2c); ++ if (ret) { ++ dev_err(&pdev->dev, "Cannot claim IRQ\n"); ++ goto request_irq_failed; ++ } ++ ++ platform_set_drvdata(pdev, i2c); ++ i2c->adap = cns3xxx_i2c_adapter; ++ i2c_set_adapdata(&i2c->adap, i2c); ++ i2c->adap.dev.parent = &pdev->dev; ++ ++ /* add i2c adapter to i2c tree */ ++ ret = i2c_add_numbered_adapter(&i2c->adap); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to add adapter\n"); ++ goto add_adapter_failed; ++ } ++ ++ return 0; ++ ++ add_adapter_failed: ++ free_irq(res2->start, i2c); ++ request_irq_failed: ++ iounmap(i2c->base); ++ map_failed: ++ release_mem_region(res->start, res->end - res->start + 1); ++ request_mem_failed: ++ kfree(i2c); ++ ++ return ret; ++} ++ ++static int __devexit cns3xxx_i2c_remove(struct platform_device *pdev) ++{ ++ struct cns3xxx_i2c *i2c = platform_get_drvdata(pdev); ++ struct resource *res; ++ ++ /* disable i2c logic */ ++ CNS3xxx_I2C_DISABLE_INTR(); ++ CNS3xxx_I2C_DISABLE(); ++ /* remove adapter & data */ ++ i2c_del_adapter(&i2c->adap); ++ platform_set_drvdata(pdev, NULL); ++ ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (res) ++ free_irq(res->start, i2c); ++ ++ iounmap(i2c->base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res) ++ release_mem_region(res->start, res->end - res->start + 1); ++ ++ kfree(i2c); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++#warning "CONFIG_PM defined: suspend and resume not implemented" ++#define cns3xxx_i2c_suspend NULL ++#define cns3xxx_i2c_resume NULL ++#else ++#define cns3xxx_i2c_suspend NULL ++#define cns3xxx_i2c_resume NULL ++#endif ++ ++static struct platform_driver cns3xxx_i2c_driver = { ++ .probe = cns3xxx_i2c_probe, ++ .remove = cns3xxx_i2c_remove, ++ .suspend = cns3xxx_i2c_suspend, ++ .resume = cns3xxx_i2c_resume, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "cns3xxx-i2c", ++ }, ++}; ++ ++static int __init cns3xxx_i2c_init(void) ++{ ++ return 
platform_driver_register(&cns3xxx_i2c_driver); ++} ++ ++static void __exit cns3xxx_i2c_exit(void) ++{ ++ platform_driver_unregister(&cns3xxx_i2c_driver); ++} ++ ++module_init(cns3xxx_i2c_init); ++module_exit(cns3xxx_i2c_exit); ++ ++MODULE_AUTHOR("Cavium Networks"); ++MODULE_DESCRIPTION("Cavium CNS3XXX I2C Controller"); ++MODULE_LICENSE("GPL"); +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -422,6 +422,12 @@ config I2C_MV64XXX + This driver can also be built as a module. If so, the module + will be called i2c-mv64xxx. + ++config I2C_CNS3XXX ++ tristate "Cavium Networks CNS3XXX I2C Controller" ++ depends on ARCH_CNS3XXX ++ help ++ Supports the Cavium Networks CNS3XXX on-chip I2C interfaces ++ + config I2C_OCORES + tristate "OpenCores I2C Controller" + depends on EXPERIMENTAL +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -39,6 +39,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o + obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o + obj-$(CONFIG_I2C_MPC) += i2c-mpc.o + obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o ++obj-$(CONFIG_I2C_CNS3XXX) += i2c-cns3xxx.o + obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o + obj-$(CONFIG_I2C_OMAP) += i2c-omap.o + obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o diff --git a/target/linux/cns3xxx/patches-2.6.31/204-cns3xxx_mmc_support.patch b/target/linux/cns3xxx/patches-2.6.31/204-cns3xxx_mmc_support.patch new file mode 100644 index 0000000000..3f81ad81eb --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/204-cns3xxx_mmc_support.patch @@ -0,0 +1,2663 @@ +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -130,7 +130,7 @@ mmc_blk_getgeo(struct block_device *bdev + return 0; + } + +-static struct block_device_operations mmc_bdops = { ++static const struct block_device_operations mmc_bdops = { + .open = mmc_blk_open, + .release = mmc_blk_release, + .getgeo = mmc_blk_getgeo, +@@ -392,13 +392,9 @@ static int mmc_blk_issue_rq(struct mmc_q + } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || + 
(R1_CURRENT_STATE(cmd.resp[0]) == 7)); + +-#if 0 + if (cmd.resp[0] & ~0x00000900) + printk(KERN_ERR "%s: status = %08x\n", + req->rq_disk->disk_name, cmd.resp[0]); +- if (mmc_decode_status(cmd.resp)) +- goto cmd_err; +-#endif + } + + if (brq.cmd.error || brq.stop.error || brq.data.error) { +--- a/drivers/mmc/core/core.c ++++ b/drivers/mmc/core/core.c +@@ -37,6 +37,9 @@ + #include "sd_ops.h" + #include "sdio_ops.h" + ++/* scott.trace */ ++//#define MMC_DEBUG ++ + static struct workqueue_struct *workqueue; + + /* +@@ -90,17 +93,30 @@ void mmc_request_done(struct mmc_host *h + cmd->error = 0; + host->ops->request(host, mrq); + } else { ++#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) + led_trigger_event(host->led, LED_OFF); ++#endif + + pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", + mmc_hostname(host), cmd->opcode, err, + cmd->resp[0], cmd->resp[1], + cmd->resp[2], cmd->resp[3]); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: req done (CMD%u): %d: %08x %08x %08x %08x\n", ++ mmc_hostname(host), cmd->opcode, err, ++ cmd->resp[0], cmd->resp[1], ++ cmd->resp[2], cmd->resp[3]); ++#endif + + if (mrq->data) { + pr_debug("%s: %d bytes transferred: %d\n", + mmc_hostname(host), + mrq->data->bytes_xfered, mrq->data->error); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: %d bytes transferred: %d\n", ++ mmc_hostname(host), ++ mrq->data->bytes_xfered, mrq->data->error); ++#endif + } + + if (mrq->stop) { +@@ -109,6 +125,13 @@ void mmc_request_done(struct mmc_host *h + mrq->stop->error, + mrq->stop->resp[0], mrq->stop->resp[1], + mrq->stop->resp[2], mrq->stop->resp[3]); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: (CMD%u): %d: %08x %08x %08x %08x\n", ++ mmc_hostname(host), mrq->stop->opcode, ++ mrq->stop->error, ++ mrq->stop->resp[0], mrq->stop->resp[1], ++ mrq->stop->resp[2], mrq->stop->resp[3]); ++#endif + } + + if (mrq->done) +@@ -129,6 +152,11 @@ mmc_start_request(struct mmc_host *host, + pr_debug("%s: starting CMD%u arg %08x flags %08x\n", + 
mmc_hostname(host), mrq->cmd->opcode, + mrq->cmd->arg, mrq->cmd->flags); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: starting CMD%u arg %08x flags %08x\n", ++ mmc_hostname(host), mrq->cmd->opcode, ++ mrq->cmd->arg, mrq->cmd->flags); ++#endif + + if (mrq->data) { + pr_debug("%s: blksz %d blocks %d flags %08x " +@@ -137,17 +165,32 @@ mmc_start_request(struct mmc_host *host, + mrq->data->blocks, mrq->data->flags, + mrq->data->timeout_ns / 1000000, + mrq->data->timeout_clks); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: blksz %d blocks %d flags %08x " ++ "tsac %d ms nsac %d\n", ++ mmc_hostname(host), mrq->data->blksz, ++ mrq->data->blocks, mrq->data->flags, ++ mrq->data->timeout_ns / 1000000, ++ mrq->data->timeout_clks); ++#endif + } + + if (mrq->stop) { + pr_debug("%s: CMD%u arg %08x flags %08x\n", + mmc_hostname(host), mrq->stop->opcode, + mrq->stop->arg, mrq->stop->flags); ++#ifdef MMC_DEBUG ++ printk("[MMC_DEBUG] %s: CMD%u arg %08x flags %08x\n", ++ mmc_hostname(host), mrq->stop->opcode, ++ mrq->stop->arg, mrq->stop->flags); ++#endif + } + + WARN_ON(!host->claimed); + ++#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) + led_trigger_event(host->led, LED_FULL); ++#endif + + mrq->cmd->error = 0; + mrq->cmd->mrq = mrq; +@@ -286,9 +329,9 @@ void mmc_set_data_timeout(struct mmc_dat + * The limit is really 250 ms, but that is + * insufficient for some crappy cards. + */ +- limit_us = 300000; ++ limit_us = 500000; + else +- limit_us = 100000; ++ limit_us = 200000; + + /* + * SDHC cards always use these fixed values. +@@ -344,6 +387,101 @@ unsigned int mmc_align_data_size(struct + EXPORT_SYMBOL(mmc_align_data_size); + + /** ++ * mmc_host_enable - enable a host. ++ * @host: mmc host to enable ++ * ++ * Hosts that support power saving can use the 'enable' and 'disable' ++ * methods to exit and enter power saving states. For more information ++ * see comments for struct mmc_host_ops. 
++ */ ++int mmc_host_enable(struct mmc_host *host) ++{ ++ if (!(host->caps & MMC_CAP_DISABLE)) ++ return 0; ++ ++ if (host->en_dis_recurs) ++ return 0; ++ ++ if (host->nesting_cnt++) ++ return 0; ++ ++ cancel_delayed_work_sync(&host->disable); ++ ++ if (host->enabled) ++ return 0; ++ ++ if (host->ops->enable) { ++ int err; ++ ++ host->en_dis_recurs = 1; ++ err = host->ops->enable(host); ++ host->en_dis_recurs = 0; ++ ++ if (err) { ++ pr_debug("%s: enable error %d\n", ++ mmc_hostname(host), err); ++ return err; ++ } ++ } ++ host->enabled = 1; ++ return 0; ++} ++EXPORT_SYMBOL(mmc_host_enable); ++ ++static int mmc_host_do_disable(struct mmc_host *host, int lazy) ++{ ++ if (host->ops->disable) { ++ int err; ++ ++ host->en_dis_recurs = 1; ++ err = host->ops->disable(host, lazy); ++ host->en_dis_recurs = 0; ++ ++ if (err < 0) { ++ pr_debug("%s: disable error %d\n", ++ mmc_hostname(host), err); ++ return err; ++ } ++ if (err > 0) { ++ unsigned long delay = msecs_to_jiffies(err); ++ ++ mmc_schedule_delayed_work(&host->disable, delay); ++ } ++ } ++ host->enabled = 0; ++ return 0; ++} ++ ++/** ++ * mmc_host_disable - disable a host. ++ * @host: mmc host to disable ++ * ++ * Hosts that support power saving can use the 'enable' and 'disable' ++ * methods to exit and enter power saving states. For more information ++ * see comments for struct mmc_host_ops. ++ */ ++int mmc_host_disable(struct mmc_host *host) ++{ ++ int err; ++ ++ if (!(host->caps & MMC_CAP_DISABLE)) ++ return 0; ++ ++ if (host->en_dis_recurs) ++ return 0; ++ ++ if (--host->nesting_cnt) ++ return 0; ++ ++ if (!host->enabled) ++ return 0; ++ ++ err = mmc_host_do_disable(host, 0); ++ return err; ++} ++EXPORT_SYMBOL(mmc_host_disable); ++ ++/** + * __mmc_claim_host - exclusively claim a host + * @host: mmc host to claim + * @abort: whether or not the operation should be aborted +@@ -366,25 +504,111 @@ int __mmc_claim_host(struct mmc_host *ho + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + stop = abort ? 
atomic_read(abort) : 0; +- if (stop || !host->claimed) ++ if (stop || !host->claimed || host->claimer == current) + break; + spin_unlock_irqrestore(&host->lock, flags); + schedule(); + spin_lock_irqsave(&host->lock, flags); + } + set_current_state(TASK_RUNNING); +- if (!stop) ++ if (!stop) { + host->claimed = 1; +- else ++ host->claimer = current; ++ host->claim_cnt += 1; ++ } else + wake_up(&host->wq); + spin_unlock_irqrestore(&host->lock, flags); + remove_wait_queue(&host->wq, &wait); ++ if (!stop) ++ mmc_host_enable(host); + return stop; + } + + EXPORT_SYMBOL(__mmc_claim_host); + + /** ++ * mmc_try_claim_host - try exclusively to claim a host ++ * @host: mmc host to claim ++ * ++ * Returns %1 if the host is claimed, %0 otherwise. ++ */ ++int mmc_try_claim_host(struct mmc_host *host) ++{ ++ int claimed_host = 0; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ if (!host->claimed || host->claimer == current) { ++ host->claimed = 1; ++ host->claimer = current; ++ host->claim_cnt += 1; ++ claimed_host = 1; ++ } ++ spin_unlock_irqrestore(&host->lock, flags); ++ return claimed_host; ++} ++EXPORT_SYMBOL(mmc_try_claim_host); ++ ++static void mmc_do_release_host(struct mmc_host *host) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ if (--host->claim_cnt) { ++ /* Release for nested claim */ ++ spin_unlock_irqrestore(&host->lock, flags); ++ } else { ++ host->claimed = 0; ++ host->claimer = NULL; ++ spin_unlock_irqrestore(&host->lock, flags); ++ wake_up(&host->wq); ++ } ++} ++ ++void mmc_host_deeper_disable(struct work_struct *work) ++{ ++ struct mmc_host *host = ++ container_of(work, struct mmc_host, disable.work); ++ ++ /* If the host is claimed then we do not want to disable it anymore */ ++ if (!mmc_try_claim_host(host)) ++ return; ++ mmc_host_do_disable(host, 1); ++ mmc_do_release_host(host); ++} ++ ++/** ++ * mmc_host_lazy_disable - lazily disable a host. 
++ * @host: mmc host to disable ++ * ++ * Hosts that support power saving can use the 'enable' and 'disable' ++ * methods to exit and enter power saving states. For more information ++ * see comments for struct mmc_host_ops. ++ */ ++int mmc_host_lazy_disable(struct mmc_host *host) ++{ ++ if (!(host->caps & MMC_CAP_DISABLE)) ++ return 0; ++ ++ if (host->en_dis_recurs) ++ return 0; ++ ++ if (--host->nesting_cnt) ++ return 0; ++ ++ if (!host->enabled) ++ return 0; ++ ++ if (host->disable_delay) { ++ mmc_schedule_delayed_work(&host->disable, ++ msecs_to_jiffies(host->disable_delay)); ++ return 0; ++ } else ++ return mmc_host_do_disable(host, 1); ++} ++EXPORT_SYMBOL(mmc_host_lazy_disable); ++ ++/** + * mmc_release_host - release a host + * @host: mmc host to release + * +@@ -393,15 +617,11 @@ EXPORT_SYMBOL(__mmc_claim_host); + */ + void mmc_release_host(struct mmc_host *host) + { +- unsigned long flags; +- + WARN_ON(!host->claimed); + +- spin_lock_irqsave(&host->lock, flags); +- host->claimed = 0; +- spin_unlock_irqrestore(&host->lock, flags); ++ mmc_host_lazy_disable(host); + +- wake_up(&host->wq); ++ mmc_do_release_host(host); + } + + EXPORT_SYMBOL(mmc_release_host); +@@ -687,7 +907,13 @@ void mmc_set_timing(struct mmc_host *hos + */ + static void mmc_power_up(struct mmc_host *host) + { +- int bit = fls(host->ocr_avail) - 1; ++ int bit; ++ ++ /* If ocr is set, we use it */ ++ if (host->ocr) ++ bit = ffs(host->ocr) - 1; ++ else ++ bit = fls(host->ocr_avail) - 1; + + host->ios.vdd = bit; + if (mmc_host_is_spi(host)) { +@@ -947,6 +1173,8 @@ void mmc_stop_host(struct mmc_host *host + spin_unlock_irqrestore(&host->lock, flags); + #endif + ++ if (host->caps & MMC_CAP_DISABLE) ++ cancel_delayed_work(&host->disable); + cancel_delayed_work(&host->detect); + mmc_flush_scheduled_work(); + +@@ -958,6 +1186,8 @@ void mmc_stop_host(struct mmc_host *host + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); ++ mmc_bus_put(host); ++ return; + } + 
mmc_bus_put(host); + +@@ -966,6 +1196,80 @@ void mmc_stop_host(struct mmc_host *host + mmc_power_off(host); + } + ++void mmc_power_save_host(struct mmc_host *host) ++{ ++ mmc_bus_get(host); ++ ++ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { ++ mmc_bus_put(host); ++ return; ++ } ++ ++ if (host->bus_ops->power_save) ++ host->bus_ops->power_save(host); ++ ++ mmc_bus_put(host); ++ ++ mmc_power_off(host); ++} ++EXPORT_SYMBOL(mmc_power_save_host); ++ ++void mmc_power_restore_host(struct mmc_host *host) ++{ ++ mmc_bus_get(host); ++ ++ if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { ++ mmc_bus_put(host); ++ return; ++ } ++ ++ mmc_power_up(host); ++ host->bus_ops->power_restore(host); ++ ++ mmc_bus_put(host); ++} ++EXPORT_SYMBOL(mmc_power_restore_host); ++ ++int mmc_card_awake(struct mmc_host *host) ++{ ++ int err = -ENOSYS; ++ ++ mmc_bus_get(host); ++ ++ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) ++ err = host->bus_ops->awake(host); ++ ++ mmc_bus_put(host); ++ ++ return err; ++} ++EXPORT_SYMBOL(mmc_card_awake); ++ ++int mmc_card_sleep(struct mmc_host *host) ++{ ++ int err = -ENOSYS; ++ ++ mmc_bus_get(host); ++ ++ if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) ++ err = host->bus_ops->sleep(host); ++ ++ mmc_bus_put(host); ++ ++ return err; ++} ++EXPORT_SYMBOL(mmc_card_sleep); ++ ++int mmc_card_can_sleep(struct mmc_host *host) ++{ ++ struct mmc_card *card = host->card; ++ ++ if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) ++ return 1; ++ return 0; ++} ++EXPORT_SYMBOL(mmc_card_can_sleep); ++ + #ifdef CONFIG_PM + + /** +@@ -975,27 +1279,36 @@ void mmc_stop_host(struct mmc_host *host + */ + int mmc_suspend_host(struct mmc_host *host, pm_message_t state) + { ++ int err = 0; ++ ++ if (host->caps & MMC_CAP_DISABLE) ++ cancel_delayed_work(&host->disable); + cancel_delayed_work(&host->detect); + mmc_flush_scheduled_work(); + + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + 
if (host->bus_ops->suspend) +- host->bus_ops->suspend(host); +- if (!host->bus_ops->resume) { ++ err = host->bus_ops->suspend(host); ++ if (err == -ENOSYS || !host->bus_ops->resume) { ++ /* ++ * We simply "remove" the card in this case. ++ * It will be redetected on resume. ++ */ + if (host->bus_ops->remove) + host->bus_ops->remove(host); +- + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); ++ err = 0; + } + } + mmc_bus_put(host); + +- mmc_power_off(host); ++ if (!err) ++ mmc_power_off(host); + +- return 0; ++ return err; + } + + EXPORT_SYMBOL(mmc_suspend_host); +@@ -1006,12 +1319,26 @@ EXPORT_SYMBOL(mmc_suspend_host); + */ + int mmc_resume_host(struct mmc_host *host) + { ++ int err = 0; ++ + mmc_bus_get(host); + if (host->bus_ops && !host->bus_dead) { + mmc_power_up(host); + mmc_select_voltage(host, host->ocr); + BUG_ON(!host->bus_ops->resume); +- host->bus_ops->resume(host); ++ err = host->bus_ops->resume(host); ++ if (err) { ++ printk(KERN_WARNING "%s: error %d during resume " ++ "(card was removed?)\n", ++ mmc_hostname(host), err); ++ if (host->bus_ops->remove) ++ host->bus_ops->remove(host); ++ mmc_claim_host(host); ++ mmc_detach_bus(host); ++ mmc_release_host(host); ++ /* no need to bother upper layers */ ++ err = 0; ++ } + } + mmc_bus_put(host); + +@@ -1021,7 +1348,7 @@ int mmc_resume_host(struct mmc_host *hos + */ + mmc_detect_change(host, 1); + +- return 0; ++ return err; + } + + EXPORT_SYMBOL(mmc_resume_host); +--- a/drivers/mmc/core/core.h ++++ b/drivers/mmc/core/core.h +@@ -16,10 +16,14 @@ + #define MMC_CMD_RETRIES 3 + + struct mmc_bus_ops { ++ int (*awake)(struct mmc_host *); ++ int (*sleep)(struct mmc_host *); + void (*remove)(struct mmc_host *); + void (*detect)(struct mmc_host *); +- void (*suspend)(struct mmc_host *); +- void (*resume)(struct mmc_host *); ++ int (*suspend)(struct mmc_host *); ++ int (*resume)(struct mmc_host *); ++ void (*power_save)(struct mmc_host *); ++ void (*power_restore)(struct mmc_host *); + }; + + 
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); +--- a/drivers/mmc/core/debugfs.c ++++ b/drivers/mmc/core/debugfs.c +@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct in + return 0; + } + +-static struct file_operations mmc_dbg_ext_csd_fops = { ++static const struct file_operations mmc_dbg_ext_csd_fops = { + .open = mmc_ext_csd_open, + .read = mmc_ext_csd_read, + .release = mmc_ext_csd_release, +--- a/drivers/mmc/core/host.c ++++ b/drivers/mmc/core/host.c +@@ -83,6 +83,7 @@ struct mmc_host *mmc_alloc_host(int extr + spin_lock_init(&host->lock); + init_waitqueue_head(&host->wq); + INIT_DELAYED_WORK(&host->detect, mmc_rescan); ++ INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); + + /* + * By default, hosts do not support SGIO or large requests. +--- a/drivers/mmc/core/host.h ++++ b/drivers/mmc/core/host.h +@@ -14,5 +14,7 @@ + int mmc_register_host_class(void); + void mmc_unregister_host_class(void); + ++void mmc_host_deeper_disable(struct work_struct *work); ++ + #endif + +--- a/drivers/mmc/core/mmc.c ++++ b/drivers/mmc/core/mmc.c +@@ -160,7 +160,6 @@ static int mmc_read_ext_csd(struct mmc_c + { + int err; + u8 *ext_csd; +- unsigned int ext_csd_struct; + + BUG_ON(!card); + +@@ -207,16 +206,16 @@ static int mmc_read_ext_csd(struct mmc_c + goto out; + } + +- ext_csd_struct = ext_csd[EXT_CSD_REV]; +- if (ext_csd_struct > 3) { ++ card->ext_csd.rev = ext_csd[EXT_CSD_REV]; ++ if (card->ext_csd.rev > 3) { + printk(KERN_ERR "%s: unrecognised EXT_CSD structure " + "version %d\n", mmc_hostname(card->host), +- ext_csd_struct); ++ card->ext_csd.rev); + err = -EINVAL; + goto out; + } + +- if (ext_csd_struct >= 2) { ++ if (card->ext_csd.rev >= 2) { + card->ext_csd.sectors = + ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | + ext_csd[EXT_CSD_SEC_CNT + 1] << 8 | +@@ -241,6 +240,15 @@ static int mmc_read_ext_csd(struct mmc_c + goto out; + } + ++ if (card->ext_csd.rev >= 3) { ++ u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; ++ ++ /* Sleep / 
awake timeout in 100ns units */ ++ if (sa_shift > 0 && sa_shift <= 0x17) ++ card->ext_csd.sa_timeout = ++ 1 << ext_csd[EXT_CSD_S_A_TIMEOUT]; ++ } ++ + out: + kfree(ext_csd); + +@@ -276,7 +284,7 @@ static struct attribute_group mmc_std_at + .attrs = mmc_std_attrs, + }; + +-static struct attribute_group *mmc_attr_groups[] = { ++static const struct attribute_group *mmc_attr_groups[] = { + &mmc_std_attr_group, + NULL, + }; +@@ -408,12 +416,17 @@ static int mmc_init_card(struct mmc_host + (host->caps & MMC_CAP_MMC_HIGHSPEED)) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, 1); +- if (err) ++ if (err && err != -EBADMSG) + goto free_card; + +- mmc_card_set_highspeed(card); +- +- mmc_set_timing(card->host, MMC_TIMING_MMC_HS); ++ if (err) { ++ printk(KERN_WARNING "%s: switch to highspeed failed\n", ++ mmc_hostname(card->host)); ++ err = 0; ++ } else { ++ mmc_card_set_highspeed(card); ++ mmc_set_timing(card->host, MMC_TIMING_MMC_HS); ++ } + } + + /* +@@ -448,10 +461,17 @@ static int mmc_init_card(struct mmc_host + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BUS_WIDTH, ext_csd_bit); + +- if (err) ++ if (err && err != -EBADMSG) + goto free_card; + +- mmc_set_bus_width(card->host, bus_width); ++ if (err) { ++ printk(KERN_WARNING "%s: switch to bus width %d " ++ "failed\n", mmc_hostname(card->host), ++ 1 << bus_width); ++ err = 0; ++ } else { ++ mmc_set_bus_width(card->host, bus_width); ++ } + } + + if (!oldcard) +@@ -507,12 +527,10 @@ static void mmc_detect(struct mmc_host * + } + } + +-#ifdef CONFIG_MMC_UNSAFE_RESUME +- + /* + * Suspend callback from host. 
+ */ +-static void mmc_suspend(struct mmc_host *host) ++static int mmc_suspend(struct mmc_host *host) + { + BUG_ON(!host); + BUG_ON(!host->card); +@@ -522,6 +540,8 @@ static void mmc_suspend(struct mmc_host + mmc_deselect_cards(host); + host->card->state &= ~MMC_STATE_HIGHSPEED; + mmc_release_host(host); ++ ++ return 0; + } + + /* +@@ -530,7 +550,7 @@ static void mmc_suspend(struct mmc_host + * This function tries to determine if the same card is still present + * and, if so, restore all state to it. + */ +-static void mmc_resume(struct mmc_host *host) ++static int mmc_resume(struct mmc_host *host) + { + int err; + +@@ -541,30 +561,99 @@ static void mmc_resume(struct mmc_host * + err = mmc_init_card(host, host->ocr, host->card); + mmc_release_host(host); + +- if (err) { +- mmc_remove(host); ++ return err; ++} + +- mmc_claim_host(host); +- mmc_detach_bus(host); +- mmc_release_host(host); ++static void mmc_power_restore(struct mmc_host *host) ++{ ++ host->card->state &= ~MMC_STATE_HIGHSPEED; ++ mmc_claim_host(host); ++ mmc_init_card(host, host->ocr, host->card); ++ mmc_release_host(host); ++} ++ ++static int mmc_sleep(struct mmc_host *host) ++{ ++ struct mmc_card *card = host->card; ++ int err = -ENOSYS; ++ ++ if (card && card->ext_csd.rev >= 3) { ++ err = mmc_card_sleepawake(host, 1); ++ if (err < 0) ++ pr_debug("%s: Error %d while putting card into sleep", ++ mmc_hostname(host), err); + } + ++ return err; + } + +-#else ++static int mmc_awake(struct mmc_host *host) ++{ ++ struct mmc_card *card = host->card; ++ int err = -ENOSYS; + +-#define mmc_suspend NULL +-#define mmc_resume NULL ++ if (card && card->ext_csd.rev >= 3) { ++ err = mmc_card_sleepawake(host, 0); ++ if (err < 0) ++ pr_debug("%s: Error %d while awaking sleeping card", ++ mmc_hostname(host), err); ++ } + +-#endif ++ return err; ++} ++ ++#ifdef CONFIG_MMC_UNSAFE_RESUME ++ ++static const struct mmc_bus_ops mmc_ops = { ++ .awake = mmc_awake, ++ .sleep = mmc_sleep, ++ .remove = mmc_remove, ++ .detect = 
mmc_detect, ++ .suspend = mmc_suspend, ++ .resume = mmc_resume, ++ .power_restore = mmc_power_restore, ++}; ++ ++static void mmc_attach_bus_ops(struct mmc_host *host) ++{ ++ mmc_attach_bus(host, &mmc_ops); ++} ++ ++#else + + static const struct mmc_bus_ops mmc_ops = { ++ .awake = mmc_awake, ++ .sleep = mmc_sleep, ++ .remove = mmc_remove, ++ .detect = mmc_detect, ++ .suspend = NULL, ++ .resume = NULL, ++ .power_restore = mmc_power_restore, ++}; ++ ++static const struct mmc_bus_ops mmc_ops_unsafe = { ++ .awake = mmc_awake, ++ .sleep = mmc_sleep, + .remove = mmc_remove, + .detect = mmc_detect, + .suspend = mmc_suspend, + .resume = mmc_resume, ++ .power_restore = mmc_power_restore, + }; + ++static void mmc_attach_bus_ops(struct mmc_host *host) ++{ ++ const struct mmc_bus_ops *bus_ops; ++ ++ if (host->caps & MMC_CAP_NONREMOVABLE) ++ bus_ops = &mmc_ops_unsafe; ++ else ++ bus_ops = &mmc_ops; ++ mmc_attach_bus(host, bus_ops); ++} ++ ++#endif ++ + /* + * Starting point for MMC card init. + */ +@@ -575,7 +664,7 @@ int mmc_attach_mmc(struct mmc_host *host + BUG_ON(!host); + WARN_ON(!host->claimed); + +- mmc_attach_bus(host, &mmc_ops); ++ mmc_attach_bus_ops(host); + + /* + * We need to get OCR a different way for SPI. +--- a/drivers/mmc/core/mmc_ops.c ++++ b/drivers/mmc/core/mmc_ops.c +@@ -57,6 +57,42 @@ int mmc_deselect_cards(struct mmc_host * + return _mmc_select_card(host, NULL); + } + ++int mmc_card_sleepawake(struct mmc_host *host, int sleep) ++{ ++ struct mmc_command cmd; ++ struct mmc_card *card = host->card; ++ int err; ++ ++ if (sleep) ++ mmc_deselect_cards(host); ++ ++ memset(&cmd, 0, sizeof(struct mmc_command)); ++ ++ cmd.opcode = MMC_SLEEP_AWAKE; ++ cmd.arg = card->rca << 16; ++ if (sleep) ++ cmd.arg |= 1 << 15; ++ ++ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; ++ err = mmc_wait_for_cmd(host, &cmd, 0); ++ if (err) ++ return err; ++ ++ /* ++ * If the host does not wait while the card signals busy, then we will ++ * will have to wait the sleep/awake timeout. 
Note, we cannot use the ++ * SEND_STATUS command to poll the status because that command (and most ++ * others) is invalid while the card sleeps. ++ */ ++ if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) ++ mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); ++ ++ if (!sleep) ++ err = mmc_select_card(card); ++ ++ return err; ++} ++ + int mmc_go_idle(struct mmc_host *host) + { + int err; +@@ -354,6 +390,7 @@ int mmc_switch(struct mmc_card *card, u8 + { + int err; + struct mmc_command cmd; ++ u32 status; + + BUG_ON(!card); + BUG_ON(!card->host); +@@ -371,6 +408,28 @@ int mmc_switch(struct mmc_card *card, u8 + if (err) + return err; + ++ /* Must check status to be sure of no errors */ ++ do { ++ err = mmc_send_status(card, &status); ++ if (err) ++ return err; ++ if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) ++ break; ++ if (mmc_host_is_spi(card->host)) ++ break; ++ } while (R1_CURRENT_STATE(status) == 7); ++ ++ if (mmc_host_is_spi(card->host)) { ++ if (status & R1_SPI_ILLEGAL_COMMAND) ++ return -EBADMSG; ++ } else { ++ if (status & 0xFDFFA000) ++ printk(KERN_WARNING "%s: unexpected status %#x after " ++ "switch", mmc_hostname(card->host), status); ++ if (status & R1_SWITCH_ERROR) ++ return -EBADMSG; ++ } ++ + return 0; + } + +--- a/drivers/mmc/core/mmc_ops.h ++++ b/drivers/mmc/core/mmc_ops.h +@@ -25,6 +25,7 @@ int mmc_send_status(struct mmc_card *car + int mmc_send_cid(struct mmc_host *host, u32 *cid); + int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); + int mmc_spi_set_crc(struct mmc_host *host, int use_crc); ++int mmc_card_sleepawake(struct mmc_host *host, int sleep); + + #endif + +--- a/drivers/mmc/core/sd.c ++++ b/drivers/mmc/core/sd.c +@@ -314,7 +314,7 @@ static struct attribute_group sd_std_att + .attrs = sd_std_attrs, + }; + +-static struct attribute_group *sd_attr_groups[] = { ++static const struct attribute_group *sd_attr_groups[] = { + &sd_std_attr_group, + NULL, + }; +@@ -561,12 +561,10 @@ static void mmc_sd_detect(struct mmc_hos + } 
+ } + +-#ifdef CONFIG_MMC_UNSAFE_RESUME +- + /* + * Suspend callback from host. + */ +-static void mmc_sd_suspend(struct mmc_host *host) ++static int mmc_sd_suspend(struct mmc_host *host) + { + BUG_ON(!host); + BUG_ON(!host->card); +@@ -576,6 +574,8 @@ static void mmc_sd_suspend(struct mmc_ho + mmc_deselect_cards(host); + host->card->state &= ~MMC_STATE_HIGHSPEED; + mmc_release_host(host); ++ ++ return 0; + } + + /* +@@ -584,7 +584,7 @@ static void mmc_sd_suspend(struct mmc_ho + * This function tries to determine if the same card is still present + * and, if so, restore all state to it. + */ +-static void mmc_sd_resume(struct mmc_host *host) ++static int mmc_sd_resume(struct mmc_host *host) + { + int err; + +@@ -595,30 +595,63 @@ static void mmc_sd_resume(struct mmc_hos + err = mmc_sd_init_card(host, host->ocr, host->card); + mmc_release_host(host); + +- if (err) { +- mmc_sd_remove(host); +- +- mmc_claim_host(host); +- mmc_detach_bus(host); +- mmc_release_host(host); +- } ++ return err; ++} + ++static void mmc_sd_power_restore(struct mmc_host *host) ++{ ++ host->card->state &= ~MMC_STATE_HIGHSPEED; ++ mmc_claim_host(host); ++ mmc_sd_init_card(host, host->ocr, host->card); ++ mmc_release_host(host); + } + +-#else ++#ifdef CONFIG_MMC_UNSAFE_RESUME + +-#define mmc_sd_suspend NULL +-#define mmc_sd_resume NULL ++static const struct mmc_bus_ops mmc_sd_ops = { ++ .remove = mmc_sd_remove, ++ .detect = mmc_sd_detect, ++ .suspend = mmc_sd_suspend, ++ .resume = mmc_sd_resume, ++ .power_restore = mmc_sd_power_restore, ++}; + +-#endif ++static void mmc_sd_attach_bus_ops(struct mmc_host *host) ++{ ++ mmc_attach_bus(host, &mmc_sd_ops); ++} ++ ++#else + + static const struct mmc_bus_ops mmc_sd_ops = { + .remove = mmc_sd_remove, + .detect = mmc_sd_detect, ++ .suspend = NULL, ++ .resume = NULL, ++ .power_restore = mmc_sd_power_restore, ++}; ++ ++static const struct mmc_bus_ops mmc_sd_ops_unsafe = { ++ .remove = mmc_sd_remove, ++ .detect = mmc_sd_detect, + .suspend = mmc_sd_suspend, 
+ .resume = mmc_sd_resume, ++ .power_restore = mmc_sd_power_restore, + }; + ++static void mmc_sd_attach_bus_ops(struct mmc_host *host) ++{ ++ const struct mmc_bus_ops *bus_ops; ++ ++ if (host->caps & MMC_CAP_NONREMOVABLE) ++ bus_ops = &mmc_sd_ops_unsafe; ++ else ++ bus_ops = &mmc_sd_ops; ++ mmc_attach_bus(host, bus_ops); ++} ++ ++#endif ++ + /* + * Starting point for SD card init. + */ +@@ -629,7 +662,7 @@ int mmc_attach_sd(struct mmc_host *host, + BUG_ON(!host); + WARN_ON(!host->claimed); + +- mmc_attach_bus(host, &mmc_sd_ops); ++ mmc_sd_attach_bus_ops(host); + + /* + * We need to get OCR a different way for SPI. +--- a/drivers/mmc/core/sdio_bus.c ++++ b/drivers/mmc/core/sdio_bus.c +@@ -20,9 +20,6 @@ + #include "sdio_cis.h" + #include "sdio_bus.h" + +-#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) +-#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) +- + /* show configuration fields */ + #define sdio_config_attr(field, format_string) \ + static ssize_t \ +@@ -251,12 +248,15 @@ int sdio_add_func(struct sdio_func *func + /* + * Unregister a SDIO function with the driver model, and + * (eventually) free it. ++ * This function can be called through error paths where sdio_add_func() was ++ * never executed (because a failure occurred at an earlier point). + */ + void sdio_remove_func(struct sdio_func *func) + { +- if (sdio_func_present(func)) +- device_del(&func->dev); ++ if (!sdio_func_present(func)) ++ return; + ++ device_del(&func->dev); + put_device(&func->dev); + } + +--- a/drivers/mmc/core/sdio.c ++++ b/drivers/mmc/core/sdio.c +@@ -165,6 +165,29 @@ static int sdio_enable_wide(struct mmc_c + } + + /* ++ * If desired, disconnect the pull-up resistor on CD/DAT[3] (pin 1) ++ * of the card. This may be required on certain setups of boards, ++ * controllers and embedded sdio device which do not need the card's ++ * pull-up. As a result, card detection is disabled and power is saved. 
++ */ ++static int sdio_disable_cd(struct mmc_card *card) ++{ ++ int ret; ++ u8 ctrl; ++ ++ if (!card->cccr.disable_cd) ++ return 0; ++ ++ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); ++ if (ret) ++ return ret; ++ ++ ctrl |= SDIO_BUS_CD_DISABLE; ++ ++ return mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); ++} ++ ++/* + * Test if the card supports high-speed mode and, if so, switch to it. + */ + static int sdio_enable_hs(struct mmc_card *card) +@@ -195,6 +218,135 @@ static int sdio_enable_hs(struct mmc_car + } + + /* ++ * Handle the detection and initialisation of a card. ++ * ++ * In the case of a resume, "oldcard" will contain the card ++ * we're trying to reinitialise. ++ */ ++static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, ++ struct mmc_card *oldcard) ++{ ++ struct mmc_card *card; ++ int err; ++ ++ BUG_ON(!host); ++ WARN_ON(!host->claimed); ++ ++ /* ++ * Inform the card of the voltage ++ */ ++ err = mmc_send_io_op_cond(host, host->ocr, &ocr); ++ if (err) ++ goto err; ++ ++ /* ++ * For SPI, enable CRC as appropriate. ++ */ ++ if (mmc_host_is_spi(host)) { ++ err = mmc_spi_set_crc(host, use_spi_crc); ++ if (err) ++ goto err; ++ } ++ ++ /* ++ * Allocate card structure. ++ */ ++ card = mmc_alloc_card(host, NULL); ++ if (IS_ERR(card)) { ++ err = PTR_ERR(card); ++ goto err; ++ } ++ ++ card->type = MMC_TYPE_SDIO; ++ ++ /* ++ * For native busses: set card RCA and quit open drain mode. ++ */ ++ if (!mmc_host_is_spi(host)) { ++ err = mmc_send_relative_addr(host, &card->rca); ++ if (err) ++ goto remove; ++ ++ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); ++ } ++ ++ /* ++ * Select card, as all following commands rely on that. ++ */ ++ if (!mmc_host_is_spi(host)) { ++ err = mmc_select_card(card); ++ if (err) ++ goto remove; ++ } ++ ++ /* ++ * Read the common registers. ++ */ ++ err = sdio_read_cccr(card); ++ if (err) ++ goto remove; ++ ++ /* ++ * Read the common CIS tuples. 
++ */ ++ err = sdio_read_common_cis(card); ++ if (err) ++ goto remove; ++ ++ if (oldcard) { ++ int same = (card->cis.vendor == oldcard->cis.vendor && ++ card->cis.device == oldcard->cis.device); ++ mmc_remove_card(card); ++ if (!same) { ++ err = -ENOENT; ++ goto err; ++ } ++ card = oldcard; ++ return 0; ++ } ++ ++ /* ++ * Switch to high-speed (if supported). ++ */ ++ err = sdio_enable_hs(card); ++ if (err) ++ goto remove; ++ ++ /* ++ * Change to the card's maximum speed. ++ */ ++ if (mmc_card_highspeed(card)) { ++ /* ++ * The SDIO specification doesn't mention how ++ * the CIS transfer speed register relates to ++ * high-speed, but it seems that 50 MHz is ++ * mandatory. ++ */ ++ mmc_set_clock(host, 50000000); ++ } else { ++ mmc_set_clock(host, card->cis.max_dtr); ++ } ++ ++ /* ++ * Switch to wider bus (if supported). ++ */ ++ err = sdio_enable_wide(card); ++ if (err) ++ goto remove; ++ ++ if (!oldcard) ++ host->card = card; ++ return 0; ++ ++remove: ++ if (!oldcard) ++ mmc_remove_card(card); ++ ++err: ++ return err; ++} ++ ++/* + * Host is being removed. Free up the current card. + */ + static void mmc_sdio_remove(struct mmc_host *host) +@@ -243,10 +395,77 @@ static void mmc_sdio_detect(struct mmc_h + } + } + ++/* ++ * SDIO suspend. We need to suspend all functions separately. ++ * Therefore all registered functions must have drivers with suspend ++ * and resume methods. Failing that we simply remove the whole card. 
++ */ ++static int mmc_sdio_suspend(struct mmc_host *host) ++{ ++ int i, err = 0; ++ ++ for (i = 0; i < host->card->sdio_funcs; i++) { ++ struct sdio_func *func = host->card->sdio_func[i]; ++ if (func && sdio_func_present(func) && func->dev.driver) { ++ const struct dev_pm_ops *pmops = func->dev.driver->pm; ++ if (!pmops || !pmops->suspend || !pmops->resume) { ++ /* force removal of entire card in that case */ ++ err = -ENOSYS; ++ } else ++ err = pmops->suspend(&func->dev); ++ if (err) ++ break; ++ } ++ } ++ while (err && --i >= 0) { ++ struct sdio_func *func = host->card->sdio_func[i]; ++ if (func && sdio_func_present(func) && func->dev.driver) { ++ const struct dev_pm_ops *pmops = func->dev.driver->pm; ++ pmops->resume(&func->dev); ++ } ++ } ++ ++ return err; ++} ++ ++static int mmc_sdio_resume(struct mmc_host *host) ++{ ++ int i, err; ++ ++ BUG_ON(!host); ++ BUG_ON(!host->card); ++ ++ /* Basic card reinitialization. */ ++ mmc_claim_host(host); ++ err = mmc_sdio_init_card(host, host->ocr, host->card); ++ mmc_release_host(host); ++ ++ /* ++ * If the card looked to be the same as before suspending, then ++ * we proceed to resume all card functions. If one of them returns ++ * an error then we simply return that error to the core and the ++ * card will be redetected as new. It is the responsibility of ++ * the function driver to perform further tests with the extra ++ * knowledge it has of the card to confirm the card is indeed the ++ * same as before suspending (same MAC address for network cards, ++ * etc.) and return an error otherwise. 
++ */ ++ for (i = 0; !err && i < host->card->sdio_funcs; i++) { ++ struct sdio_func *func = host->card->sdio_func[i]; ++ if (func && sdio_func_present(func) && func->dev.driver) { ++ const struct dev_pm_ops *pmops = func->dev.driver->pm; ++ err = pmops->resume(&func->dev); ++ } ++ } ++ ++ return err; ++} + + static const struct mmc_bus_ops mmc_sdio_ops = { + .remove = mmc_sdio_remove, + .detect = mmc_sdio_detect, ++ .suspend = mmc_sdio_suspend, ++ .resume = mmc_sdio_resume, + }; + + +@@ -275,13 +494,6 @@ int mmc_attach_sdio(struct mmc_host *hos + ocr &= ~0x7F; + } + +- if (ocr & MMC_VDD_165_195) { +- printk(KERN_WARNING "%s: SDIO card claims to support the " +- "incompletely defined 'low voltage range'. This " +- "will be ignored.\n", mmc_hostname(host)); +- ocr &= ~MMC_VDD_165_195; +- } +- + host->ocr = mmc_select_voltage(host, ocr); + + /* +@@ -293,108 +505,31 @@ int mmc_attach_sdio(struct mmc_host *hos + } + + /* +- * Inform the card of the voltage ++ * Detect and init the card. + */ +- err = mmc_send_io_op_cond(host, host->ocr, &ocr); ++ err = mmc_sdio_init_card(host, host->ocr, NULL); + if (err) + goto err; +- +- /* +- * For SPI, enable CRC as appropriate. +- */ +- if (mmc_host_is_spi(host)) { +- err = mmc_spi_set_crc(host, use_spi_crc); +- if (err) +- goto err; +- } ++ card = host->card; + + /* + * The number of functions on the card is encoded inside + * the ocr. + */ + funcs = (ocr & 0x70000000) >> 28; ++ card->sdio_funcs = 0; + + /* +- * Allocate card structure. +- */ +- card = mmc_alloc_card(host, NULL); +- if (IS_ERR(card)) { +- err = PTR_ERR(card); +- goto err; +- } +- +- card->type = MMC_TYPE_SDIO; +- card->sdio_funcs = funcs; +- +- host->card = card; +- +- /* +- * For native busses: set card RCA and quit open drain mode. ++ * If needed, disconnect card detection pull-up resistor. 
+ */ +- if (!mmc_host_is_spi(host)) { +- err = mmc_send_relative_addr(host, &card->rca); +- if (err) +- goto remove; +- +- mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); +- } +- +- /* +- * Select card, as all following commands rely on that. +- */ +- if (!mmc_host_is_spi(host)) { +- err = mmc_select_card(card); +- if (err) +- goto remove; +- } +- +- /* +- * Read the common registers. +- */ +- err = sdio_read_cccr(card); +- if (err) +- goto remove; +- +- /* +- * Read the common CIS tuples. +- */ +- err = sdio_read_common_cis(card); +- if (err) +- goto remove; +- +- /* +- * Switch to high-speed (if supported). +- */ +- err = sdio_enable_hs(card); +- if (err) +- goto remove; +- +- /* +- * Change to the card's maximum speed. +- */ +- if (mmc_card_highspeed(card)) { +- /* +- * The SDIO specification doesn't mention how +- * the CIS transfer speed register relates to +- * high-speed, but it seems that 50 MHz is +- * mandatory. +- */ +- mmc_set_clock(host, 50000000); +- } else { +- mmc_set_clock(host, card->cis.max_dtr); +- } +- +- /* +- * Switch to wider bus (if supported). +- */ +- err = sdio_enable_wide(card); ++ err = sdio_disable_cd(card); + if (err) + goto remove; + + /* + * Initialize (but don't add) all present functions. + */ +- for (i = 0;i < funcs;i++) { ++ for (i = 0; i < funcs; i++, card->sdio_funcs++) { + err = sdio_init_func(host->card, i + 1); + if (err) + goto remove; +--- a/drivers/mmc/core/sdio_cis.c ++++ b/drivers/mmc/core/sdio_cis.c +@@ -29,6 +29,8 @@ static int cistpl_vers_1(struct mmc_card + unsigned i, nr_strings; + char **buffer, *string; + ++ /* Find all null-terminated (including zero length) strings in ++ the TPLLV1_INFO field. Trailing garbage is ignored. 
*/ + buf += 2; + size -= 2; + +@@ -39,11 +41,8 @@ static int cistpl_vers_1(struct mmc_card + if (buf[i] == 0) + nr_strings++; + } +- +- if (buf[i-1] != '\0') { +- printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); ++ if (nr_strings == 0) + return 0; +- } + + size = i; + +@@ -98,6 +97,22 @@ static const unsigned char speed_val[16] + static const unsigned int speed_unit[8] = + { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; + ++/* FUNCE tuples with these types get passed to SDIO drivers */ ++static const unsigned char funce_type_whitelist[] = { ++ 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */ ++}; ++ ++static int cistpl_funce_whitelisted(unsigned char type) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) { ++ if (funce_type_whitelist[i] == type) ++ return 1; ++ } ++ return 0; ++} ++ + static int cistpl_funce_common(struct mmc_card *card, + const unsigned char *buf, unsigned size) + { +@@ -120,6 +135,10 @@ static int cistpl_funce_func(struct sdio + unsigned vsn; + unsigned min_size; + ++ /* let SDIO drivers take care of whitelisted FUNCE tuples */ ++ if (cistpl_funce_whitelisted(buf[0])) ++ return -EILSEQ; ++ + vsn = func->card->cccr.sdio_vsn; + min_size = (vsn == SDIO_SDIO_REV_1_00) ? 
28 : 42; + +@@ -154,13 +173,12 @@ static int cistpl_funce(struct mmc_card + else + ret = cistpl_funce_common(card, buf, size); + +- if (ret) { ++ if (ret && ret != -EILSEQ) { + printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " + "type %u\n", mmc_hostname(card->host), size, buf[0]); +- return ret; + } + +- return 0; ++ return ret; + } + + typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, +@@ -253,21 +271,12 @@ static int sdio_read_cis(struct mmc_card + for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) + if (cis_tpl_list[i].code == tpl_code) + break; +- if (i >= ARRAY_SIZE(cis_tpl_list)) { +- /* this tuple is unknown to the core */ +- this->next = NULL; +- this->code = tpl_code; +- this->size = tpl_link; +- *prev = this; +- prev = &this->next; +- printk(KERN_DEBUG +- "%s: queuing CIS tuple 0x%02x length %u\n", +- mmc_hostname(card->host), tpl_code, tpl_link); +- } else { ++ if (i < ARRAY_SIZE(cis_tpl_list)) { + const struct cis_tpl *tpl = cis_tpl_list + i; + if (tpl_link < tpl->min_size) { + printk(KERN_ERR +- "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", ++ "%s: bad CIS tuple 0x%02x" ++ " (length = %u, expected >= %u)\n", + mmc_hostname(card->host), + tpl_code, tpl_link, tpl->min_size); + ret = -EINVAL; +@@ -275,7 +284,30 @@ static int sdio_read_cis(struct mmc_card + ret = tpl->parse(card, func, + this->data, tpl_link); + } +- kfree(this); ++ /* ++ * We don't need the tuple anymore if it was ++ * successfully parsed by the SDIO core or if it is ++ * not going to be parsed by SDIO drivers. 
++ */ ++ if (!ret || ret != -EILSEQ) ++ kfree(this); ++ } else { ++ /* unknown tuple */ ++ ret = -EILSEQ; ++ } ++ ++ if (ret == -EILSEQ) { ++ /* this tuple is unknown to the core or whitelisted */ ++ this->next = NULL; ++ this->code = tpl_code; ++ this->size = tpl_link; ++ *prev = this; ++ prev = &this->next; ++ printk(KERN_DEBUG ++ "%s: queuing CIS tuple 0x%02x length %u\n", ++ mmc_hostname(card->host), tpl_code, tpl_link); ++ /* keep on analyzing tuples */ ++ ret = 0; + } + + ptr += tpl_link; +--- a/drivers/mmc/core/sdio_io.c ++++ b/drivers/mmc/core/sdio_io.c +@@ -624,7 +624,7 @@ void sdio_f0_writeb(struct sdio_func *fu + + BUG_ON(!func); + +- if (addr < 0xF0 || addr > 0xFF) { ++ if ((addr < 0xF0 || addr > 0xFF) && (!mmc_card_lenient_fn0(func->card))) { + if (err_ret) + *err_ret = -EINVAL; + return; +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -55,6 +55,17 @@ config MMC_SDHCI_PCI + + If unsure, say N. + ++config MMC_SDHCI_CNS3XXX ++ tristate "SDHCI support on CNS3XXX" ++ depends on MMC_SDHCI && ARCH_CNS3XXX ++ help ++ This selects the Secure Digital Host Controller Interface (SDHCI) ++ in Cavium Networks CNS3XXX SOCs. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. 
++ + config MMC_RICOH_MMC + tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)" + depends on MMC_SDHCI_PCI +--- a/drivers/mmc/host/Makefile ++++ b/drivers/mmc/host/Makefile +@@ -12,6 +12,7 @@ obj-$(CONFIG_MMC_IMX) += imxmmc.o + obj-$(CONFIG_MMC_MXC) += mxcmmc.o + obj-$(CONFIG_MMC_SDHCI) += sdhci.o + obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o ++obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o + obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o + obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o + obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -27,6 +27,15 @@ + + #define DRIVER_NAME "sdhci" + ++#define SDHCI_DEBUG ++#undef SDHCI_DEBUG ++ ++#ifdef SDHCI_DEBUG ++#define sd_printk(x...) printk(x) ++#else ++#define sd_printk(x...) do { } while(0) ++#endif ++ + #define DBG(f, x...) \ + pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x) + +@@ -43,6 +52,39 @@ static void sdhci_finish_data(struct sdh + static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); + static void sdhci_finish_command(struct sdhci_host *); + ++static void sdhci_dumpallregs(struct sdhci_host *host) ++{ ++#ifdef SDHCI_DEBUG ++ printk(" _______________________________________________\n"); ++ ++ printk(" 0x00: 0x%08x | 0x04: 0x%08x\n", sdhci_readl(host, 0x00), sdhci_readl(host, 0x04)); ++ printk(" 0x08: 0x%08x | 0x0C: 0x%08x\n", sdhci_readl(host, 0x08), sdhci_readl(host, 0x0C)); ++ printk(" 0x10: 0x%08x | 0x14: 0x%08x\n", sdhci_readl(host, 0x10), sdhci_readl(host, 0x14)); ++ printk(" 0x18: 0x%08x | 0x1C: 0x%08x\n", sdhci_readl(host, 0x18), sdhci_readl(host, 0x1C)); ++ printk(" -----------------| 0x24: 0x%08x\n", sdhci_readl(host, 0x24)); ++ printk(" 0x28: 0x%08x | 0x2C: 0x%08x\n", sdhci_readl(host, 0x28), sdhci_readl(host, 0x2C)); ++ printk(" 0x30: 0x%08x | 0x34: 0x%08x\n", sdhci_readl(host, 0x30), sdhci_readl(host, 0x34)); ++ printk(" 0x38: 0x%08x | 0x3C: 0x%08x\n", sdhci_readl(host, 0x38), sdhci_readl(host, 0x3C)); ++ printk(" 
0x40: 0x%08x | 0x44: 0x%08x\n", sdhci_readl(host, 0x40), sdhci_readl(host, 0x44)); ++ printk(" 0x48: 0x%08x | 0x4C: 0x%08x\n", sdhci_readl(host, 0x48), sdhci_readl(host, 0x4C)); ++ printk(" 0x50: 0x%08x | 0xFC: 0x%08x\n", sdhci_readl(host, 0x50), sdhci_readl(host, 0xFC)); ++//#else ++ printk(KERN_DEBUG " _______________________________________________\n"); ++ ++ printk(KERN_DEBUG " 0x00: 0x%08x | 0x04: 0x%08x\n", sdhci_readl(host, 0x00), sdhci_readl(host, 0x04)); ++ printk(KERN_DEBUG " 0x08: 0x%08x | 0x0C: 0x%08x\n", sdhci_readl(host, 0x08), sdhci_readl(host, 0x0C)); ++ printk(KERN_DEBUG " 0x10: 0x%08x | 0x14: 0x%08x\n", sdhci_readl(host, 0x10), sdhci_readl(host, 0x14)); ++ printk(KERN_DEBUG " 0x18: 0x%08x | 0x1C: 0x%08x\n", sdhci_readl(host, 0x18), sdhci_readl(host, 0x1C)); ++ printk(KERN_DEBUG " -----------------| 0x24: 0x%08x\n", sdhci_readl(host, 0x24)); ++ printk(KERN_DEBUG " 0x28: 0x%08x | 0x2C: 0x%08x\n", sdhci_readl(host, 0x28), sdhci_readl(host, 0x2C)); ++ printk(KERN_DEBUG " 0x30: 0x%08x | 0x34: 0x%08x\n", sdhci_readl(host, 0x30), sdhci_readl(host, 0x34)); ++ printk(KERN_DEBUG " 0x38: 0x%08x | 0x3C: 0x%08x\n", sdhci_readl(host, 0x38), sdhci_readl(host, 0x3C)); ++ printk(KERN_DEBUG " 0x40: 0x%08x | 0x44: 0x%08x\n", sdhci_readl(host, 0x40), sdhci_readl(host, 0x44)); ++ printk(KERN_DEBUG " 0x48: 0x%08x | 0x4C: 0x%08x\n", sdhci_readl(host, 0x48), sdhci_readl(host, 0x4C)); ++ printk(KERN_DEBUG " 0x50: 0x%08x | 0xFC: 0x%08x\n", sdhci_readl(host, 0x50), sdhci_readl(host, 0xFC)); ++#endif ++} ++ + static void sdhci_dumpregs(struct sdhci_host *host) + { + printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n"); +@@ -591,6 +633,9 @@ static u8 sdhci_calc_timeout(struct sdhc + target_timeout = data->timeout_ns / 1000 + + data->timeout_clks / host->clock; + ++ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) ++ host->timeout_clk = host->clock / 1000; ++ + /* + * Figure out needed cycles. 
+ * We do this in steps in order to fit inside a 32 bit int. +@@ -622,7 +667,7 @@ static u8 sdhci_calc_timeout(struct sdhc + static void sdhci_set_transfer_irqs(struct sdhci_host *host) + { + u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; +- u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; ++ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ACMD12ERR | SDHCI_INT_ADMA_ERROR; + + if (host->flags & SDHCI_REQ_USE_DMA) + sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); +@@ -652,7 +697,7 @@ static void sdhci_prepare_data(struct sd + count = sdhci_calc_timeout(host, data); + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + +- if (host->flags & SDHCI_USE_DMA) ++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) + host->flags |= SDHCI_REQ_USE_DMA; + + /* +@@ -736,11 +781,21 @@ static void sdhci_prepare_data(struct sd + } else { + int sg_cnt; + ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), mmc_dev(host->mmc) = %p \n", mmc_dev(host->mmc)); ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), data->sg = %p \n", data->sg); ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), data->sg_len = %d \n", data->sg_len); + sg_cnt = dma_map_sg(mmc_dev(host->mmc), + data->sg, data->sg_len, + (data->flags & MMC_DATA_READ) ? 
+ DMA_FROM_DEVICE : + DMA_TO_DEVICE); ++ if (data->sg == NULL) { ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), data->sg = (NULL) \n"); ++ return; ++ } ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), data->sg = %p \n", data->sg); ++ sd_printk("[SDHCI_DEBUG] dma_map_sg(), sg_cnt = %d \n", sg_cnt); ++ + if (sg_cnt == 0) { + /* + * This only happens when someone fed +@@ -750,6 +805,7 @@ static void sdhci_prepare_data(struct sd + host->flags &= ~SDHCI_REQ_USE_DMA; + } else { + WARN_ON(sg_cnt != 1); ++ sd_printk("[SDHCI_DEBUG] sg_dma_address() => %08x \n", sg_dma_address(data->sg)); + sdhci_writel(host, sg_dma_address(data->sg), + SDHCI_DMA_ADDRESS); + } +@@ -763,14 +819,32 @@ static void sdhci_prepare_data(struct sd + */ + if (host->version >= SDHCI_SPEC_200) { + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ++#ifdef SDHCI_USE_LEDS_CLASS ++ ctrl |= SDHCI_CTRL_LED; ++#else ++ ctrl &= ~SDHCI_CTRL_LED; ++#endif + ctrl &= ~SDHCI_CTRL_DMA_MASK; + if ((host->flags & SDHCI_REQ_USE_DMA) && + (host->flags & SDHCI_USE_ADMA)) + ctrl |= SDHCI_CTRL_ADMA32; + else + ctrl |= SDHCI_CTRL_SDMA; ++ + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +- } ++ } else if (host->version == SDHCI_SPEC_100) { ++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ++#ifdef SDHCI_USE_LEDS_CLASS ++ ctrl |= SDHCI_CTRL_LED; ++#else ++ ctrl &= ~SDHCI_CTRL_LED; ++#endif ++ ctrl &= ~SDHCI_CTRL_DMA_MASK; ++ if (host->flags & SDHCI_REQ_USE_DMA) ++ ctrl |= SDHCI_CTRL_SDMA; ++ ++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); ++ } + + if (!(host->flags & SDHCI_REQ_USE_DMA)) { + int flags; +@@ -795,15 +869,26 @@ static void sdhci_set_transfer_mode(stru + struct mmc_data *data) + { + u16 mode; ++ u8 bgctrl; + + if (data == NULL) + return; + + WARN_ON(!host->data); + ++ bgctrl = sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL); ++ if (host->quirks & SDHCI_QUIRK_READ_WAIT_CTRL) ++ bgctrl |= SDHCI_READ_WAIT_CTRL; ++ sdhci_writeb(host, bgctrl, SDHCI_BLOCK_GAP_CONTROL); ++ + mode = SDHCI_TRNS_BLK_CNT_EN; +- if (data->blocks > 1) ++ ++ if 
(data->blocks > 1) { + mode |= SDHCI_TRNS_MULTI; ++ ++ if (host->quirks & SDHCI_QUIRK_AUTO_CMD12) ++ mode |= SDHCI_TRNS_ACMD12; ++ } + if (data->flags & MMC_DATA_READ) + mode |= SDHCI_TRNS_READ; + if (host->flags & SDHCI_REQ_USE_DMA) +@@ -812,6 +897,20 @@ static void sdhci_set_transfer_mode(stru + sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); + } + ++static void shdci_check_dma_overrun(struct sdhci_host *host, struct mmc_data *data) ++{ ++ u32 dma_pos = sdhci_readl(host, SDHCI_DMA_ADDRESS); ++ u32 dma_start = sg_dma_address(data->sg); ++ u32 dma_end = dma_start + data->sg->length; ++ ++ /* Test whether we ended up moving more data than was originally requested. */ ++ if (dma_pos <= dma_end) ++ return; ++ ++ printk(KERN_ERR "%s: dma overrun, dma %08x, req %08x..%08x\n", ++ mmc_hostname(host->mmc), dma_pos, dma_start, dma_end); ++} ++ + static void sdhci_finish_data(struct sdhci_host *host) + { + struct mmc_data *data; +@@ -825,6 +924,9 @@ static void sdhci_finish_data(struct sdh + if (host->flags & SDHCI_USE_ADMA) + sdhci_adma_table_post(host, data); + else { ++ shdci_check_dma_overrun(host, data); ++ ++ sd_printk("[SDHCI_DEBUG] dma_unmap_sg(), data->sg_len = %d \n", data->sg_len); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, + data->sg_len, (data->flags & MMC_DATA_READ) ? 
+ DMA_FROM_DEVICE : DMA_TO_DEVICE); +@@ -866,12 +968,16 @@ static void sdhci_send_command(struct sd + + WARN_ON(host->cmd); + ++ sd_printk("[SDHCI_DEBUG] sdhci_send_command() \n"); ++ + /* Wait max 10 ms */ + timeout = 10; + + mask = SDHCI_CMD_INHIBIT; + if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY)) + mask |= SDHCI_DATA_INHIBIT; ++ if ((cmd->data != NULL)) ++ mask |= SDHCI_DATA_INHIBIT; + + /* We shouldn't wait for data inihibit for stop commands, even + though they might use busy signaling */ +@@ -925,7 +1031,11 @@ static void sdhci_send_command(struct sd + if (cmd->data) + flags |= SDHCI_CMD_DATA; + ++ sd_printk("[SDHCI_DEBUG] sdhci_send_command() => %08x \n", SDHCI_MAKE_CMD(cmd->opcode, flags)); ++ sdhci_dumpallregs(host); + sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); ++ sd_printk("[SDHCI_DEBUG] sdhci_send_command(): After issue command \n"); ++ sdhci_dumpallregs(host); + } + + static void sdhci_finish_command(struct sdhci_host *host) +@@ -934,6 +1044,8 @@ static void sdhci_finish_command(struct + + BUG_ON(host->cmd == NULL); + ++ sd_printk("[SDHCI_DEBUG] sdhci_finish_command() \n"); ++ + if (host->cmd->flags & MMC_RSP_PRESENT) { + if (host->cmd->flags & MMC_RSP_136) { + /* CRC is stripped so we need to do some shifting. 
*/ +@@ -991,8 +1103,8 @@ static void sdhci_set_clock(struct sdhci + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + +- /* Wait max 10 ms */ +- timeout = 10; ++ /* Wait max 20 ms */ ++ timeout = 20; + while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) + & SDHCI_CLOCK_INT_STABLE)) { + if (timeout == 0) { +@@ -1154,6 +1266,12 @@ static void sdhci_set_ios(struct mmc_hos + else + ctrl &= ~SDHCI_CTRL_HISPD; + ++#ifdef SDHCI_USE_LEDS_CLASS ++ ctrl |= SDHCI_CTRL_LED; ++#else ++ ctrl &= ~SDHCI_CTRL_LED; ++#endif ++ + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + + /* +@@ -1321,7 +1439,11 @@ static void sdhci_timeout_timer(unsigned + if (host->mrq) { + printk(KERN_ERR "%s: Timeout waiting for hardware " + "interrupt.\n", mmc_hostname(host->mmc)); ++#ifdef SDHCI_DEBUG ++ sdhci_dumpallregs(host); ++#else + sdhci_dumpregs(host); ++#endif + + if (host->data) { + host->data->error = -ETIMEDOUT; +@@ -1508,6 +1630,10 @@ static irqreturn_t sdhci_irq(int irq, vo + DBG("*** %s got interrupt: 0x%08x\n", + mmc_hostname(host->mmc), intmask); + ++#ifdef SDHCI_DEBUG ++ printk("*** %s got interrupt: 0x%08x\n", mmc_hostname(host->mmc), intmask); ++#endif ++ + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); +@@ -1597,7 +1723,7 @@ int sdhci_resume_host(struct sdhci_host + { + int ret; + +- if (host->flags & SDHCI_USE_DMA) { ++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } +@@ -1678,23 +1804,20 @@ int sdhci_add_host(struct sdhci_host *ho + caps = sdhci_readl(host, SDHCI_CAPABILITIES); + + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) +- host->flags |= SDHCI_USE_DMA; +- else if (!(caps & SDHCI_CAN_DO_DMA)) +- DBG("Controller doesn't have DMA capability\n"); ++ host->flags |= SDHCI_USE_SDMA; ++ else if (!(caps & SDHCI_CAN_DO_SDMA)) ++ DBG("Controller doesn't have SDMA 
capability\n"); + else +- host->flags |= SDHCI_USE_DMA; ++ host->flags |= SDHCI_USE_SDMA; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && +- (host->flags & SDHCI_USE_DMA)) { ++ (host->flags & SDHCI_USE_SDMA)) { + DBG("Disabling DMA as it is marked broken\n"); +- host->flags &= ~SDHCI_USE_DMA; ++ host->flags &= ~SDHCI_USE_SDMA; + } + +- if (host->flags & SDHCI_USE_DMA) { +- if ((host->version >= SDHCI_SPEC_200) && +- (caps & SDHCI_CAN_DO_ADMA2)) +- host->flags |= SDHCI_USE_ADMA; +- } ++ if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) ++ host->flags |= SDHCI_USE_ADMA; + + if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && + (host->flags & SDHCI_USE_ADMA)) { +@@ -1702,13 +1825,14 @@ int sdhci_add_host(struct sdhci_host *ho + host->flags &= ~SDHCI_USE_ADMA; + } + +- if (host->flags & SDHCI_USE_DMA) { ++ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) { + if (host->ops->enable_dma(host)) { + printk(KERN_WARNING "%s: No suitable DMA " + "available. Falling back to PIO.\n", + mmc_hostname(mmc)); +- host->flags &= ~(SDHCI_USE_DMA | SDHCI_USE_ADMA); ++ host->flags &= ++ ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + } + } +@@ -1736,7 +1860,7 @@ int sdhci_add_host(struct sdhci_host *ho + * mask, but PIO does not need the hw shim so we set a new + * mask here in that case. 
+ */ +- if (!(host->flags & SDHCI_USE_DMA)) { ++ if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { + host->dma_mask = DMA_BIT_MASK(64); + mmc_dev(host->mmc)->dma_mask = &host->dma_mask; + } +@@ -1757,13 +1881,15 @@ int sdhci_add_host(struct sdhci_host *ho + host->timeout_clk = + (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; + if (host->timeout_clk == 0) { +- if (!host->ops->get_timeout_clock) { ++ if (host->ops->get_timeout_clock) { ++ host->timeout_clk = host->ops->get_timeout_clock(host); ++ } else if (!(host->quirks & ++ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { + printk(KERN_ERR + "%s: Hardware doesn't specify timeout clock " + "frequency.\n", mmc_hostname(mmc)); + return -ENODEV; + } +- host->timeout_clk = host->ops->get_timeout_clock(host); + } + if (caps & SDHCI_TIMEOUT_CLK_UNIT) + host->timeout_clk *= 1000; +@@ -1772,7 +1898,8 @@ int sdhci_add_host(struct sdhci_host *ho + * Set host parameters. + */ + mmc->ops = &sdhci_ops; +- if (host->ops->get_min_clock) ++ if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK && ++ host->ops->set_clock && host->ops->get_min_clock) + mmc->f_min = host->ops->get_min_clock(host); + else + mmc->f_min = host->max_clk / 256; +@@ -1810,7 +1937,7 @@ int sdhci_add_host(struct sdhci_host *ho + */ + if (host->flags & SDHCI_USE_ADMA) + mmc->max_hw_segs = 128; +- else if (host->flags & SDHCI_USE_DMA) ++ else if (host->flags & SDHCI_USE_SDMA) + mmc->max_hw_segs = 1; + else /* PIO */ + mmc->max_hw_segs = 128; +@@ -1893,10 +2020,10 @@ int sdhci_add_host(struct sdhci_host *ho + + mmc_add_host(mmc); + +- printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s%s\n", ++ printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", + mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), +- (host->flags & SDHCI_USE_ADMA)?"A":"", +- (host->flags & SDHCI_USE_DMA)?"DMA":"PIO"); ++ (host->flags & SDHCI_USE_ADMA) ? "ADMA" : ++ (host->flags & SDHCI_USE_SDMA) ? 
"SDMA" : "PIO"); + + sdhci_enable_card_detection(host); + +--- /dev/null ++++ b/drivers/mmc/host/sdhci-cns3xxx.c +@@ -0,0 +1,313 @@ ++/******************************************************************************* ++ * ++ * drivers/mmc/host/sdhci-cns3xxx.c ++ * ++ * SDHCI support for the CNS3XXX SOCs ++ * ++ * Author: Scott Shu ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "sdhci.h" ++ ++//#define DEBUG ++ ++#define MAX_BUS_CLK (4) ++ ++static unsigned __initdata use_dma = 0; ++ ++struct sdhci_cns3xxx { ++ struct sdhci_host *host; ++ struct platform_device *pdev; ++ struct resource *ioarea; ++ struct cns3xxx_sdhci_platdata *pdata; ++ struct clk *clk_io; ++ struct clk *clk_bus[MAX_BUS_CLK]; ++}; ++ ++static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) ++{ ++ int clk = 50000000; ++ ++ return clk; ++} ++ ++static unsigned int sdhci_cns3xxx_get_timeout_clk(struct sdhci_host *host) ++{ ++ return sdhci_cns3xxx_get_max_clk(host) / 100000; ++} ++ ++/* ++ * sdhci_cns3xxx_set_clock - callback on clock change ++ * ++ * When the card's clock is going to be changed, look at the new frequency ++ * and find the best clock source to go with it. 
++ */ ++static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock) ++{ ++ u16 clk; ++ unsigned long timeout; ++ ++ if (clock == host->clock) ++ return; ++ ++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); ++ ++ if (clock == 0) ++ goto out; ++#if 1 ++ clk = 0x03 << SDHCI_DIVIDER_SHIFT; /* base clock divided by 3 */ ++#else ++ /* high speed mode or normal speed mode */ ++ if (0x4 & sdhci_readw(host, 0x28)) { ++ clk = 0x03 << SDHCI_DIVIDER_SHIFT; /* base clock divided by 3 */ ++ } else { ++ clk = 0x02 << SDHCI_DIVIDER_SHIFT; /* base clock divided by 4 */ ++ } ++#endif ++ clk |= SDHCI_CLOCK_INT_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++ ++ timeout = 10; ++ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) ++ & SDHCI_CLOCK_INT_STABLE)) { ++ if (timeout == 0) { ++ return; ++ } ++ timeout--; ++ mdelay(1); ++ } ++ ++ clk |= SDHCI_CLOCK_CARD_EN; ++ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); ++ ++ host->timeout_clk = sdhci_cns3xxx_get_timeout_clk(host); ++out: ++ host->clock = clock; ++} ++ ++static struct sdhci_ops sdhci_cns3xxx_ops = { ++ .get_max_clock = sdhci_cns3xxx_get_max_clk, ++ .get_timeout_clock = sdhci_cns3xxx_get_timeout_clk, ++ .set_clock = sdhci_cns3xxx_set_clock, ++}; ++ ++static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) ++{ ++ struct cns3xxx_sdhci_platdata *pdata = pdev->dev.platform_data; ++ struct device *dev = &pdev->dev; ++ struct sdhci_host *host; ++ struct sdhci_cns3xxx *sc; ++ struct resource *res; ++ int ret, irq; ++ ++ if (!pdata) { ++ dev_err(dev, "no device data specified\n"); ++ return -ENOENT; ++ } ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(dev, "no irq specified\n"); ++ return irq; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "no memory specified\n"); ++ return -ENOENT; ++ } ++ ++ host = sdhci_alloc_host(dev, sizeof(*sc)); ++ if (IS_ERR(host)) { ++ dev_err(dev, "sdhci_alloc_host() failed\n"); ++ return 
PTR_ERR(host); ++ } ++ ++ sc = sdhci_priv(host); ++ ++ sc->host = host; ++ sc->pdev = pdev; ++ sc->pdata = pdata; ++ ++ platform_set_drvdata(pdev, host); ++ ++ sc->ioarea = request_mem_region(res->start, resource_size(res), mmc_hostname(host->mmc)); ++ if (!sc->ioarea) { ++ dev_err(dev, "failed to reserve register area\n"); ++ ret = -ENXIO; ++ goto err_req_regs; ++ } ++ ++ host->ioaddr = ioremap_nocache(res->start, resource_size(res)); ++ if (!host->ioaddr) { ++ dev_err(dev, "failed to map registers\n"); ++ ret = -ENXIO; ++ goto err_req_regs; ++ } ++ ++ host->hw_name = "cns3xxx"; ++ host->ops = &sdhci_cns3xxx_ops; ++ host->quirks = 0; ++ host->irq = irq; ++ ++ if (use_dma != 1) { ++ host->quirks |= SDHCI_QUIRK_BROKEN_DMA; ++ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; ++ } else { ++ host->quirks |= SDHCI_QUIRK_FORCE_DMA; ++ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; ++ host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE); ++ host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; ++ //host->quirks |= SDHCI_QUIRK_FORCE_BLK_SZ_2048; ++ //host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; ++ host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; ++ host->quirks |= SDHCI_QUIRK_AUTO_CMD12; ++ host->quirks |= SDHCI_QUIRK_READ_WAIT_CTRL; ++ } ++ ++ //host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; ++ ++ host->quirks |= SDHCI_QUIRK_NONSTANDARD_CLOCK; ++ ++ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; ++ ++ ret = sdhci_add_host(host); ++ if (ret) { ++ dev_err(dev, "sdhci_add_host() failed (%d)\n", ret); ++ goto err_add_host; ++ } ++ ++ return 0; ++ ++err_add_host: ++ free_irq(host->irq, host); ++ iounmap(host->ioaddr); ++ release_resource(sc->ioarea); ++ kfree(sc->ioarea); ++ ++err_req_regs: ++ sdhci_free_host(host); ++ ++ return ret; ++} ++ ++static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct sdhci_host *host = dev_get_drvdata(dev); ++ struct resource *res; ++ ++ pr_debug("%s: remove=%p\n", 
__func__, pdev); ++ ++ sdhci_remove_host(host, 0); ++ sdhci_free_host(host); ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ release_mem_region(res->start, resource_size(res)); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++ ++static int sdhci_cns3xxx_suspend(struct platform_device *dev, pm_message_t state) ++{ ++ ++ return 0; ++} ++ ++static int sdhci_cns3xxx_resume(struct platform_device *dev) ++{ ++ ++ return 0; ++} ++ ++#else ++#define sdhci_cns3xxx_suspend NULL ++#define sdhci_cns3xxx_resume NULL ++#endif /* CONFIG_PM */ ++ ++static struct platform_driver sdhci_cns3xxx_driver = { ++ .probe = sdhci_cns3xxx_probe, ++ .remove = __devexit_p(sdhci_cns3xxx_remove), ++ .suspend = sdhci_cns3xxx_suspend, ++ .resume = sdhci_cns3xxx_resume, ++ .driver = { ++ .name = "cns3xxx-sdhci", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static char banner[] __initdata = KERN_INFO "cns3xxx-sdhci, (c) 2009 Cavium Networks\n"; ++ ++static int __init sdhci_cns3xxx_init(void) ++{ ++#ifdef CONFIG_SILICON ++ unsigned long gpioapin = __raw_readl((void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0014));; ++#else ++ unsigned long status = __raw_readl((void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0700)); ++#endif ++ ++ printk(banner); ++ ++#ifdef CONFIG_SILICON ++ /* MMC/SD pins share with GPIOA */ ++ __raw_writel(gpioapin | (0x1fff0004), (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0014)); ++ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SDIO)); ++ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SDIO)); ++#else ++ /* insert a delay on SDIO output interface (only for FPGA mode & high-speed mode) */ ++ __raw_writel(status | (1 << 4), (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0700)); ++#endif ++ return platform_driver_register(&sdhci_cns3xxx_driver); ++} ++ ++static void __exit sdhci_cns3xxx_exit(void) ++{ ++ platform_driver_unregister(&sdhci_cns3xxx_driver); ++} ++ ++module_init(sdhci_cns3xxx_init); ++module_exit(sdhci_cns3xxx_exit); ++ ++module_param(use_dma, uint, 0); ++ ++MODULE_AUTHOR("Scott Shu"); 
++MODULE_DESCRIPTION("Cavium Networks CNS3XXX SDHCI glue"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:cns3xxx-sdhci"); ++ ++MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 0"); +--- a/drivers/mmc/host/sdhci.h ++++ b/drivers/mmc/host/sdhci.h +@@ -8,6 +8,8 @@ + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ ++#ifndef __SDHCI_H ++#define __SDHCI_H + + #include + #include +@@ -78,6 +80,7 @@ + #define SDHCI_POWER_330 0x0E + + #define SDHCI_BLOCK_GAP_CONTROL 0x2A ++#define SDHCI_READ_WAIT_CTRL 0x04 + + #define SDHCI_WAKE_UP_CONTROL 0x2B + +@@ -143,7 +146,7 @@ + #define SDHCI_CAN_DO_ADMA2 0x00080000 + #define SDHCI_CAN_DO_ADMA1 0x00100000 + #define SDHCI_CAN_DO_HISPD 0x00200000 +-#define SDHCI_CAN_DO_DMA 0x00400000 ++#define SDHCI_CAN_DO_SDMA 0x00400000 + #define SDHCI_CAN_VDD_330 0x01000000 + #define SDHCI_CAN_VDD_300 0x02000000 + #define SDHCI_CAN_VDD_180 0x04000000 +@@ -232,6 +235,12 @@ struct sdhci_host { + #define SDHCI_QUIRK_FORCE_1_BIT_DATA (1<<22) + /* Controller needs 10ms delay between applying power and clock */ + #define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) ++/* Controller uses SDCLK instead of TMCLK for data timeouts */ ++#define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) ++/* Controller uses Auto CMD12 */ ++#define SDHCI_QUIRK_AUTO_CMD12 (1<<25) ++/* Controller uses read wait control protocol */ ++#define SDHCI_QUIRK_READ_WAIT_CTRL (1<<26) + + int irq; /* Device IRQ */ + void __iomem * ioaddr; /* Mapped address */ +@@ -250,7 +259,7 @@ struct sdhci_host { + spinlock_t lock; /* Mutex */ + + int flags; /* Host attributes */ +-#define SDHCI_USE_DMA (1<<0) /* Host is DMA capable */ ++#define SDHCI_USE_SDMA (1<<0) /* Host is SDMA capable */ + #define SDHCI_USE_ADMA (1<<1) /* Host is ADMA capable */ + #define SDHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. 
*/ + #define SDHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */ +@@ -406,3 +415,5 @@ extern void sdhci_remove_host(struct sdh + extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); + extern int sdhci_resume_host(struct sdhci_host *host); + #endif ++ ++#endif /* __SDHCI_H */ +--- a/include/linux/mmc/card.h ++++ b/include/linux/mmc/card.h +@@ -40,6 +40,8 @@ struct mmc_csd { + }; + + struct mmc_ext_csd { ++ u8 rev; ++ unsigned int sa_timeout; /* Units: 100ns */ + unsigned int hs_max_dtr; + unsigned int sectors; + }; +@@ -62,7 +64,8 @@ struct sdio_cccr { + low_speed:1, + wide_bus:1, + high_power:1, +- high_speed:1; ++ high_speed:1, ++ disable_cd:1; + }; + + struct sdio_cis { +@@ -94,6 +97,8 @@ struct mmc_card { + #define MMC_STATE_READONLY (1<<1) /* card is read-only */ + #define MMC_STATE_HIGHSPEED (1<<2) /* card is in high speed mode */ + #define MMC_STATE_BLOCKADDR (1<<3) /* card uses block-addressing */ ++ unsigned int quirks; /* card quirks */ ++#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ + + u32 raw_cid[4]; /* raw card CID */ + u32 raw_csd[4]; /* raw card CSD */ +@@ -129,6 +134,11 @@ struct mmc_card { + #define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED) + #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) + ++static inline int mmc_card_lenient_fn0(const struct mmc_card *c) ++{ ++ return c->quirks & MMC_QUIRK_LENIENT_FN0; ++} ++ + #define mmc_card_name(c) ((c)->cid.prod_name) + #define mmc_card_id(c) (dev_name(&(c)->dev)) + +--- a/include/linux/mmc/core.h ++++ b/include/linux/mmc/core.h +@@ -139,6 +139,7 @@ extern unsigned int mmc_align_data_size( + + extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); + extern void mmc_release_host(struct mmc_host *host); ++extern int mmc_try_claim_host(struct mmc_host *host); + + /** + * mmc_claim_host - exclusively claim a host +--- a/include/linux/mmc/host.h ++++ b/include/linux/mmc/host.h +@@ -11,6 
+11,7 @@ + #define LINUX_MMC_HOST_H + + #include ++#include + + #include + +@@ -51,6 +52,35 @@ struct mmc_ios { + }; + + struct mmc_host_ops { ++ /* ++ * Hosts that support power saving can use the 'enable' and 'disable' ++ * methods to exit and enter power saving states. 'enable' is called ++ * when the host is claimed and 'disable' is called (or scheduled with ++ * a delay) when the host is released. The 'disable' is scheduled if ++ * the disable delay set by 'mmc_set_disable_delay()' is non-zero, ++ * otherwise 'disable' is called immediately. 'disable' may be ++ * scheduled repeatedly, to permit ever greater power saving at the ++ * expense of ever greater latency to re-enable. Rescheduling is ++ * determined by the return value of the 'disable' method. A positive ++ * value gives the delay in milliseconds. ++ * ++ * In the case where a host function (like set_ios) may be called ++ * with or without the host claimed, enabling and disabling can be ++ * done directly and will nest correctly. Call 'mmc_host_enable()' and ++ * 'mmc_host_lazy_disable()' for this purpose, but note that these ++ * functions must be paired. ++ * ++ * Alternatively, 'mmc_host_enable()' may be paired with ++ * 'mmc_host_disable()' which calls 'disable' immediately. In this ++ * case the 'disable' method will be called with 'lazy' set to 0. ++ * This is mainly useful for error paths. ++ * ++ * Because lazy disable may be called from a work queue, the 'disable' ++ * method must claim the host when 'lazy' != 0, which will work ++ * correctly because recursion is detected and handled. 
++ */ ++ int (*enable)(struct mmc_host *host); ++ int (*disable)(struct mmc_host *host, int lazy); + void (*request)(struct mmc_host *host, struct mmc_request *req); + /* + * Avoid calling these three functions too often or in a "fast path", +@@ -118,6 +148,9 @@ struct mmc_host { + #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ + #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ + #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ ++#define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ ++#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ ++#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ + + /* host specific block data */ + unsigned int max_seg_size; /* see blk_queue_max_segment_size */ +@@ -142,9 +175,18 @@ struct mmc_host { + unsigned int removed:1; /* host is being removed */ + #endif + ++ /* Only used with MMC_CAP_DISABLE */ ++ int enabled; /* host is enabled */ ++ int nesting_cnt; /* "enable" nesting count */ ++ int en_dis_recurs; /* detect recursion */ ++ unsigned int disable_delay; /* disable delay in msecs */ ++ struct delayed_work disable; /* disabling work */ ++ + struct mmc_card *card; /* device attached to this host */ + + wait_queue_head_t wq; ++ struct task_struct *claimer; /* task that has host claimed */ ++ int claim_cnt; /* "claim" nesting count */ + + struct delayed_work detect; + +@@ -183,6 +225,9 @@ static inline void *mmc_priv(struct mmc_ + extern int mmc_suspend_host(struct mmc_host *, pm_message_t); + extern int mmc_resume_host(struct mmc_host *); + ++extern void mmc_power_save_host(struct mmc_host *host); ++extern void mmc_power_restore_host(struct mmc_host *host); ++ + extern void mmc_detect_change(struct mmc_host *, unsigned long delay); + extern void mmc_request_done(struct mmc_host *, struct mmc_request *); + +@@ -197,5 +242,19 @@ struct regulator; + int mmc_regulator_get_ocrmask(struct regulator *supply); + int 
mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); + ++int mmc_card_awake(struct mmc_host *host); ++int mmc_card_sleep(struct mmc_host *host); ++int mmc_card_can_sleep(struct mmc_host *host); ++ ++int mmc_host_enable(struct mmc_host *host); ++int mmc_host_disable(struct mmc_host *host); ++int mmc_host_lazy_disable(struct mmc_host *host); ++ ++static inline void mmc_set_disable_delay(struct mmc_host *host, ++ unsigned int disable_delay) ++{ ++ host->disable_delay = disable_delay; ++} ++ + #endif + +--- a/include/linux/mmc/mmc.h ++++ b/include/linux/mmc/mmc.h +@@ -31,6 +31,7 @@ + #define MMC_ALL_SEND_CID 2 /* bcr R2 */ + #define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */ + #define MMC_SET_DSR 4 /* bc [31:16] RCA */ ++#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */ + #define MMC_SWITCH 6 /* ac [31:0] See below R1b */ + #define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */ + #define MMC_SEND_EXT_CSD 8 /* adtc R1 */ +@@ -127,6 +128,7 @@ + #define R1_STATUS(x) (x & 0xFFFFE000) + #define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */ + #define R1_READY_FOR_DATA (1 << 8) /* sx, a */ ++#define R1_SWITCH_ERROR (1 << 7) /* sx, c */ + #define R1_APP_CMD (1 << 5) /* sr, c */ + + /* +@@ -254,6 +256,7 @@ struct _mmc_csd { + #define EXT_CSD_CARD_TYPE 196 /* RO */ + #define EXT_CSD_REV 192 /* RO */ + #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ ++#define EXT_CSD_S_A_TIMEOUT 217 + + /* + * EXT_CSD field definitions +--- a/include/linux/mmc/sdio_func.h ++++ b/include/linux/mmc/sdio_func.h +@@ -67,6 +67,7 @@ struct sdio_func { + + #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) + #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) ++#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev) + + /* + * SDIO function device driver +@@ -81,6 +82,8 @@ struct sdio_driver { + struct device_driver drv; + }; + ++#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) ++ + /** + * SDIO_DEVICE - macro used to 
describe a specific SDIO device + * @vend: the 16 bit manufacturer code +--- a/include/linux/mmc/sdio_ids.h ++++ b/include/linux/mmc/sdio_ids.h +@@ -22,6 +22,12 @@ + /* + * Vendors and devices. Sort key: vendor first, device next. + */ ++#define SDIO_VENDOR_ID_INTEL 0x0089 ++#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 ++#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 ++#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 ++#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 ++#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 + + #define SDIO_VENDOR_ID_MARVELL 0x02df + #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 diff --git a/target/linux/cns3xxx/patches-2.6.31/205-cns3xxx_net_device_support.patch b/target/linux/cns3xxx/patches-2.6.31/205-cns3xxx_net_device_support.patch new file mode 100644 index 0000000000..945edec6be --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/205-cns3xxx_net_device_support.patch @@ -0,0 +1,11802 @@ +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_config.h +@@ -0,0 +1,136 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++#include ++ ++#ifndef CNS3XXX_CONFIG_H ++#define CNS3XXX_CONFIG_H ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27) ++#define LINUX2627 1 ++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) ++#define LINUX2631 1 ++#endif ++ ++ ++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) ++#define CNS3XXX_VLAN_8021Q ++#endif ++ ++#ifdef CNS3XXX_VLAN_8021Q ++//#define CNS3XXX_NIC_MODE_8021Q // use NIC mode to support 8021Q ++ ++#endif ++ ++#define CONFIG_CNS3XXX_JUMBO_FRAME ++ ++#ifdef CONFIG_CNS3XXX_JUMBO_FRAME ++#define MAX_PACKET_LEN 9600 ++#else ++#define MAX_PACKET_LEN 1536 ++#endif ++ ++//#define CONFIG_SWITCH_BIG_ENDIAN ++ ++//#define CONFIG_FPGA_FORCE ++ ++//#define CNS3XXX_GIGA_MODE ++ ++#define CNS3XXX_SET_ARL_TABLE ++#define CNS3XXX_AGE_ENABLE ++#define CNS3XXX_LEARN_ENABLE ++#define CNS3XXX_CPU_PORT_FC ++#define CNS3XXX_CPU_MIB_COUNTER ++#define CNS3XXX_MAC0_MIB_COUNTER ++#define CNS3XXX_MAC1_MIB_COUNTER ++//#define CNS3XXX_MAC2_MIB_COUNTER ++//#define QOS_TEST ++//#define ACCEPT_CRC_BAD_PKT ++//#define CONFIG_FAST_BRIDGE ++//#define CONFIG_HOLP_TEST ++ ++ ++#define CONFIG_CNS3XXX_NAPI ++#ifdef CONFIG_CNS3XXX_NAPI ++#define CNS3XXX_NAPI_WEIGHT 64 ++#endif ++//#define CONFIG_NIC_MODE ++//#define CNS3XXX_TX_HW_CHECKSUM ++//#define CNS3XXX_RX_HW_CHECKSUM ++//#define CNS3XXX_STATUS_ISR ++//#define CNS3XXX_TEST_ONE_LEG_VLAN ++//#define CNS3XXX_TX_DSCP_PROC ++ ++ ++#define CNS3XXX_FSQF_RING0_ISR ++//#define CNS3XXX_TSTC_RING0_ISR ++//#define CNS3XXX_TSTC_RING1_ISR ++ ++//#define CNS3XXX_COMPARE_PACKET ++//#define CONFIG_FPGA_10 ++//#define CNS3XXX_CONFIG_SIM_MODE ++ ++#define CNS3XXX_8021Q_HW_TX ++ ++ ++#ifndef CONFIG_CNS3XXX_SPPE ++#define IVL // if no define, use SVL ++#endif ++//#define CNS3XXX_4N // if don't define it, use 4N+2 ++ 
++//#define NCNB_TEST ++//#define CNS3XXX_TEST_D_CACHE ++#define CNS3XXX_FREE_TX_IN_RX_PATH ++ ++ ++//#define DEBUG_RX ++//#define DEBUG_TX ++//#define DEBUG_PRIO_IPDSCR ++#define DEBUG_RX_PROC ++#define DEBUG_TX_PROC ++//#define DEBUG_PHY_PROC ++#define CNS3XXX_PVID_PROC ++#define CNS3XXX_SARL_PROC ++ ++ ++//#define DOUBLE_RING_TEST ++ ++//#define CNS3XXX_DOUBLE_RX_RING ++//#define CNS3XXX_DOUBLE_TX_RING ++#define CNS3XXX_USE_MASK ++ ++#define CNS3XXX_CONFIG_CHANGE_TX_RING ++ ++#ifdef CNS3XXX_DOUBLE_RX_RING ++#define CNS3XXX_FSQF_RING1_ISR ++#endif ++ ++//#define CNS3XXX_DELAYED_INTERRUPT ++ ++#ifdef CNS3XXX_DELAYED_INTERRUPT ++#define MAX_PEND_INT_CNT 0x06 ++#define MAX_PEND_TIME 0x20 ++#endif ++ ++//#define CNS3XXX_ENABLE_RINT1 ++#endif +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_ethtool.c +@@ -0,0 +1,436 @@ ++/******************************************************************************* ++ * ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. 
++ * ++ ********************************************************************************/ ++ ++//#include ++#include ++#include ++#include ++#include "cns3xxx_symbol.h" ++#include "cns3xxx.h" ++#include "cns3xxx_tool.h" ++ ++// ethtool support reference e100.c and e1000_ethtool.c . ++static void cns3xxx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) ++{ ++ strcpy(info->driver, "cns3xxx"); ++ strcpy(info->version, DRV_VERSION); ++ strcpy(info->fw_version, "N/A"); ++ strcpy(info->bus_info, "N/A"); ++} ++ ++static void cns3xxx_get_ringparam(struct net_device *netdev, ++ struct ethtool_ringparam *ring) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ ring->rx_max_pending = priv->rx_ring->max_ring_size; ++ ring->tx_max_pending = priv->tx_ring->max_ring_size; ++ ring->rx_pending = priv->rx_ring->ring_size; ++ ring->tx_pending = priv->tx_ring->ring_size; ++#if 0 ++ struct nic *nic = netdev_priv(netdev); ++ struct param_range *rfds = &nic->params.rfds; ++ struct param_range *cbs = &nic->params.cbs; ++ ++ ring->rx_max_pending = rfds->max; ++ ring->tx_max_pending = cbs->max; ++ ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; ++ ring->rx_pending = rfds->count; ++ ring->tx_pending = cbs->count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; ++#endif ++} ++ ++ ++ ++static int cns3xxx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) ++{ ++ int cns3xxx_up(void); ++ int cns3xxx_down(void); ++ int cns3xxx_close(struct net_device *dev); ++ int cns3xxx_open(struct net_device *dev); ++ extern struct net_device *net_dev_array[]; ++ ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ int i=0; ++ ++#if 0 ++ struct nic *nic = netdev_priv(netdev); ++ struct param_range *rfds = &nic->params.rfds; ++ struct param_range *cbs = &nic->params.cbs; ++ ++ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) ++ return -EINVAL; ++ ++ if(netif_running(netdev)) ++ e100_down(nic); ++ rfds->count = 
max(ring->rx_pending, rfds->min); ++ rfds->count = min(rfds->count, rfds->max); ++ cbs->count = max(ring->tx_pending, cbs->min); ++ cbs->count = min(cbs->count, cbs->max); ++ DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n", ++ rfds->count, cbs->count); ++ if(netif_running(netdev)) ++ e100_up(nic); ++ ++#endif ++ //ring->rx_max_pending = RX_DESC_SIZE; ++ //ring->tx_max_pending = TX_DESC_SIZE; ++ ++#if 0 ++ printk("ring->rx_max_pending: %d\n", ring->rx_max_pending); ++ printk("ring->tx_max_pending: %d\n", ring->tx_max_pending); ++ printk("ring->rx_pending: %d\n", ring->rx_pending); ++ printk("ring->tx_pending: %d\n", ring->tx_pending); ++#endif ++ ++ for (i=0 ; i < NETDEV_SIZE ; ++i) { ++ if(net_dev_array[i] && netif_running(net_dev_array[i])) { ++ //printk("close net_dev_array[%d]: %s\n", i, net_dev_array[i]); ++ cns3xxx_close(net_dev_array[i]); ++ } ++ } ++ ++ //cns3xxx_down(); ++ ++ priv->rx_ring->ring_size = min(ring->rx_pending, priv->rx_ring->max_ring_size); ++ priv->tx_ring->ring_size = min(ring->tx_pending, priv->tx_ring->max_ring_size); ++ ++ for (i=0 ; i < NETDEV_SIZE ; ++i) { ++ if(net_dev_array[i] && netif_running(net_dev_array[i])) { ++ //printk("open net_dev_array[%d]: %s\n", i, net_dev_array[i]); ++ cns3xxx_open(net_dev_array[i]); ++ } ++ } ++ //cns3xxx_up(); ++ ++ return 0; ++} ++ ++static uint32_t cns3xxx_get_tx_csum(struct net_device *netdev) ++{ ++ //return (netdev->features & NETIF_F_HW_CSUM) != 0; ++ return (netdev->features & NETIF_F_IP_CSUM) != 0; ++} ++ ++static int cns3xxx_set_tx_csum(struct net_device *netdev, uint32_t data) ++{ ++ if (data) ++ netdev->features |= NETIF_F_IP_CSUM; ++ else ++ netdev->features &= ~NETIF_F_IP_CSUM; ++ return 0; ++} ++ ++static uint32_t cns3xxx_get_rx_csum(struct net_device *netdev) ++{ ++ //struct e1000_adapter *adapter = netdev_priv(netdev); ++ //return adapter->rx_csum; ++ return 1; ++} ++ ++static int cns3xxx_set_rx_csum(struct net_device *netdev, uint32_t data) ++{ ++ return 0; ++} ++ ++u32
cns3xxx_get_sg(struct net_device *dev) ++{ ++#ifdef NETIF_F_SG ++ return (dev->features & NETIF_F_SG) != 0; ++#else ++ return 0; ++#endif ++} ++ ++int cns3xxx_set_sg(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_SG ++ if (data) ++ dev->features |= NETIF_F_SG; ++ else ++ dev->features &= ~NETIF_F_SG; ++#endif ++ ++ return 0; ++} ++ ++static void cns3xxx_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) ++{ ++ u32 mac_port_config = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ switch (priv->net_device_priv->which_port) ++ { ++ case MAC_PORT0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case MAC_PORT1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case MAC_PORT2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ ++ pause->autoneg = ( ((mac_port_config >> 7) & 1) ? AUTONEG_ENABLE : AUTONEG_DISABLE); ++ pause->tx_pause = (mac_port_config >> 6) & 1; ++ pause->rx_pause = (mac_port_config >> 5) & 1; ++} ++ ++static int cns3xxx_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) ++{ ++ u32 mac_port_config = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ switch (priv->net_device_priv->which_port) ++ { ++ case MAC_PORT0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case MAC_PORT1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case MAC_PORT2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ ++ mac_port_config &= ~(0x1 << 7); // clean AN ++ mac_port_config &= ~(0x1 << 11); // clean rx flow control ++ mac_port_config &= ~(0x1 << 12); // clean tx flow control ++ ++ mac_port_config |= ( (pause->autoneg << 7) | (pause->rx_pause << 11) | (pause->tx_pause << 12) ); ++ ++ ++ switch (priv->net_device_priv->which_port) ++ { ++ case MAC_PORT0: ++ { ++ MAC0_CFG_REG = mac_port_config; ++ break; ++ } ++ case MAC_PORT1: ++ { ++ MAC1_CFG_REG = mac_port_config; ++ break; ++ } ++ case MAC_PORT2: ++ { ++ MAC2_CFG_REG = mac_port_config; 
++ break; ++ } ++ } ++ return 0; ++} ++ ++u32 cns3xxx_get_link(struct net_device *netdev) ++{ ++ u32 mac_port_config = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ switch (priv->net_device_priv->which_port) ++ { ++ case MAC_PORT0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case MAC_PORT1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case MAC_PORT2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ return (mac_port_config & 1 ) ? 1 : 0; ++ //return netif_carrier_ok(dev) ? 1 : 0; ++} ++ ++ ++static int cns3xxx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ++{ ++ u8 value; ++ u32 mac_port_config = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ ++ if (priv->net_device_priv->nic_setting == 0) { // connect to switch chip ++ ++ GET_MAC_PORT_CFG(priv->net_device_priv->which_port, mac_port_config) ++ ++ ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause); ++ ++ ecmd->duplex = ((mac_port_config >> 4) & 0x1) ? DUPLEX_FULL : DUPLEX_HALF ; ++ ++ value = ((mac_port_config >> 2) & 0x3); ++ switch (value) ++ { ++ case 0: ++ ecmd->speed = SPEED_10; ++ break; ++ case 1: ++ ecmd->speed = SPEED_100; ++ break; ++ case 2: ++ ecmd->speed = SPEED_1000; ++ break; ++ } ++ ++ ecmd->autoneg = ((mac_port_config >> 7) & 1) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; ++ ++ ++ ++ } else { // connect to PHY chip ++ ++ } ++ ++ return 0; ++} ++ ++// set speed and duplex ++int cns3xxx_set_spd_dplx(struct net_device *netdev, u16 spddplx) ++{ ++ u32 mac_port_config = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ GET_MAC_PORT_CFG(priv->net_device_priv->which_port, mac_port_config) ++ ++ //printk("mac_port_config: %x\n", mac_port_config); ++ ++ mac_port_config &= ~(0x3 << 8); // clear speed ++ mac_port_config &= ~(0x1 << 10); // clear duplex ++ mac_port_config &= ~(0x1 << 7); // disable AN ++ ++ switch (spddplx) { ++ case AUTONEG_ENABLE: ++ mac_port_config |= (0x1 << 7); // enable AN ++ break; ++ case SPEED_10 + DUPLEX_HALF: ++ printk("10, halt\n"); ++ mac_port_config |= (0 << 8); // set speed ++ mac_port_config |= (0 << 10); // set duplex ++ //printk("xxx mac_port_config: %x\n", mac_port_config); ++ break; ++ case SPEED_10 + DUPLEX_FULL: ++ mac_port_config |= (0 << 8); // set speed ++ mac_port_config |= (1 << 10); // set duplex ++ break; ++ case SPEED_100 + DUPLEX_HALF: ++ mac_port_config |= (1 << 8); // set speed ++ mac_port_config |= (0 << 10); // set duplex ++ break; ++ case SPEED_100 + DUPLEX_FULL: ++ mac_port_config |= (1 << 8); // set speed ++ mac_port_config |= (1 << 10); // set duplex ++ break; ++ case SPEED_1000 + DUPLEX_HALF: ++ mac_port_config |= (2 << 8); // set speed ++ mac_port_config |= (0 << 10); // set duplex ++ break; ++ case SPEED_1000 + DUPLEX_FULL: ++ mac_port_config |= (2 << 8); // set speed ++ mac_port_config |= (1 << 10); // set duplex ++ break; ++ default: ++ //printk("Unsupported Speed/Duplex configuration\n"); ++ return -EINVAL; ++ } ++ ++ SET_MAC_PORT_CFG(priv->net_device_priv->which_port, mac_port_config) ++ ++ return 0; ++} ++ ++static int cns3xxx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ++{ ++ u8 value = 0; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ if (priv->net_device_priv->nic_setting == 0) { // connect to switch chip ++ if 
(ecmd->autoneg == AUTONEG_ENABLE) { ++ printk("autoneg\n"); ++ if ((value=cns3xxx_set_spd_dplx(netdev, AUTONEG_ENABLE)) != 0) { ++ return -EINVAL; ++ } ++ } else { ++ printk("no autoneg\n"); ++ if ((value=cns3xxx_set_spd_dplx(netdev, ecmd->speed + ecmd->duplex)) != 0) { ++ return -EINVAL; ++ } ++ ++ ++ } ++ ++ } else { // connect to PHY chip ++ ++ } ++ ++ // down then up ++ return 0; ++} ++ ++static const struct ethtool_ops cns3xxx_ethtool_ops = { ++ .get_drvinfo = cns3xxx_get_drvinfo, ++ .get_ringparam = cns3xxx_get_ringparam, ++ .set_ringparam = cns3xxx_set_ringparam, ++ .get_rx_csum = cns3xxx_get_rx_csum, ++ .set_rx_csum = cns3xxx_set_rx_csum, ++ .get_tx_csum = cns3xxx_get_tx_csum, ++ .set_tx_csum = cns3xxx_set_tx_csum, ++ .get_sg = cns3xxx_get_sg, ++ .set_sg = cns3xxx_set_sg, ++ .get_pauseparam = cns3xxx_get_pauseparam, ++ .set_pauseparam = cns3xxx_set_pauseparam, ++ .get_link = cns3xxx_get_link, ++ .get_settings = cns3xxx_get_settings, ++ .set_settings = cns3xxx_set_settings, ++}; ++ ++void cns3xxx_set_ethtool_ops(struct net_device *netdev) ++{ ++ SET_ETHTOOL_OPS(netdev, &cns3xxx_ethtool_ops); ++} +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx.h +@@ -0,0 +1,452 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. 
++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++#ifndef CNS3XXX_H ++#define CNS3XXX_H ++ ++#include "cns3xxx_symbol.h" ++#include "cns3xxx_config.h" ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++//#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) ++#include ++#endif ++ ++//#define VERSION "1.0" ++ ++ ++typedef struct ++{ ++ int32_t sdp; // segment data pointer ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 cown:1; ++ u32 eor:1; ++ u32 fsd:1; ++ u32 lsd:1; ++ u32 interrupt:1; ++ u32 fr:1; ++ u32 fp:1; // force priority ++ u32 pri:3; ++ u32 rsv_1:3; // reserve ++ u32 ico:1; ++ u32 uco:1; ++ u32 tco:1; ++ u32 sdl:16; // segment data length ++ ++#else ++ u32 sdl:16; // segment data length ++ u32 tco:1; ++ u32 uco:1; ++ u32 ico:1; ++ u32 rsv_1:3; // reserve ++ u32 pri:3; ++ u32 fp:1; // force priority ++ u32 fr:1; ++ u32 interrupt:1; ++ u32 lsd:1; ++ u32 fsd:1; ++ u32 eor:1; ++ u32 cown:1; ++#endif ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 rsv_3:5; ++ u32 fewan:1; ++ u32 ewan:1; ++ u32 mark:3; ++ u32 pmap:5; ++ u32 rsv_2:9; ++ u32 dels:1; ++ u32 inss:1; ++ u32 sid:4; ++ u32 stv:1; ++ u32 ctv:1; ++#else ++ u32 ctv:1; ++ u32 stv:1; ++ u32 sid:4; ++ u32 inss:1; ++ u32 dels:1; ++ u32 rsv_2:9; ++ u32 pmap:5; ++ u32 mark:3; ++ u32 ewan:1; ++ u32 fewan:1; ++ u32 
rsv_3:5; ++#endif ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 s_pri:3; ++ u32 s_dei:1; ++ u32 s_vid:12; ++ u32 c_pri:3; ++ u32 c_cfs:1; ++ u32 c_vid:12; ++#else ++ u32 c_vid:12; ++ u32 c_cfs:1; ++ u32 c_pri:3; ++ u32 s_vid:12; ++ u32 s_dei:1; ++ u32 s_pri:3; ++#endif ++ ++ u8 alignment[16]; // for alignment 32 byte ++ ++} __attribute__((packed)) TXDesc; ++ ++typedef struct ++{ ++ u32 sdp; ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 cown:1; ++ u32 eor:1; ++ u32 fsd:1; ++ u32 lsd:1; ++ u32 hr :6; ++ u32 prot:4; ++ u32 ipf:1; ++ u32 l4f:1; ++ u32 sdl:16; ++#else ++ u32 sdl:16; ++ u32 l4f:1; ++ u32 ipf:1; ++ u32 prot:4; ++ u32 hr :6; ++ u32 lsd:1; ++ u32 fsd:1; ++ u32 eor:1; ++ u32 cown:1; ++#endif ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 rsv_3:11; ++ u32 ip_offset:5; ++ u32 rsv_2:1; ++ u32 tc:2; ++ u32 un_eth:1; ++ u32 crc_err:1; ++ u32 sp:3; ++ u32 rsv_1:2; ++ u32 e_wan:1; ++ u32 exdv:1; ++ u32 iwan:1; ++ u32 unv:1; ++ u32 stv:1; ++ u32 ctv:1; ++#else ++ u32 ctv:1; ++ u32 stv:1; ++ u32 unv:1; ++ u32 iwan:1; ++ u32 exdv:1; ++ u32 e_wan:1; ++ u32 rsv_1:2; ++ u32 sp:3; ++ u32 crc_err:1; ++ u32 un_eth:1; ++ u32 tc:2; ++ u32 rsv_2:1; ++ u32 ip_offset:5; ++ u32 rsv_3:11; ++#endif ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ u32 s_pri:3; ++ u32 s_dei:1; ++ u32 s_vid:12; ++ u32 c_pri:3; ++ u32 c_cfs:1; ++ u32 c_vid:12; ++#else ++ u32 c_vid:12; ++ u32 c_cfs:1; ++ u32 c_pri:3; ++ u32 s_vid:12; ++ u32 s_dei:1; ++ u32 s_pri:3; ++#endif ++ ++ u8 alignment[16]; // for alignment 32 byte ++ ++} __attribute__((packed)) RXDesc; ++ ++typedef struct { ++ TXDesc *tx_desc; ++ struct sk_buff *skb; // for free skb ++ u32 pri; ++ unsigned long j; ++ unsigned long tx_index; ++}TXBuffer; ++ ++typedef struct { ++ RXDesc *rx_desc; ++ struct sk_buff *skb; // rx path need to fill some skb field, ex: length ... 
++#ifdef NCNB_TEST ++ u32 ncnb_index; ++#endif ++}RXBuffer; ++ ++ ++typedef struct { ++ TXBuffer *head; ++ TXDesc *tx_desc_head_vir_addr; ++ dma_addr_t tx_desc_head_phy_addr; ++ u32 cur_index; // for put send packet ++ spinlock_t tx_lock; ++ u32 non_free_tx_skb; ++ u32 free_tx_skb_index; ++ u32 ring_size; ++ u32 max_ring_size; ++}TXRing; ++ ++ ++typedef struct { ++ RXBuffer *head; ++ RXDesc *rx_desc_head_vir_addr; ++ dma_addr_t rx_desc_head_phy_addr; ++ u32 cur_index; ++ u32 ring_size; ++ u32 max_ring_size; ++}RXRing; ++ ++#if 0 ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ TXRing *tx_ring; ++ RXRing *rx_ring; ++}CNS3XXXRingStatus; ++#endif ++ ++ ++#define RX_RING0(priv) (priv->rx_ring[0]) ++#define TX_RING0(priv) (priv->tx_ring[0]) ++ ++ ++static inline u32 get_rx_ring_size(const RXRing *ring) ++{ ++ //printk("rx ring->ring_size: %d\n", ring->ring_size); ++ return ring->ring_size; ++} ++ ++static inline u32 get_tx_ring_size(TXRing *ring) ++{ ++ //printk("tx ring->ring_size: %d\n", ring->ring_size); ++ return ring->ring_size; ++} ++ ++static inline RXBuffer *get_rx_ring_head(const RXRing *rx_ring) ++{ ++ return rx_ring->head; ++} ++ ++static inline TXBuffer *get_tx_ring_head(TXRing *tx_ring) ++{ ++ return tx_ring->head; ++} ++ ++static inline RXBuffer *get_cur_rx_buffer(RXRing *rx_ring) ++{ ++ return rx_ring->head + rx_ring->cur_index; ++} ++ ++static inline TXBuffer *get_cur_tx_buffer(TXRing *tx_ring) ++{ ++ return tx_ring->head + tx_ring->cur_index; ++} ++ ++static inline u32 get_rx_head_phy_addr(RXRing *rx_ring) ++{ ++ return rx_ring->rx_desc_head_phy_addr; ++} ++ ++static inline u32 get_tx_ring_head_phy_addr(TXRing *tx_ring) ++{ ++ return tx_ring->tx_desc_head_phy_addr; ++} ++ ++ ++static inline u32 get_rx_cur_index(RXRing *rx_ring) ++{ ++ return rx_ring->cur_index; ++} ++ ++static inline u32 get_tx_cur_index(TXRing *tx_ring) ++{ ++ return tx_ring->cur_index; ++} ++ ++static inline u32 get_tx_cur_phy_addr(u8 ring_num) ++{ ++ if (ring_num == 0) ++ return 
TS_DESC_PTR0_REG; ++ if (ring_num == 1) ++ return TS_DESC_PTR1_REG; ++ return 0; // fail ++} ++ ++static inline void rx_index_next(RXRing *ring) ++{ ++ ring->cur_index = ((ring->cur_index + 1) % ring->ring_size); ++} ++static inline void tx_index_next(TXRing *ring) ++{ ++ ring->cur_index = ((ring->cur_index + 1) % ring->ring_size); ++} ++ ++ ++ ++struct CNS3XXXPrivate_; ++ ++typedef int (*RXFuncPtr)(struct sk_buff *skb, RXDesc*tx_desc_ptr, const struct CNS3XXXPrivate_* ); ++typedef int (*TXFuncPtr)(TXDesc*tx_desc_ptr, const struct CNS3XXXPrivate_*, struct sk_buff *); ++typedef void (*OpenPtr)(void); ++typedef void (*ClosePtr)(void); ++ ++ ++// for ethtool set operate ++typedef struct{ ++ ++}NICSetting; ++ ++typedef struct{ ++ int pmap; // for port base, force route ++ int is_wan; // mean the net device is WAN side. ++ //u16 gid; ++ u16 s_tag; ++ //u8 mac_type; // VLAN base, or port base; ++ u16 vlan_tag; ++ ++ // do port base mode and vlan base mode work ++ RXFuncPtr rx_func; ++ TXFuncPtr tx_func; ++ OpenPtr open; ++ ClosePtr close; ++ u8 which_port; ++ //NICSetting *nic_setting; ++ u8 *mac; // point to a mac address array ++ VLANTableEntry *vlan_table_entry; ++ ARLTableEntry *arl_table_entry; ++ NICSetting *nic_setting; ++ const char *name; // 16 bytes, reference include/linux/netdevice.h IFNAMSIZ ++}NetDevicePriv; ++ ++typedef struct ++{ ++ u8 num_rx_queues; ++ u8 num_tx_queues; ++ TXRing *tx_ring; ++ RXRing *rx_ring; ++}RingInfo; ++ ++ ++/* store this information for the driver.. */ ++typedef struct CNS3XXXPrivate_ ++{ ++ u8 num_rx_queues; ++ u8 num_tx_queues; ++ TXRing *tx_ring; ++ RXRing *rx_ring; ++ struct net_device_stats stats; ++ spinlock_t lock; ++ int pmap; ++ int is_wan; // mean the net device is WAN side. 
++ u16 gid; ++ u8 mac_type; // VLAN base, or port base; ++ u16 vlan_tag; ++ struct napi_struct napi; ++ struct work_struct reset_task; ++ ++ u8 which_port; ++ //NICSetting *nic_setting; ++ char name[IFNAMSIZ]; // 16 bytes, reference include/linux/netdevice.h IFNAMSIZ ++ ++ ++ NetDevicePriv *net_device_priv; ++ u8 ring_index; ++ ++ u32 rx_s_vid[4096]; // record receive s vid (0x9100 ...) ++ u32 rx_c_vid[4096]; // record receive c vid (0x8100 ...) ++#ifdef CONFIG_CNS3XXX_NAPI ++ volatile unsigned long is_qf; // determine rx ring queue full state ++#endif ++ ++#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) ++ struct vlan_group *vlgrp; ++#endif ++}CNS3XXXPrivate; ++ ++ ++ ++ ++int rx_port_base(struct sk_buff *skb, RXDesc *rx_desc_ptr, const struct CNS3XXXPrivate_ *priv); ++ ++int rx_vlan_base(struct sk_buff *skb, RXDesc *rx_desc_ptr, const struct CNS3XXXPrivate_ *priv); ++ ++int tx_port_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb); ++ ++ ++int tx_vlan_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb); ++#if defined (CONFIG_CNS3XXX_SPPE) ++int fp_port_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb); ++#endif ++#endif ++ +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_main.c +@@ -0,0 +1,3949 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. 
++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++#include ++#include ++#include ++#include "cns3xxx.h" ++#include "cns3xxx_tool.h" ++#include "cns3xxx_config.h" ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#define PACKET_REASON_TO_CPU (0x2C) ++#endif ++ ++#define RX_SDP_ALIGN 64 ++ ++#ifdef CONFIG_FPGA ++#include "fpga.h" ++#endif ++ ++#ifdef CONFIG_VB ++#include "vb.h" ++#endif ++ ++#define CPU_CACHE_BYTES 64 ++#define CPU_CACHE_ALIGN(X) (((X) + (CPU_CACHE_BYTES-1)) & ~(CPU_CACHE_BYTES-1)) ++ ++ ++#define QUEUE_WEIGHT_SET(port, ctl) \ ++{ \ ++ MAC##port##_PRI_CTRL_REG &= ~(0x3ffff); \ ++ MAC##port##_PRI_CTRL_REG |= (ctl.sch_mode << 16); \ ++ MAC##port##_PRI_CTRL_REG |= (ctl.q0_w); \ ++ MAC##port##_PRI_CTRL_REG |= (ctl.q1_w << 4); \ ++ MAC##port##_PRI_CTRL_REG |= (ctl.q2_w << 8); \ ++ MAC##port##_PRI_CTRL_REG |= (ctl.q3_w << 12); \ ++} ++ ++#define QUEUE_WEIGHT_GET(port, ctl) \ ++{ \ ++ ctl.sch_mode = ((MAC##port##_PRI_CTRL_REG >> 16 ) & 0x3); \ ++ ctl.q0_w = ((MAC##port##_PRI_CTRL_REG >> 0 ) & 0x7); \ ++ ctl.q1_w = ((MAC##port##_PRI_CTRL_REG >> 4 ) & 0x7); \ ++ ctl.q2_w = ((MAC##port##_PRI_CTRL_REG >> 8 ) & 0x7); \ ++ ctl.q3_w = ((MAC##port##_PRI_CTRL_REG >> 12 ) & 0x7); \ ++} ++ ++int cns3xxx_send_packet(struct sk_buff *skb, struct net_device *netdev); ++static int install_isr_rc = 0; ++static int rc_setup_rx_tx = 0; // rc means reference counting. 
++static struct net_device *intr_netdev; ++struct net_device *net_dev_array[NETDEV_SIZE]; ++spinlock_t tx_lock; ++spinlock_t rx_lock; ++u8 fast_bridge_en=1; ++u8 show_rx_proc=0; ++u8 show_tx_proc=0; ++ ++int init_port=7; // bit map 7 means port 0, 1 and 2, default is 7. ++//module_param(init_port, u8, S_IRUGO); ++module_param(init_port, int, 0); ++ ++u8 ring_index=0; // 0 or 1 ++ ++#ifdef CNS3XXX_DELAYED_INTERRUPT ++static u32 max_pend_int_cnt=MAX_PEND_INT_CNT, max_pend_time=MAX_PEND_TIME; ++#endif ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++struct net_device *napi_dev; ++ #ifdef CNS3XXX_DOUBLE_RX_RING ++ struct net_device *r1_napi_dev; // ring1 napi dev ++ #endif ++#endif ++ ++const u32 MAX_RX_DESC_SIZE = 512; ++const u32 MAX_TX_DESC_SIZE = 512; ++const u32 RX_DESC_SIZE = 128; ++//const u32 RX_DESC_SIZE = 5; ++const u32 TX_DESC_SIZE = 120; ++ ++//RXRing *rx_ring; ++//TXRing *tx_ring; ++ ++// only for debug (proc) ++RingInfo g_ring_info; ++ ++int MSG_LEVEL = NORMAL_MSG; ++ ++#ifdef CNS3XXX_STATUS_ISR ++const char *cns3xxx_gsw_status_tbl[] = { ++ "\nMAC0_Q_FULL\n", ++ "\nMAC1_Q_FULL\n", ++ "\nCPU_Q_FULL\n", ++ "\nHNAT_Q_FULL\n", ++ "\nMAC2_Q_FULL\n", ++ "\nMAC0_Q_EXT_FULL\n", ++ "\nGLOBAL_Q_FULL\n", ++ "\nBUFFER_FULL\n", ++ "\nMIB_COUNTER_TH\n", ++ "\n", // 9 ++ "\nMAC0_INTRUDER\n", ++ "\nMAC1_INTRUDER\n", ++ "\nCPU_INTRUDER\n", ++ "\nMAC2_INTRUDER\n", ++ "\nMAC0_STATUS_CHG\n", ++ "\nMAC1_STATUS_CHG\n", ++ "\nMAC2_STATUS_CHG\n", ++ "\nMAC0_NO_LINK_DROP\n", ++ "\nMAC1_NO_LINK_DROP\n", ++ "\nMAC2_NO_LINK_DROP\n", ++ "\nMAC0_RX_ERROR_DROP\n", ++ "\nMAC1_RX_ERROR_DROP\n", ++ "\nMAC2_RX_ERROR_DROP\n", ++ "\nMAC0_NO_DESTINATION_DROP\n", ++ "\nMAC1_NO_DESTINATION_DROP\n", ++ "\nMAC2_NO_DESTINATION_DROP\n", ++ "\nMAC0_RMC_PAUSE_DROP\n", ++ "\nMAC1_RMC_PAUSE_DROP\n", ++ "\nMAC2_RMC_PAUSE_DROP\n", ++ "\nMAC0_LOCAL_DROP\n", ++ "\nMAC1_LOCAL_DROP\n", ++ "\nMAC2_LOCAL_DROP\n", ++}; ++#endif ++ ++#define MIN_PACKET_LEN 14 ++ ++void cns3xxx_write_pri_mask(u8 pri_mask); ++ ++static int 
cns3xxx_notify_reboot(struct notifier_block *nb, unsigned long event, void *ptr); ++ ++static struct notifier_block cns3xxx_notifier_reboot = { ++ .notifier_call = cns3xxx_notify_reboot, ++ .next = NULL, ++ .priority = 0 ++}; ++ ++#if defined(CNS3XXX_VLAN_8021Q) ++void cns3xxx_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); ++void cns3xxx_vlan_rx_register(struct net_device *dev, struct vlan_group *grp); ++#endif ++ ++void take_off_vlan_header(struct sk_buff *skb) ++{ ++ // take off VLAN header ++ memmove(skb->data + 4, skb->data, 12); ++#if 0 ++ //skb_ptr->data += 4; ++ skb_reserve(skb, 4); ++#else ++ skb->data += 4; ++#endif ++ skb->len -= 4; // minus 4 byte vlan tag ++} ++ ++int rx_port_base(struct sk_buff *skb, RXDesc *rx_desc_ptr, const struct CNS3XXXPrivate_ *priv) ++{ ++ if (skb->data[12] == 0x81 && skb->data[13] == 0x00) // VLAN header ++ { ++ take_off_vlan_header(skb); ++ print_packet(skb->data, skb->len); ++ } ++ return 0; ++} ++ ++int rx_vlan_base(struct sk_buff *skb, RXDesc *rx_desc_ptr, const struct CNS3XXXPrivate_ *priv) ++{ ++ return 0; ++} ++ ++int tx_port_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb) ++{ ++#if defined(CNS3XXX_VLAN_8021Q) && defined (CNS3XXX_8021Q_HW_TX) ++ if (skb && priv->vlgrp != NULL && vlan_tx_tag_present(skb)) ++ { ++ tx_desc_ptr->c_vid = cpu_to_le16(vlan_tx_tag_get(skb)); ++ tx_desc_ptr->ctv=1; ++ tx_desc_ptr->fr = 0; ++ ++ } ++ else ++#endif ++ { ++ tx_desc_ptr->ctv = 0; ++ tx_desc_ptr->pmap = priv->net_device_priv->pmap; ++ tx_desc_ptr->fr = 1; ++ } ++ ++ return 0; ++} ++ ++ ++int tx_vlan_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb) ++{ ++#if defined(CNS3XXX_VLAN_8021Q) ++ ++ if (skb && priv->vlgrp != NULL && vlan_tx_tag_present(skb)) { ++ tx_desc_ptr->c_vid = cpu_to_le16(vlan_tx_tag_get(skb)); ++ } ++#else ++ tx_desc_ptr->c_vid = priv->net_device_priv->vlan_tag; ++ ++#endif ++ tx_desc_ptr->ctv=1; ++ tx_desc_ptr->fr = 0; ++ ++ return 0; 
++} ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++int fp_port_base(TXDesc *tx_desc_ptr, const struct CNS3XXXPrivate_ *priv, struct sk_buff *skb) ++{ ++#if 1 ++ tx_desc_ptr->fr = 1; ++ tx_desc_ptr->pmap = 0x8; ++#else ++ tx_desc_ptr->fr = 0; ++ tx_desc_ptr->ctv = 1; ++ tx_desc_ptr->c_vid = 80; ++#endif ++ return 0; ++} ++#endif ++ ++static inline struct sk_buff *cns3xxx_alloc_skb(void) ++{ ++ struct sk_buff *skb; ++ u32 align_64; ++ ++ skb = dev_alloc_skb(MAX_PACKET_LEN + 2 + RX_SDP_ALIGN); ++ ++ if (unlikely(!skb)) { ++ return NULL; ++ } ++ pci_dma_sync_single_for_device(NULL, virt_to_phys(skb->data), MAX_PACKET_LEN+2+RX_SDP_ALIGN, PCI_DMA_FROMDEVICE); ++ ++ align_64=CPU_CACHE_ALIGN((u32)skb->data); ++ skb_reserve(skb, align_64-(u32)skb->data); /* 16 bytes alignment */ ++ ++#ifndef CNS3XXX_4N ++ skb_reserve(skb, NET_IP_ALIGN); /* 16 bytes alignment */ ++#endif ++ ++ ++ ++ return skb; ++} ++ ++static int free_rx_skb(RXRing *rx_ring) ++{ ++ int i=0; ++ RXBuffer *rx_buffer = rx_ring->head; ++ //RXDesc *rx_desc = rx_ring.rx_desc_head_vir_addr; ++ ++ for (i=0 ; i < get_rx_ring_size(rx_ring) ; ++i, ++rx_buffer) { ++ if (rx_buffer->rx_desc->cown==0 && rx_buffer->skb) { ++ dev_kfree_skb(rx_buffer->skb); ++ rx_buffer->skb=0; ++ } ++ } ++ return 0; ++} ++ ++int cns3xxx_setup_all_rx_resources(RXRing *rx_ring, u8 ring_num) ++{ ++ int i=0; ++ RXBuffer *rx_buffer = 0; ++ RXDesc *rx_desc = 0; ++ ++#ifdef NCNB_TEST ++ ncnb_buf = dma_alloc_coherent(NULL, 2*1024* get_rx_ring_size(rx_ring), &ncnb_buf_phy, GFP_KERNEL); ++ printk("NCB_BUF: %08X PHY: %08X \n", ncnb_buf, ncnb_buf_phy); ++ ++#endif ++ ++ // alloc RXDesc array ++ rx_ring->rx_desc_head_vir_addr = dma_alloc_coherent(NULL, sizeof(RXDesc) * (get_rx_ring_size(rx_ring)), &rx_ring->rx_desc_head_phy_addr, GFP_KERNEL); ++ if (!rx_ring->rx_desc_head_vir_addr) { ++ return -ENOMEM; ++ } ++ ++ memset(rx_ring->rx_desc_head_vir_addr, 0, sizeof(RXDesc) * get_rx_ring_size(rx_ring)); ++ ++ // alloc RXBuffer array ++ rx_ring->head = kmalloc(sizeof(RXBuffer) *
get_rx_ring_size(rx_ring), GFP_KERNEL); ++ ++ if (!rx_ring->head) { ++ return -ENOMEM; ++ } ++ ++ rx_buffer = rx_ring->head; ++ for (i=0 ; i < get_rx_ring_size(rx_ring) ; ++i) { ++ rx_buffer->skb=0; ++ ++rx_buffer; ++ } ++ ++ rx_buffer = rx_ring->head; ++ rx_desc = rx_ring->rx_desc_head_vir_addr; ++ for (i=0 ; i < get_rx_ring_size(rx_ring) ; ++i, ++rx_buffer, ++rx_desc) { ++ rx_buffer->rx_desc = rx_desc; ++ rx_buffer->skb = cns3xxx_alloc_skb(); ++ ++ if (!rx_buffer->skb) { ++ ++ free_rx_skb(rx_ring); ++ kfree(rx_ring->head); ++ dma_free_coherent(NULL, sizeof(RXDesc) * get_rx_ring_size(rx_ring), rx_ring->rx_desc_head_vir_addr, rx_ring->rx_desc_head_phy_addr); ++ return -ENOMEM; ++ } ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ { ++ RXDesc tmp_rx_desc; ++ ++ memset(&tmp_rx_desc, 0, sizeof(RXDesc)); ++ tmp_rx_desc.sdp = (u32)virt_to_phys(rx_buffer->skb->data); ++ tmp_rx_desc.sdl = MAX_PACKET_LEN; ++ if (i == (get_rx_ring_size(rx_ring)-1) ){ ++ tmp_rx_desc.eor = 1; ++ } ++ tmp_rx_desc.fsd = 1; ++ tmp_rx_desc.lsd = 1; ++ swap_rx_desc(&tmp_rx_desc, rx_buffer->rx_desc); ++ } ++ ++#else ++ rx_buffer->rx_desc->sdp = (u32)virt_to_phys(rx_buffer->skb->data); ++ rx_buffer->rx_desc->sdl = MAX_PACKET_LEN; ++ if (i == (get_rx_ring_size(rx_ring)-1) ){ ++ rx_buffer->rx_desc->eor = 1; ++ } ++ rx_buffer->rx_desc->fsd = 1; ++ rx_buffer->rx_desc->lsd = 1; ++#endif ++ ++ } ++ rx_ring->cur_index = 0 ; ++ ++ if (ring_num == 0){ ++ FS_DESC_PTR0_REG = rx_ring->rx_desc_head_phy_addr; ++ FS_DESC_BASE_ADDR0_REG = rx_ring->rx_desc_head_phy_addr; ++ ++ } else if (ring_num == 1){ ++ FS_DESC_PTR1_REG = rx_ring->rx_desc_head_phy_addr; ++ FS_DESC_BASE_ADDR1_REG = rx_ring->rx_desc_head_phy_addr; ++ } ++ ++ return CAVM_OK; ++} ++ ++static int cns3xxx_setup_all_tx_resources(TXRing *tx_ring, u8 ring_num) ++{ ++ int i=0; ++ TXBuffer *tx_buffer = 0; ++ TXDesc *tx_desc = 0; ++ ++ ++ spin_lock_init(&(tx_ring->tx_lock)); ++ ++ tx_ring->tx_desc_head_vir_addr = dma_alloc_coherent(NULL, sizeof(TXDesc) * 
get_tx_ring_size(tx_ring), &tx_ring->tx_desc_head_phy_addr, GFP_KERNEL); ++ if (!tx_ring->tx_desc_head_vir_addr) { ++ return -ENOMEM; ++ } ++ ++ memset(tx_ring->tx_desc_head_vir_addr, 0, sizeof(TXDesc) * get_tx_ring_size(tx_ring)); ++ tx_ring->head = kmalloc(sizeof(TXBuffer) * get_tx_ring_size(tx_ring), GFP_KERNEL); ++ ++ tx_buffer = tx_ring->head; ++ tx_desc = tx_ring->tx_desc_head_vir_addr; ++ for (i=0 ; i < get_tx_ring_size(tx_ring) ; ++i, ++tx_buffer, ++tx_desc) { ++ tx_buffer->tx_desc = tx_desc; ++ ++ tx_buffer->tx_desc->cown = 1; ++ tx_buffer->skb = 0; ++ if (i == (get_tx_ring_size(tx_ring)-1) ){ ++ tx_buffer->tx_desc->eor = 1; ++ } ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ swap_tx_desc(tx_buffer->tx_desc, tx_buffer->tx_desc); ++#endif ++ ++ } ++ ++ tx_ring->cur_index = 0 ; ++ ++ if (ring_num == 0){ ++ TS_DESC_PTR0_REG = tx_ring->tx_desc_head_phy_addr; ++ TS_DESC_BASE_ADDR0_REG = tx_ring->tx_desc_head_phy_addr; ++ } else if (ring_num == 1){ ++ TS_DESC_PTR1_REG = tx_ring->tx_desc_head_phy_addr; ++ TS_DESC_BASE_ADDR1_REG = tx_ring->tx_desc_head_phy_addr; ++ } ++ return CAVM_OK; ++} ++ ++int cns3xxx_free_all_rx_resources(RXRing *rx_ring) ++{ ++ free_rx_skb(rx_ring); ++ kfree(rx_ring->head); ++ dma_free_coherent(NULL, sizeof(RXDesc) * get_rx_ring_size(rx_ring), rx_ring->rx_desc_head_vir_addr, rx_ring->rx_desc_head_phy_addr); ++ return 0; ++} ++ ++static int free_tx_skb(TXRing *tx_ring) ++{ ++ int i=0; ++ TXBuffer *tx_buffer = tx_ring->head; ++ ++ for (i=0 ; i < get_tx_ring_size(tx_ring) ; ++i, ++tx_buffer) { ++ if (tx_buffer->skb) { ++ dev_kfree_skb(tx_buffer->skb); ++ tx_buffer->skb = 0; ++ } ++ } ++ return 0; ++} ++ ++int cns3xxx_free_all_tx_resources(TXRing *tx_ring) ++{ ++ free_tx_skb(tx_ring); ++ kfree(tx_ring->head); ++ dma_free_coherent(NULL, sizeof(TXDesc) * get_tx_ring_size(tx_ring), tx_ring->tx_desc_head_vir_addr, tx_ring->tx_desc_head_phy_addr); ++ return 0; ++} ++ ++static int cns3xxx_free_rx_tx_res(CNS3XXXPrivate *priv) ++{ ++ int i=0; ++ ++ --rc_setup_rx_tx; ++ if
(rc_setup_rx_tx == 0) { ++ enable_port(3, 0); // disable cpu port ++ ++ // stop RX/TX ring0 dma ++ enable_rx_dma(0, 0); ++ enable_tx_dma(0, 0); ++ ++ for (i=0 ; i < priv->num_rx_queues ; ++i) { ++ cns3xxx_free_all_rx_resources(priv->rx_ring+i); ++ memset(priv->rx_ring + i, 0, sizeof(RXRing)); ++ } ++ ++ for (i=0 ; i < priv->num_tx_queues ; ++i) { ++ cns3xxx_free_all_tx_resources(priv->tx_ring+i); ++ memset(priv->tx_ring + i, 0, sizeof(TXRing)); ++ } ++ ++ } ++ return 0; ++} ++ ++ ++static int cns3xxx_setup_rx_tx_res(CNS3XXXPrivate *priv) ++{ ++ int i=0; ++ ++ if (rc_setup_rx_tx == 0) { ++ clear_fs_dma_state(1); ++ FS_DESC_PTR0_REG = 0; ++ FS_DESC_BASE_ADDR0_REG = 0; ++ FS_DESC_PTR1_REG = 0; ++ FS_DESC_BASE_ADDR1_REG = 0; ++ TS_DESC_PTR0_REG = 0; ++ TS_DESC_BASE_ADDR0_REG = 0; ++ TS_DESC_PTR1_REG = 0; ++ TS_DESC_BASE_ADDR1_REG = 0; ++ ++ for (i=0 ; i < priv->num_tx_queues ; ++i) { ++ spin_lock_init(&((priv->tx_ring+i)->tx_lock)); ++ (priv->tx_ring+i)->max_ring_size = MAX_TX_DESC_SIZE; ++ (priv->tx_ring+i)->ring_size = TX_DESC_SIZE; ++ if (cns3xxx_setup_all_tx_resources(priv->tx_ring+i, i) != CAVM_OK) ++ return CAVM_ERR; ++ } ++ ++ for (i=0 ; i < priv->num_rx_queues ; ++i) { ++ (priv->rx_ring+i)->max_ring_size = MAX_RX_DESC_SIZE; ++ (priv->rx_ring+i)->ring_size = RX_DESC_SIZE; ++ if (cns3xxx_setup_all_rx_resources(priv->rx_ring+i, i) != CAVM_OK) ++ return CAVM_ERR; ++ ++ } ++ clear_fs_dma_state(0); ++ } ++ ++rc_setup_rx_tx; ++ return CAVM_OK; ++} ++ ++int free_tx_desc_skb(TXRing *tx_ring, u8 ring_num) ++{ ++#if 1 ++ int i=0; ++ //u32 tssd_current=0; ++ TXBuffer *tx_buffer = 0; ++ u32 tx_ring_size = get_tx_ring_size(tx_ring); ++ // check curent hw index previous tx descriptor ++ u32 cur_index = cns3xxx_get_tx_hw_index(ring_num) - 1; ++ ++ tx_buffer = get_tx_buffer_by_index(tx_ring, cur_index); ++ ++ ++ //while (1) ++ for (i=0 ; i < tx_ring_size ; ++i) { ++ if (tx_buffer->tx_desc->cown == 1 && tx_buffer->skb) { ++ dev_kfree_skb_any(tx_buffer->skb); ++ tx_buffer->skb=0; 
++ //tx_buffer->tx_desc->cown == 1; ++ } else { ++ break; ++ } ++ // --tx_desc_pair_ptr ++ --cur_index; ++ tx_buffer = get_tx_buffer_by_index(tx_ring, cur_index); ++ ++ } ++#endif ++ return 0; ++} ++ ++void do_arl_lookup(void) ++{ ++} ++ ++inline void assign_netdev(RXBuffer volatile *rx_buffer) ++{ ++ RXDesc * rx_desc=0; ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ RXDesc tmp_rx_desc; ++ rx_desc = &tmp_rx_desc; ++ swap_rx_desc(rx_buffer->rx_desc, rx_desc); ++#else ++ rx_desc = rx_buffer->rx_desc; ++#endif ++ ++ ++#if defined(CONFIG_CNS3XXX_PORT_BASE) || defined(CNS3XXX_VLAN_8021Q) ++ // sp: ++ // 0 - mac port 0 ++ // 1 - mac port 1 ++ // 4 - mac port 2 ++ ++ switch (rx_desc->sp) ++ { ++ case 0: ++ { ++ rx_buffer->skb->dev = PORT0_NETDEV; ++ break; ++ } ++ case 1: ++ { ++ rx_buffer->skb->dev = PORT1_NETDEV; ++ break; ++ } ++ case 4: ++ { ++ rx_buffer->skb->dev = PORT2_NETDEV; ++ break; ++ } ++ ++ } ++#endif ++ ++#ifdef CONFIG_CNS3XXX_VLAN_BASE ++{ ++ u16 vlan_tag; ++ ++ vlan_tag = rx_desc->c_vid; ++ rx_buffer->skb->dev = net_dev_array[vlan_tag]; ++ ++} ++#endif ++ ++} ++ ++#if defined(CNS3XXX_VLAN_8021Q) ++static int cns3xxx_vlan_rx(CNS3XXXPrivate *priv, struct sk_buff *skb, u16 vlan_tag) ++{ ++ return vlan_hwaccel_receive_skb(skb, priv->vlgrp, vlan_tag); ++} ++#endif ++ ++// old_priv has ring index information, current version only uses the information. 
++static int cns3xxx_get_rfd_buff(RXBuffer volatile *rx_buffer, CNS3XXXPrivate *old_priv) ++{ ++ CNS3XXXPrivate *priv=0; ++ //RXDesc volatile *rxdesc_ptr = rx_buffer->rx_desc; ++ struct sk_buff *skb; ++ //unsigned char *data; ++ u32 len; ++ RXDesc *rx_desc; ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ ++ RXDesc tmp_rx_desc; ++ ++ rx_desc = &tmp_rx_desc; ++ swap_rx_desc(rx_buffer->rx_desc, rx_desc); ++ ++#else ++ rx_desc = rx_buffer->rx_desc; ++#endif ++ ++ //rxdesc_ptr = rxring.vir_addr + index; ++ skb = rx_buffer->skb; ++ len = rx_desc->sdl; ++ ++ ++#ifdef DEBUG_RX ++ if (MSG_LEVEL == DUMP_RX_PKT_INFO) { ++ print_packet(skb->data, len); ++ } ++ ++#endif ++ ++ pci_dma_sync_single_for_device(NULL, virt_to_phys(skb->data), len, PCI_DMA_FROMDEVICE); ++#if defined (CONFIG_CNS3XXX_SPPE) ++ if (PACKET_REASON_TO_CPU == rx_buffer->rx_desc->hr) { ++ if (sppe_pci_fp_ready) { ++ SPPE_PARAM param; ++ int pci_dev_index; ++ struct iphdr *iph; ++ ++ skb_put(skb, len); ++ iph = (struct iphdr *)(skb->data + sizeof(struct ethhdr)); ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ param.cmd = SPPE_CMD_ARP; ++ param.op = SPPE_OP_GET; ++ param.data.sppe_arp.ip[0] = iph->daddr; ++ if (SPPE_RESULT_SUCCESS != sppe_func_hook(¶m)) { ++ goto NOT_IN_PCI_FP; ++ } else { ++ pci_dev_index = param.data.sppe_arp.unused_1; ++ } ++ param.cmd = SPPE_CMD_PCI_FP_DEV; ++ param.op = SPPE_OP_GET; ++ param.data.sppe_pci_fp_dev.dev = NULL; ++ param.data.sppe_pci_fp_dev.index = pci_dev_index; ++ if (SPPE_RESULT_SUCCESS != sppe_pci_fp_hook(¶m)) { ++ goto NOT_IN_PCI_FP; ++ } else { ++ skb->dev = param.data.sppe_pci_fp_dev.dev; ++ } ++ #if 1 ++ dev_queue_xmit(skb); ++ #else ++ skb->dev->hard_start_xmit(skb, skb->dev); ++ #endif ++ ++ return 0; ++ } ++ } ++NOT_IN_PCI_FP: ++#endif ++ ++#ifdef CNS3XXX_NON_NIC_MODE_8021Q ++ if (cns3xxx_is_untag_packet(rx_desc) == 1) ++ take_off_vlan_header(skb); ++#endif ++ ++#ifdef CONFIG_CNS3XXX_PORT_BASE ++ assign_netdev(rx_buffer); ++ ++ if (rx_buffer->skb->dev) // if skb->dev is 0, 
means VLAN base ++ goto determine_dev_ok; ++ ++#endif /* CONFIG_CNS3XXX_PORT_BASE */ ++ ++ ++#ifdef CONFIG_CNS3XXX_VLAN_BASE ++ ++#ifdef CONFIG_HAVE_VLAN_TAG ++ ++#if defined(CNS3XXX_VLAN_8021Q) ++ // some funcion need netdev like eth_type_trans(), so need to assign it. ++ skb->dev = intr_netdev; ++ // 8021Q module will determine right netdev by vlan tag. ++#else // defined(CNS3XXX_VLAN_8021Q) ++ { ++ assign_netdev(rx_buffer); ++ ++ take_off_vlan_header(skb); ++ if (MSG_LEVEL == 5) ++ print_packet(skb->data, 32); ++ ++ if ( rx_buffer->skb->dev == 0){ ++ goto freepacket; ++ } ++ } ++ ++#endif // CNS3XXX_VLAN_8021Q ++ ++#else /* CONFIG_HAVE_VLAN_TAG */ ++ ++#ifdef CNS3XXX_RX_DESC_VLAN_INFO ++// get VLAN information by RX descriptor field ++ ++#endif ++ ++#endif // CONFIG_HAVE_VLAN_TAG ++ ++#endif // CONFIG_CNS3XXX_VLAN_BASE ++ ++ ++#ifdef CONFIG_CNS3XXX_PORT_BASE ++determine_dev_ok: ++#endif ++ ++ skb_put(skb, len); ++ ++ if (skb->dev) { ++ priv = netdev_priv(skb->dev); ++ } ++ else{ ++ DEBUG_MSG(WARNING_MSG, "skb_ptr->dev==NULL\n"); ++ goto freepacket; ++ } ++ ++#ifdef CNS3XXX_RX_HW_CHECKSUM ++ switch (rx_desc->prot) ++ { ++ case 1 : ++ case 2 : ++ case 5 : ++ case 6 : ++ { ++ if ( rx_desc->l4f == 0) { // tcp/udp checksum is correct ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ } else { ++ skb->ip_summed = CHECKSUM_NONE; ++ } ++ break; ++ } ++ default: ++ { ++ skb->ip_summed = CHECKSUM_NONE; ++ break; ++ } ++ } ++#else ++ skb->ip_summed = CHECKSUM_NONE; ++#endif // CNS3XXX_RX_HW_CHECKSUM ++ ++ ++ // this line must, if no, packet will not send to network layer ++#ifdef CONFIG_FAST_BRIDGE ++ if (fast_bridge_en == 0) ++#endif ++ skb->protocol = eth_type_trans(skb, skb->dev); ++ ++ skb->dev->last_rx = jiffies; ++ priv->stats.rx_packets++; ++ priv->stats.rx_bytes += len; ++ ++#ifdef CONFIG_FAST_BRIDGE ++ if (fast_bridge_en == 1) { ++ ++ skb->ip_summed = CHECKSUM_NONE; ++ if ( skb->dev == PORT0_NETDEV) { ++ skb->dev = PORT1_NETDEV; ++ } else if ( skb->dev == PORT1_NETDEV) 
{ ++ skb->dev = PORT0_NETDEV; ++ } ++ //skb->dev->hard_start_xmit(skb, skb->dev); ++ cns3xxx_send_packet(skb, skb->dev); ++ } else { ++#endif // #ifdef CONFIG_FAST_BRIDGE ++ ++ ++//#if defined(CNS3XXX_VLAN_8021Q) ++#if 0 ++ if (priv->vlgrp != NULL) ++ { ++ //cns3xxx_vlan_rx(priv, skb, rx_buffer->rx_desc->c_vid); ++ cns3xxx_vlan_rx(priv, skb, rx_buffer->rx_desc->c_vid); ++ //cns3xxx_vlan_rx(priv, skb, swab16(le32_to_cpu(rx_buffer->rx_desc->c_vid)) ); ++ } ++ else ++#else ++ #ifdef CONFIG_CNS3XXX_NAPI ++ netif_receive_skb(skb); ++ #else ++ netif_rx(skb); ++ #endif ++#endif ++ ++#ifdef CONFIG_FAST_BRIDGE ++ } ++#endif ++ ++ //vlan_hwaccel_receive_skb(skb, priv->vlgrp, 1); ++ ++ return 0; ++ ++freepacket: ++ //DEBUG_MSG(NORMAL_MSG, "freepacket\n"); ++ dev_kfree_skb_any(skb); ++ return 0; ++} ++ ++// index from 1 ++inline u32 get_rx_hw_index(CNS3XXXPrivate *priv) ++{ ++ return ((FS_DESC_PTR0_REG - get_rx_head_phy_addr(&RX_RING0(priv))) / sizeof(RXDesc) ); ++} ++ ++inline int get_rx_hw_index_by_reg(u8 ring_num) ++{ ++ if (ring_num == 0 ) { ++ return ((FS_DESC_PTR0_REG - FS_DESC_BASE_ADDR0_REG) / sizeof(RXDesc) ); ++ } else if (ring_num == 1 ) { ++ return ((FS_DESC_PTR1_REG - FS_DESC_BASE_ADDR1_REG) / sizeof(RXDesc) ); ++ } ++ ++ return CAVM_FAIL; ++} ++ ++void dump_rxring(void) ++{ ++ int j=0; ++ RXBuffer *rx_buffer = 0; ++ ++ rx_buffer = get_rx_ring_head(g_ring_info.rx_ring+0); ++ for (j=0 ; j < get_rx_ring_size(g_ring_info.rx_ring+0); ++j, ++rx_buffer) { ++ printk("[%d] ## rx_buffer->rx_desc->cown: %d\n", j, rx_buffer->rx_desc->cown); ++ } ++} ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++void cns3xxx_receive_packet(CNS3XXXPrivate *priv, int mode, int *work_done, int work_to_do) ++#else ++void cns3xxx_receive_packet(CNS3XXXPrivate *priv, int mode) ++#endif ++{ ++ int fssd_index; ++ //int fssd_current; ++ RXBuffer volatile *rx_buffer = 0; ++ RXDesc volatile *rx_desc=0; ++ struct sk_buff *skb; ++#ifndef CONFIG_CNS3XXX_NAPI ++ int fsqf = 0; // Queue Full Mode =0 ++#endif ++ int i, 
rxcount = 0; ++ u8 queue_index = priv->ring_index; ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ RXDesc tmp_rx_desc; ++#endif ++ ++ rx_buffer = get_cur_rx_buffer(&(priv->rx_ring[queue_index])); ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ rx_desc = &tmp_rx_desc; ++ swap_rx_desc(rx_buffer->rx_desc, rx_desc); ++#else ++ rx_desc = rx_buffer->rx_desc; ++#endif ++ ++ fssd_index = get_rx_hw_index_by_reg(queue_index); ++ ++ if (fssd_index > get_rx_cur_index(&priv->rx_ring[queue_index]) ) { ++ rxcount = fssd_index - get_rx_cur_index(&priv->rx_ring[queue_index]); ++ } else if (fssd_index < get_rx_cur_index(&priv->rx_ring[queue_index])) { ++ rxcount = (get_rx_ring_size(&priv->rx_ring[queue_index]) - get_rx_cur_index(&priv->rx_ring[queue_index]) ) + fssd_index; ++ } else { // fssd_index == rxring.cur_index ++ if (rx_desc->cown == 0) { // if rx_desc->cown is 1, we can receive the RX descriptor. ++ enable_rx_dma(0, 1); ++ goto receive_packet_exit; ++ } else { ++ // Queue Full ++#ifndef CONFIG_CNS3XXX_NAPI ++ fsqf = 1; ++#endif ++ rxcount = get_rx_ring_size(&priv->rx_ring[queue_index]); ++ } ++ } ++#ifndef CONFIG_CNS3XXX_NAPI ++ if (mode == 1) { ++ fsqf = 1; ++ rxcount = get_rx_ring_size(&priv->rx_ring[queue_index]); ++ } ++#endif ++ ++#ifdef CNS3XXX_FREE_TX_IN_RX_PATH ++ free_tx_desc_skb(priv->tx_ring + 0, 0); ++#ifdef CNS3XXX_DOUBLE_TX_RING ++ free_tx_desc_skb(priv->tx_ring + 1, 1); ++#endif ++#endif ++ ++ for (i = 0; i < rxcount; i++) { ++ ++ if (rx_desc->cown != 0) { // start to get packet ++ // Alloc New skb_buff ++ skb = cns3xxx_alloc_skb(); ++ // Check skb_buff ++ if (skb) { ++ cns3xxx_get_rfd_buff(rx_buffer, priv); ++ rx_buffer->skb = skb; ++#ifndef NCNB_TEST ++ rx_desc->sdp = (u32)virt_to_phys(skb->data); ++#endif ++ rx_desc->sdl = MAX_PACKET_LEN; ++ rx_desc->fsd = 1; ++ rx_desc->lsd = 1; ++ rx_desc->cown = 0; // set cbit to 0 ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ swap_rx_desc(rx_desc, rx_buffer->rx_desc); ++#endif ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++ ++(*work_done); ++ if 
(*work_done >= work_to_do) { ++ ++ rx_index_next(&priv->rx_ring[queue_index]); // rx_ring.cur_index points to next ++ rx_buffer = get_cur_rx_buffer(&priv->rx_ring[queue_index]); ++ rx_desc = rx_buffer->rx_desc; ++ break; ++ } ++#endif ++ ++ } else { ++ // I will add dev->lp.stats->rx_dropped, it will effect the performance ++ //PDEBUG("%s: Alloc sk_buff fail, reuse the buffer\n", __FUNCTION__); ++ rx_desc->cown = 0; // set cbit to 0 ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ swap_rx_desc(rx_desc, rx_buffer->rx_desc); ++#endif ++ ++ return; ++ } ++ } else { // cown is 0, no packets ++ //*work_done = 0; ++ return; ++ } ++ ++ ++ rx_index_next(&priv->rx_ring[queue_index]); // rx_ring.cur_index points to next ++ rx_buffer = get_cur_rx_buffer(&priv->rx_ring[queue_index]); ++ rx_desc = rx_buffer->rx_desc; ++ ++ } // end for (i = 0; i < rxcount; i++) ++ ++ ++#ifndef CONFIG_CNS3XXX_NAPI ++ if (fsqf) { ++ priv->rx_ring[queue_index].cur_index = fssd_index; ++ mb(); ++ enable_rx_dma(0, 1); ++ } ++#endif ++ ++ ++ //spin_unlock(&rx_lock); ++receive_packet_exit: ++ return; ++} ++ ++irqreturn_t cns3xxx_fsrc_ring0_isr(int irq, void *dev_id) ++{ ++ struct net_device *netdev = dev_id; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ ++ priv->ring_index=0; ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++{ ++ CNS3XXXPrivate *priv = netdev_priv(napi_dev); ++ priv->ring_index=0; ++ ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xb0); ++#else ++ cns3xxx_disable_irq(FSRC_RING0_INTERRUPT_ID); ++#endif ++ ++ //if (likely(netif_rx_schedule_prep(napi_dev, &priv->napi))) { ++ if (likely(napi_schedule_prep(&priv->napi))) { ++ //__netif_rx_schedule(napi_dev, &priv->napi); ++ __napi_schedule(&priv->napi); ++ } else { ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xf0); ++#else ++ cns3xxx_enable_irq(FSRC_RING0_INTERRUPT_ID); ++#endif ++ } ++} ++#else // !CONFIG_CNS3XXX_NAPI ++ ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xb0); ++#else ++ cns3xxx_disable_irq(FSRC_RING0_INTERRUPT_ID); ++ 
cns3xxx_disable_irq(FSQF_RING0_INTERRUPT_ID); ++#endif ++ ++ cns3xxx_receive_packet(priv, 0); // Receive Once ++ ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xf0); ++#else ++ cns3xxx_enable_irq(FSRC_RING0_INTERRUPT_ID); ++ cns3xxx_enable_irq(FSQF_RING0_INTERRUPT_ID); ++#endif ++ enable_rx_dma(0, 1); ++#endif ++ ++ return IRQ_HANDLED; ++} ++ ++ ++#if defined(CNS3XXX_DOUBLE_RX_RING) ++irqreturn_t cns3xxx_fsrc_ring1_isr(int irq, void *dev_id) ++{ ++ struct net_device *netdev = dev_id; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ priv->ring_index=1; ++ ++ ++#if defined(CONFIG_CNS3XXX_NAPI) && defined(CNS3XXX_DOUBLE_RX_RING) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(r1_napi_dev); ++ priv->ring_index=1; ++ ++ cns3xxx_disable_irq(FSRC_RING1_INTERRUPT_ID); ++ ++ if (likely(napi_schedule_prep(&priv->napi))) { ++ __napi_schedule(&priv->napi); ++ } else { ++ cns3xxx_enable_irq(FSRC_RING1_INTERRUPT_ID); ++ } ++} ++#else ++ ++ cns3xxx_disable_irq(CNS3XXX_FSRC_RING1_INTERRUPT_ID); ++ cns3xxx_disable_irq(CNS3XXX_FSQF_RING1_INTERRUPT_ID); ++ cns3xxx_receive_packet(priv, 0); // Receive Once ++ enable_rx_dma(1, 1); ++ ++ cns3xxx_enable_irq(CNS3XXX_FSRC_RING1_INTERRUPT_ID); ++ cns3xxx_enable_irq(CNS3XXX_FSQF_RING1_INTERRUPT_ID); ++#endif ++ ++ return IRQ_HANDLED; ++} ++#endif ++ ++int cns3xxx_check_enough_tx_descriptor(TXRing *tx_ring, int need_free_tx_desc) ++{ ++#if 1 ++ int i=0; ++ TXDesc *tx_desc=0; ++ u32 cur_index = get_tx_cur_index(tx_ring); ++ TXBuffer *tx_buffer = get_tx_buffer_by_index(tx_ring, cur_index); ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ TXDesc tmp_tx_desc; ++ tx_desc = &tmp_tx_desc; ++ swap_tx_desc(tx_buffer->tx_desc, tx_desc); ++#else ++ tx_desc = tx_buffer->tx_desc; ++#endif ++ ++ ++ for (i=0 ; i < need_free_tx_desc ; ++i) { ++ if ( tx_desc->cown == 0 ) { ++ return 0; // no free TX descriptor ++ } ++ tx_buffer = get_tx_buffer_by_index(tx_ring, ++cur_index); ++ } ++#endif ++ return 1; ++} ++ ++// if return CAVM_ERR, means pad is fail, the packet 
cannot send by switch. ++ ++int fill_a_skb_to_tx_desc(TXBuffer * tx_buffer, u8 *data, int len, struct sk_buff *skb, const struct CNS3XXXPrivate_ *priv, int sg, int fsd, int lsd) ++{ ++ //TXDesc *tx_desc_ptr = tx_buffer->tx_desc; ++ static int tt=0; ++ ++ TXDesc *tx_desc_ptr = 0; ++#ifdef CONFIG_SWTICH_BIG_ENDIAN ++ TXDesc tmp_tx_desc; ++ tx_desc_ptr = &tmp_tx_desc; ++ swap_tx_desc(tx_buffer->tx_desc, tx_desc_ptr); ++#else ++ tx_desc_ptr = tx_buffer->tx_desc; ++#endif ++ ++ ++ ++ if (tx_buffer->skb) { ++ dev_kfree_skb_any(tx_buffer->skb); ++ tx_buffer->skb = 0 ; ++ } else { ++ //++tx_ring.non_free_tx_skb; ++ } ++ ++ tx_buffer->skb = skb; /* for free skb */ ++ tx_desc_ptr->sdp = virt_to_phys(data); ++ tx_buffer->j = tt; ++ tx_buffer->tx_index = cns3xxx_get_tx_hw_index(0); ++ ++tt; ++ ++#if 0 ++ { ++ static u16 previous_sn_num=10; ++ u16 sn_num=0; ++ u16 e_type=0; ++ ++ memcpy(&e_type, skb->data + 12, 2); ++ e_type = be16_to_cpu(e_type); ++ ++ if (e_type == 0x0800) { ++ memcpy(&sn_num, skb->data + 0x28, 2); ++ sn_num = be16_to_cpu(sn_num); ++ ++ if ( previous_sn_num == sn_num) ++ printk("dup\n"); ++ ++ previous_sn_num = sn_num; ++ } ++ ++ } ++#endif ++ ++ ++#ifdef CNS3XXX_TX_HW_CHECKSUM ++ tx_desc_ptr->ico = 1; ++ tx_desc_ptr->uco = 1; ++ tx_desc_ptr->tco = 1; ++#else ++ tx_desc_ptr->ico = 0; ++ tx_desc_ptr->uco = 0; ++ tx_desc_ptr->tco = 0; ++#endif ++ // Wake interrupt ++#ifdef CNS3XXX_TSTC_RING0_ISR ++ tx_desc_ptr->interrupt = 1; ++#else ++ tx_desc_ptr->interrupt = 0; ++#endif ++ ++ /* fill 0 to MIN_PACKET_LEN size */ ++ // can change MIN_PACKET_LEN to 14 ++ if (sg==0 && len < MIN_PACKET_LEN) { ++ if (skb_padto(skb, MIN_PACKET_LEN)) ++ return CAVM_ERR; ++ ++ //memset(skb->data + len, 0, MIN_PACKET_LEN - len); ++ //skb->len = MIN_PACKET_LEN; ++ tx_desc_ptr->sdl = MIN_PACKET_LEN; ++ } else { ++ tx_desc_ptr->sdl = len; ++ } ++ ++ dma_cache_maint(data, tx_desc_ptr->sdl, PCI_DMA_TODEVICE); ++ ++ /* VLAN base or port base function to set TX descriptor */ ++ /* reference: 
tx_//port_base(), tx_vlan_base() */ ++ priv->net_device_priv->tx_func(tx_desc_ptr, priv, skb); ++ tx_desc_ptr->fsd = fsd; ++ tx_desc_ptr->lsd = lsd; ++ ++ /* NOT SG packet */ ++ if( fsd == 1 && lsd == 1) ++ tx_desc_ptr->cown = 0; ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++ swap_tx_desc(tx_desc_ptr, tx_buffer->tx_desc); ++#endif ++ ++ return CAVM_OK; ++} ++ ++int cns3xxx_send_packet(struct sk_buff *skb, struct net_device *netdev) ++{ ++ ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ TXBuffer *tx_buffer = 0; ++ unsigned long flags; ++ int nr_frags =skb_shinfo(skb)->nr_frags; ++ ++ TXDesc *tx_desc[10]; // FIXME: ensure to maximum sg size ++ int tx_desc_count=0; ++ int i=0; ++ ++#ifdef DEBUG_TX ++ if (MSG_LEVEL == DUMP_TX_PKT_INFO) { ++ print_packet(tx_buffer->skb->data, tx_buffer->tx_desc->sdl); ++ //dump_tx_desc(tx_buffer->tx_desc); ++ } ++#endif ++ ++ spin_lock_irqsave(&tx_lock, flags); ++ ++ if (cns3xxx_check_enough_tx_descriptor(priv->tx_ring + ring_index, (nr_frags==0 ) ? 1 : nr_frags) == 0) { ++ // no enough tx descriptor ++ spin_unlock_irqrestore(&tx_lock, flags); ++ // re-queue the skb ++ return NETDEV_TX_BUSY; ++ } ++ ++ tx_buffer = get_cur_tx_buffer(priv->tx_ring + ring_index); ++ ++ if (nr_frags == 0) { // non scatter/gather I/O ++ ++ fill_a_skb_to_tx_desc(tx_buffer, skb->data, skb->len, skb, priv, 0, 1, 1); ++ ++ tx_index_next(priv->tx_ring + ring_index); ++ ++ } else { // scatter/gather I/O ++ struct skb_frag_struct *frag = 0; ++ ++ ++ fill_a_skb_to_tx_desc(tx_buffer, skb->data, skb->len - skb->data_len, 0, priv, 1, 1, 0); ++ tx_desc[tx_desc_count++] = tx_buffer->tx_desc; ++ tx_index_next(priv->tx_ring + ring_index); ++ tx_buffer = get_cur_tx_buffer(priv->tx_ring + ring_index); ++ ++ for (i=0 ; i < nr_frags-1 ; ++i) { ++ frag = &skb_shinfo(skb)->frags[i]; ++ ++ fill_a_skb_to_tx_desc(tx_buffer, page_address(frag->page) + frag->page_offset, frag->size, 0, priv, 1, 0, 0); ++ tx_desc[tx_desc_count++] = tx_buffer->tx_desc; ++ ++ tx_index_next(priv->tx_ring + 
ring_index); ++ tx_buffer = get_cur_tx_buffer(priv->tx_ring + ring_index); ++ } ++ frag = &skb_shinfo(skb)->frags[nr_frags-1]; ++ ++ // last fragment ++ fill_a_skb_to_tx_desc(tx_buffer, page_address(frag->page) + frag->page_offset, frag->size, skb, priv, 1, 0, 1); ++ tx_desc[tx_desc_count++] = tx_buffer->tx_desc; ++ ++ tx_index_next(priv->tx_ring + ring_index); ++ tx_buffer = get_cur_tx_buffer(priv->tx_ring + ring_index); ++ } ++ ++ ++ if( nr_frags != 0) { ++ ++ for (i = 0; i < tx_desc_count ; i++ ) ++ tx_desc[i]->cown = 0 ; ++ } ++ ++ mb(); ++ enable_tx_dma(ring_index, 1); ++ ++ priv->stats.tx_packets++; ++ priv->stats.tx_bytes += skb->len; ++ netdev->trans_start = jiffies; ++ ++ spin_unlock_irqrestore(&tx_lock, flags); ++ return NETDEV_TX_OK; ++} ++ ++ ++#ifdef CNS3XXX_FSQF_RING0_ISR ++irqreturn_t cns3xxx_fsqf_ring0_isr(int irq, void *dev_id) ++{ ++#ifndef CONFIG_CNS3XXX_NAPI ++ struct net_device *netdev = dev_id; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++#endif ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++{ ++ CNS3XXXPrivate *priv = netdev_priv(napi_dev); ++ // because in normal state, fsql only invoke once and set_bit is atomic function. ++ // so I don't mask it. 
++ set_bit(0, &priv->is_qf); ++} ++#else ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xb0); ++#else ++ cns3xxx_disable_irq(FSRC_RING0_INTERRUPT_ID); ++ cns3xxx_disable_irq(FSQF_RING0_INTERRUPT_ID); ++#endif ++ ++ ++ cns3xxx_receive_packet(priv, 1); // Receive at Queue Full Mode ++ ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xf0); ++#else ++ cns3xxx_enable_irq(FSRC_RING0_INTERRUPT_ID); ++ cns3xxx_enable_irq(FSQF_RING0_INTERRUPT_ID); ++#endif ++ ++ enable_rx_dma(0, 1); ++#endif // CONFIG_CNS3XXX_NAPI ++ ++ return IRQ_HANDLED; ++} ++#endif ++ ++ ++#if defined(CNS3XXX_DOUBLE_RX_RING) ++#ifdef CNS3XXX_FSQF_RING1_ISR ++irqreturn_t cns3xxx_fsqf_ring1_isr(int irq, void *dev_id) ++{ ++ struct net_device *netdev = dev_id; ++ CNS3XXXPrivate *priv = netdev_priv(netdev); ++ //INTC_CLEAR_EDGE_TRIGGER_INTERRUPT(INTC_GSW_FSQF_BIT_INDEX); ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++{ ++ CNS3XXXPrivate *priv = netdev_priv(r1_napi_dev); ++ // because in normal state, fsqf only invoke once and set_bit is atomic function. ++ // so don't mask it. ++ set_bit(0, &priv->is_qf); ++} ++#else ++ cns3xxx_disable_irq(FSRC_RING1_INTERRUPT_ID); ++ cns3xxx_disable_irq(FSQF_RING1_INTERRUPT_ID); ++ ++ cns3xxx_receive_packet(priv, 1); // Receive at Queue Full Mode ++ enable_rx_dma(1, 1); ++ ++ cns3xxx_enable_irq(FSRC_RING1_INTERRUPT_ID); ++ cns3xxx_enable_irq(FSQF_RING1_INTERRUPT_ID); ++#endif ++ return IRQ_HANDLED; ++} ++#endif ++#endif //#if defined(CNS3XXX_DOUBLE_RX_RING) ++ ++ ++#ifdef CNS3XXX_STATUS_ISR ++irqreturn_t cns3xxx_status_isr(int irq, void *dev_id) ++{ ++ u32 int_status = INTR_STAT_REG; ++ u32 i=0; ++ ++ cns3xxx_disable_irq(STATUS_INTERRUPT_ID); ++ for (i = 0; i < 32; i++) { ++ if (int_status & (1 << i)) { ++ PRINT_INFO(cns3xxx_gsw_status_tbl[i]); ++ } ++ } ++ INTR_STAT_REG = 0xffffffff; // write 1 for clear. 
++ cns3xxx_enable_irq(STATUS_INTERRUPT_ID); ++ return IRQ_HANDLED; ++} ++#endif ++ ++ ++#ifdef CNS3XXX_TSTC_RING0_ISR ++irqreturn_t cns3xxx_tstc_ring0_isr(int irq, void *dev_id) ++{ ++ return IRQ_HANDLED; ++} ++#endif ++ ++ ++static int cns3xxx_install_isr(struct net_device *dev) ++{ ++ int retval; ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ ++ if (install_isr_rc == 0) { ++ ++ retval = request_irq(FSRC_RING0_INTERRUPT_ID, cns3xxx_fsrc_ring0_isr, IRQF_SHARED, "FSRC_RING0", intr_netdev); ++ ++ if (retval) { ++ return 1; ++ } ++ ++#ifdef CNS3XXX_FSQF_RING0_ISR ++ retval = request_irq(FSQF_RING0_INTERRUPT_ID, cns3xxx_fsqf_ring0_isr, IRQF_SHARED, "FSQF_RING0", intr_netdev); ++ ++ if (retval) { ++ PRINT_INFO("%s: unable to get IRQ %d (irqval=%d).\n", "FSQF_RING0", FSQF_RING0_INTERRUPT_ID, retval); ++ return 2; ++ } ++#endif ++ ++#ifdef CNS3XXX_TSTC_RING0_ISR ++ retval = request_irq(TSTC_RING0_INTERRUPT_ID, cns3xxx_tstc_ring0_isr, IRQF_SHARED, "TSTC_RING0", intr_netdev); ++ ++ if (retval) { ++ PRINT_INFO("%s: unable to get IRQ %d (irqval=%d).\n", "TSTC_RING0", FSQF_RING0_INTERRUPT_ID, retval); ++ return 3; ++ } ++ ++#endif ++ ++ ++ if (priv->num_rx_queues == 2) { ++#if defined(CNS3XXX_DOUBLE_RX_RING) ++ retval = request_irq(FSRC_RING1_INTERRUPT_ID, cns3xxx_fsrc_ring1_isr, IRQF_SHARED, "FSRC_RING1", intr_netdev); ++ ++ if (retval) { ++ return 1; ++ } ++ ++#ifdef CNS3XXX_FSQF_RING1_ISR ++ retval = request_irq(FSQF_RING1_INTERRUPT_ID, cns3xxx_fsqf_ring1_isr, IRQF_SHARED, "FSQF_RING1", intr_netdev); ++ ++ if (retval) { ++ PRINT_INFO("%s: unable to get IRQ %d (irqval=%d).\n", "FSQF_RING1", FSQF_RING1_INTERRUPT_ID, retval); ++ return 2; ++ } ++#endif ++ ++#endif ++ } ++ ++#ifdef CNS3XXX_STATUS_ISR ++ retval = request_irq(STATUS_INTERRUPT_ID, cns3xxx_status_isr, IRQF_SHARED, "GSW_STATUS", intr_netdev); ++ ++ if (retval) { ++ PRINT_INFO("%s: unable to get IRQ %d (irqval=%d).\n", "GSW STATUS INT", STATUS_INTERRUPT_ID, retval); ++ return 3; ++ } ++ INTR_MASK_REG = 0; ++#endif ++ 
++ ++ ++ ++ ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++{ ++ CNS3XXXPrivate *sp = netdev_priv(napi_dev); ++ napi_enable(&sp->napi); ++ netif_start_queue(napi_dev); ++ ++#ifdef CNS3XXX_DOUBLE_RX_RING ++ sp = netdev_priv(r1_napi_dev); ++ napi_enable(&sp->napi); ++ netif_start_queue(r1_napi_dev); ++#endif ++} ++#endif ++ // enable cpu port ++ enable_port(3, 1); ++ ++ } // end if (install_isr_rc == 0) ++ ++ ++install_isr_rc; ++ ++ return 0; ++} ++ ++ ++int cns3xxx_open(struct net_device *dev) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ //static int init_state=0; ++ ++ if (cns3xxx_setup_rx_tx_res(priv) != CAVM_OK) { ++ return -1; ++ } ++ ++ netif_start_queue(dev); ++ priv->net_device_priv->open(); ++ ++ cns3xxx_install_isr(dev); ++ ++ enable_rx_dma(0, 1); ++ ++ if (priv->num_rx_queues == 2) ++ enable_rx_dma(1, 1); ++ ++ netif_carrier_on(dev); ++ ++ return 0; ++} ++ ++static int cns3xxx_uninstall_isr(struct net_device *dev) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ --install_isr_rc; ++ if (install_isr_rc == 0) { ++ enable_port(3, 0); ++ free_irq(FSRC_RING0_INTERRUPT_ID, intr_netdev); ++#ifdef CNS3XXX_STATUS_ISR ++ free_irq(STATUS_INTERRUPT_ID, intr_netdev); ++#endif ++ ++#ifdef CNS3XXX_FSQF_RING0_ISR ++ free_irq(FSQF_RING0_INTERRUPT_ID, intr_netdev); ++#endif ++ ++#ifdef CNS3XXX_TSTC_RING0_ISR ++ free_irq(TSTC_RING0_INTERRUPT_ID, intr_netdev); ++#endif ++ ++ if (priv->num_rx_queues == 2) { ++ free_irq(FSRC_RING1_INTERRUPT_ID, intr_netdev); ++ ++#ifdef CNS3XXX_FSQF_RING1_ISR ++ free_irq(FSQF_RING1_INTERRUPT_ID, intr_netdev); ++#endif ++ } ++ ++ ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++{ ++ CNS3XXXPrivate *sp = netdev_priv(napi_dev); ++ ++ napi_disable(&sp->napi); ++ netif_stop_queue(napi_dev); ++#ifdef CNS3XXX_DOUBLE_RX_RING ++ sp = netdev_priv(r1_napi_dev); ++ ++ napi_disable(&sp->napi); ++ netif_stop_queue(r1_napi_dev); ++#endif ++} ++#endif ++ ++ ++ } ++ ++ return 0; ++} ++ ++int cns3xxx_close(struct net_device *dev) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ ++ 
enable_rx_dma(0, 0); ++ enable_tx_dma(0, 0); ++ ++ if (priv->num_rx_queues == 2) ++ enable_tx_dma(1, 0); ++ ++ if (priv->num_tx_queues == 2) ++ enable_rx_dma(1, 0); ++ ++ netif_stop_queue(dev); ++ ++ priv->net_device_priv->close(); ++ cns3xxx_uninstall_isr(dev); ++ cns3xxx_free_rx_tx_res(priv); ++ netif_carrier_off(dev); ++ return 0; ++} ++ ++ ++ ++//#define MAC_PORT(p) MAC##p##_CFG_REG ++ ++void broadcast_storm_cfg(u8 port, u8 boradcast, u8 multicast, u8 unknown) ++{ ++ switch (port) ++ { ++ case 0: ++ { ++ (boradcast == 1) ? (MAC0_CFG_REG |= (1 << 30)) : (MAC0_CFG_REG &= (~(1 << 30))) ; ++ (multicast == 1) ? (MAC0_CFG_REG |= (1 << 29)) : (MAC0_CFG_REG &= (~(1 << 29))) ; ++ (unknown == 1) ? (MAC0_CFG_REG |= (1 << 28)) : (MAC0_CFG_REG &= (~(1 << 28))) ; ++ break; ++ } ++ case 1: ++ { ++ (boradcast == 1) ? (MAC1_CFG_REG |= (1 << 30)) : (MAC1_CFG_REG &= (~(1 << 30))) ; ++ (multicast == 1) ? (MAC1_CFG_REG |= (1 << 29)) : (MAC1_CFG_REG &= (~(1 << 29))) ; ++ (unknown == 1) ? (MAC1_CFG_REG |= (1 << 28)) : (MAC1_CFG_REG &= (~(1 << 28))) ; ++ break; ++ } ++ case 2: ++ { ++ (boradcast == 1) ? (MAC2_CFG_REG |= (1 << 30)) : (MAC2_CFG_REG &= (~(1 << 30))) ; ++ (multicast == 1) ? (MAC2_CFG_REG |= (1 << 29)) : (MAC2_CFG_REG &= (~(1 << 29))) ; ++ (unknown == 1) ? 
(MAC2_CFG_REG |= (1 << 28)) : (MAC2_CFG_REG &= (~(1 << 28))) ; ++ break; ++ } ++ } ++} ++ ++void broadcast_storm_rate(u8 rate) ++{ ++ TC_CTRL_REG &= (~(0xf << 24)); ++ TC_CTRL_REG |= (rate << 24); ++} ++ ++// port: 0, 1, 2 ; port0, port1 and port2 ++// config general mac port configuration ++void cns3xxx_general_mac_cfg(u8 port) ++{ ++ u32 cfg=0; ++ ++ switch (port) ++ { ++ case 0: ++ { ++ cfg = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ cfg = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ cfg = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ ++ // txc_check_en: 1 ++ cfg |= (1 << 13); ++ ++ // bp_en: 1 ++ cfg |= (1 << 17); ++ ++#ifdef CNS3XXX_LEARN_ENABLE ++ // learn_dis: 0 ++ cfg &= (~(1 << 19)); ++#else ++ // learn disable ++ cfg |= (1 << 19); ++#endif ++ ++ // blocking_state: 0 ++ cfg &= (~(1 << 20)); ++ ++ // block_mode: 0 ++ cfg &= (~(1 << 21)); ++ ++#ifdef CNS3XXX_AGE_ENABLE ++ // age_en: 1 ++ cfg |= (1 << 22); ++ ++#else ++ // age disable ++ cfg &= (~(1 << 22)); ++#endif ++ ++ // SA_secured: 0 ++ cfg &= (~(1 << 23)); ++ ++ switch (port) ++ { ++ case 0: ++ { ++ MAC0_CFG_REG = cfg; ++ break; ++ } ++ case 1: ++ { ++ MAC1_CFG_REG = cfg; ++ break; ++ } ++ case 2: ++ { ++ MAC2_CFG_REG = cfg; ++ break; ++ } ++ } ++ ++} ++ ++void cns3xxx_configu_cpu_port(void) ++{ ++ // Set CPU port to general configuration ++ ++#ifdef CNS3XXX_LEARN_ENABLE ++ CPU_CFG_REG &= (~(1 << 19)); ++#else ++ // learn_dis: 1 ++ CPU_CFG_REG |= (1 << 19); ++#endif ++ ++#ifdef CNS3XXX_AGE_ENABLE ++ // age_en: 1 ++ CPU_CFG_REG |= (1 << 22); ++#else ++ // age disable ++ CPU_CFG_REG &= (~(1 << 22)); ++#endif ++ ++ // SA_secured: 0 ++ CPU_CFG_REG &= (~(1 << 23)); ++ ++ // go to hnat:1 ++ CPU_CFG_REG |= (1 << 29); ++ ++ //offset 4N +2 ++ CPU_CFG_REG &= (~(1 << 30)); ++#ifdef CNS3XXX_4N ++ CPU_CFG_REG |= (1 << 30); ++#endif ++ ++ // cpu flow control disable ++ CPU_CFG_REG &= (~(1 << 31)); ++#ifdef CNS3XXX_CPU_PORT_FC ++ // cpu flow control enable ++ CPU_CFG_REG |= (1 << 31); ++#endif ++ ++} ++ ++static 
void __init cns3xxx_gsw_hw_init(void) ++{ ++ //u32 mac_port_config; ++ int i; ++ //u32 cfg_reg = 0; ++ u32 reg_config = 0; ++ ++#ifdef CONFIG_SILICON ++ ++ //GPIOB_PIN_EN_REG |= (1 << 14); //enable GMII2_CRS ++ //GPIOB_PIN_EN_REG |= (1 << 15); //enable GMII2_COL ++ GPIOB_PIN_EN_REG |= (1 << 20); //enable MDC ++ GPIOB_PIN_EN_REG |= (1 << 21); //enable MDIO ++ ++ cns3xxx_gsw_power_enable(); ++ cns3xxx_gsw_software_reset(); ++#endif ++ ++#ifdef CNS3XXX_CONFIG_SIM_MODE ++ SLK_SKEW_CTRL_REG |= (1 << 31); ++#endif ++ ++ ++#if 1 ++ while (((SRAM_TEST_REG >> 20) & 1) == 0); ++#endif ++ ++ clear_fs_dma_state(1); ++ ++ ++ // disable port mac0, mac1, mac2, cpu port ++ enable_port(0, 0); ++ enable_port(1, 0); ++ enable_port(2, 0); ++ enable_port(3, 0); ++ ++ // disable RX0/TX0 RX1/TX1 DMA ++ enable_tx_dma(0, 0); ++ enable_tx_dma(1, 0); ++ enable_rx_dma(0, 0); ++ enable_rx_dma(1, 0); ++ ++ INTR_STAT_REG = 0xffffffff; // write 1 for clear. ++ ++#ifdef CNS3XXX_DELAYED_INTERRUPT ++ DELAY_INTR_CFG_REG = (1 << 16) | (max_pend_int_cnt << 8) | (max_pend_time); ++#endif ++ ++ reg_config = PHY_AUTO_ADDR_REG; ++ reg_config &= ~(3 << 30); ++#ifdef CONFIG_CNS3XXX_JUMBO_FRAME ++ reg_config |= (3 << 30); // maximum frame length: 9600 bytes ++#else ++ reg_config |= (2 << 30); // maximum frame length: 1536 bytes ++#endif ++ ++ PHY_AUTO_ADDR_REG = reg_config; ++ ++ ++ // Set general value for MAC_GLOB_CFG_REG ++ // age_time: 2 ^(1-1) * 300 sec ++ MAC_GLOB_CFG_REG &= (~0xf); ++ MAC_GLOB_CFG_REG |= 1; ++ ++ ++ // bkoff_mode: 111 follow standard ++ MAC_GLOB_CFG_REG &= (~(0x7 << 9)); ++ MAC_GLOB_CFG_REG |= (0x7 << 9); ++ ++ // jam_no: 1010: ++ MAC_GLOB_CFG_REG &= (~(0xf << 12)); ++ MAC_GLOB_CFG_REG |= (0xa << 12); ++ ++ // bp_mode: 10: ++ MAC_GLOB_CFG_REG &= (~(0x3 << 16)); ++ MAC_GLOB_CFG_REG |= (0x2 << 16); ++ ++ // res_mc_flt: 0 ++ MAC_GLOB_CFG_REG &= (~(0x1 << 28)); ++ ++ // col_mode: 11 ++ MAC_GLOB_CFG_REG &= (~(0x3 << 18)); ++ MAC_GLOB_CFG_REG |= (0x3 << 18); ++ ++ // crc_stripping: 1 ++ 
MAC_GLOB_CFG_REG |= (0x1 << 20); ++ ++ ++ // ACCEPT_CRC_BAD_PKT : 0 ++ MAC_GLOB_CFG_REG &= (~(0x1 << 21)); ++ ++#ifdef ACCEPT_CRC_BAD_PKT ++ MAC_GLOB_CFG_REG |= (0x1 << 21); ++#endif ++ ++ // SVL ++ MAC_GLOB_CFG_REG &= (~(0x1 << 7)); ++ ++#ifdef IVL ++ // IVL: 1 (IVL), 0 (SVL) ++ MAC_GLOB_CFG_REG |= (0x1 << 7); ++#endif ++ ++ ++ // HNAT_en: 0 ++ MAC_GLOB_CFG_REG &= (~(0x1 << 26)); ++ ++ // Firewall_mode: 0 ++ MAC_GLOB_CFG_REG &= (~(0x1 << 27)); ++ ++ ++ ++ cns3xxx_general_mac_cfg(0); ++ cns3xxx_general_mac_cfg(1); ++ cns3xxx_general_mac_cfg(2); ++ cns3xxx_configu_cpu_port(); ++ ++ // write vlan table ++ // set cpu port vlan table ++ cns3xxx_vlan_table_add(&cpu_vlan_table_entry); ++ for (i=0 ; i < sizeof(vlan_table_entry)/sizeof(VLANTableEntry) ; ++i) ++ cns3xxx_vlan_table_add(&vlan_table_entry[i]); ++ ++ cns3xxx_set_pvid(0, PORT0_PVID); ++ cns3xxx_set_pvid(1, PORT1_PVID); ++ cns3xxx_set_pvid(2, PORT2_PVID); ++ cns3xxx_set_pvid(3, CPU_PVID); ++ ++#ifdef CNS3XXX_SET_ARL_TABLE ++ // set arl table ++ cns3xxx_arl_table_flush(); ++#endif ++} ++ ++static int cns3xxx_set_mac_addr(struct net_device *dev, void *p) ++{ ++ //struct sockaddr *sock_addr = addr; ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ ++ struct sockaddr *addr= p; ++ ++ ++ spin_lock_irq(&priv->lock); ++ ++ ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EADDRNOTAVAIL; ++ ++ // 1. delete old arl mac entry ++ // 2. add new arl mac entry ++ // 3. 
copy new mac to netdev field ++ ++ if (priv->net_device_priv->arl_table_entry) { ++ cns3xxx_arl_table_invalid(priv->net_device_priv->arl_table_entry); ++ memcpy(priv->net_device_priv->arl_table_entry->mac, addr->sa_data, dev->addr_len); ++ //print_arl_table_entry(priv->net_device_priv->arl_table_entry); ++ cns3xxx_arl_table_add(priv->net_device_priv->arl_table_entry); ++ } ++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); ++ ++ spin_unlock_irq(&priv->lock); ++ return 0; ++} ++ ++ ++int set_fc_rls(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ FC_GLOB_THRS_REG &= (~(0x1ff << 16)); ++ FC_GLOB_THRS_REG |= (ctl.val << 16); ++ return CAVM_OK; ++} ++ ++int get_fc_rls(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ ctl.val = ((FC_GLOB_THRS_REG >> 16) & 0x1ff); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_fc_set(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ FC_GLOB_THRS_REG &= (~0x1ff); ++ FC_GLOB_THRS_REG |= ctl.val; ++ return CAVM_OK; ++} ++ ++int get_fc_set(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ ctl.val = ((FC_GLOB_THRS_REG) & 0x1ff); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++ ++int set_sarl_rls(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ SARL_CTRL_REG &= (~(0x1ff << 12)); ++ SARL_CTRL_REG |= (ctl.val << 12); ++ return CAVM_OK; ++} ++ ++int get_sarl_rls(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if 
(copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ ctl.val = ((SARL_CTRL_REG >> 12) & 0x1ff); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_sarl_enable(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ SARL_CTRL_REG &= (~(0x1 << 31)); ++ SARL_CTRL_REG |= (ctl.val << 31); ++ return CAVM_OK; ++} ++ ++int get_sarl_enable(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ctl.val = ((SARL_CTRL_REG >> 31 ) & 0x1); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++int set_sarl_set(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ SARL_CTRL_REG &= (~0x1ff); ++ SARL_CTRL_REG |= ctl.val; ++ return CAVM_OK; ++} ++ ++int get_sarl_set(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ ctl.val = ((SARL_CTRL_REG) & 0x1ff); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_sarl_oq(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ switch (ctl.gyr) ++ { ++ case 0: // green ++ { ++ SARL_OQ_GTH_REG &= (~(0xff << ctl.tc*8)); ++ SARL_OQ_GTH_REG |= (ctl.val << ctl.tc*8); ++ break; ++ } ++ case 1: // yellow ++ { ++ SARL_OQ_YTH_REG &= (~(0xff << ctl.tc*8)); ++ SARL_OQ_YTH_REG |= (ctl.val << ctl.tc*8); ++ break; ++ } ++ case 2: // red ++ { ++ SARL_OQ_RTH_REG &= (~(0xff << ctl.tc*8)); ++ SARL_OQ_RTH_REG |= (ctl.val << ctl.tc*8); ++ break; ++ } ++ } ++ return CAVM_OK; ++} ++ 
++int get_sarl_oq(struct ifreq *ifr) ++{ ++ CNS3XXXSARLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ ++ switch (ctl.gyr) ++ { ++ case 0: // green ++ { ++ ctl.val = ((SARL_OQ_GTH_REG >> ctl.tc*8) & 0xff); ++ break; ++ } ++ case 1: // yellow ++ { ++ ctl.val = ((SARL_OQ_YTH_REG >> ctl.tc*8) & 0xff); ++ break; ++ } ++ case 2: // red ++ { ++ ctl.val = ((SARL_OQ_RTH_REG >> ctl.tc*8) & 0xff); ++ break; ++ } ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXSARLEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_queue_weight(struct ifreq *ifr) ++{ ++ CNS3XXXQueueWeightEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXQueueWeightEntry)) ) ++ return -EFAULT; ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ QUEUE_WEIGHT_SET(0, ctl) ++ return 0; ++ } ++ case 1: ++ { ++ QUEUE_WEIGHT_SET(1, ctl) ++ return 0; ++ } ++ case 2: ++ { ++ QUEUE_WEIGHT_SET(2, ctl) ++ return 0; ++ } ++ case 3: // cpu port ++ { ++ CPU_PRI_CTRL_REG &= ~(0x3ffff); ++ CPU_PRI_CTRL_REG |= (ctl.sch_mode << 16); ++ CPU_PRI_CTRL_REG |= (ctl.q0_w); ++ CPU_PRI_CTRL_REG |= (ctl.q1_w << 4); ++ CPU_PRI_CTRL_REG |= (ctl.q2_w << 8); ++ CPU_PRI_CTRL_REG |= (ctl.q3_w << 12); ++ return 0; ++ } ++ case 4: // PPE port ++ { ++ HNAT_PRI_CTRL_REG &= ~(0x3ffff); ++ HNAT_PRI_CTRL_REG |= (ctl.sch_mode << 16); ++ HNAT_PRI_CTRL_REG |= (ctl.q0_w); ++ HNAT_PRI_CTRL_REG |= (ctl.q1_w << 4); ++ HNAT_PRI_CTRL_REG |= (ctl.q2_w << 8); ++ HNAT_PRI_CTRL_REG |= (ctl.q3_w << 12); ++ return 0; ++ } ++ default: ++ { ++ return -EFAULT; ++ } ++ } ++} ++ ++int get_queue_weight(struct ifreq *ifr) ++{ ++ CNS3XXXQueueWeightEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXQueueWeightEntry)) ) ++ return -EFAULT; ++ ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ QUEUE_WEIGHT_GET(0, ctl) ++ break; ++ } ++ case 1: ++ { ++ QUEUE_WEIGHT_GET(1, ctl) ++ break; ++ } ++ case 2: ++ { ++ QUEUE_WEIGHT_GET(2, ctl) ++ break; ++ } ++ 
case 3: ++ { ++ ctl.sch_mode = ((CPU_PRI_CTRL_REG >> 16 ) & 0x3); ++ ctl.q0_w = ((CPU_PRI_CTRL_REG >> 0 ) & 0x7); ++ ctl.q1_w = ((CPU_PRI_CTRL_REG >> 4 ) & 0x7); ++ ctl.q2_w = ((CPU_PRI_CTRL_REG >> 8 ) & 0x7); ++ ctl.q3_w = ((CPU_PRI_CTRL_REG >> 12 ) & 0x7); ++ break; ++ } ++ case 4: ++ { ++ ctl.sch_mode = ((HNAT_PRI_CTRL_REG >> 16 ) & 0x3); ++ ctl.q0_w = ((HNAT_PRI_CTRL_REG >> 0 ) & 0x7); ++ ctl.q1_w = ((HNAT_PRI_CTRL_REG >> 4 ) & 0x7); ++ ctl.q2_w = ((HNAT_PRI_CTRL_REG >> 8 ) & 0x7); ++ ctl.q3_w = ((HNAT_PRI_CTRL_REG >> 12 ) & 0x7); ++ break; ++ } ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXQueueWeightEntry)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_rate_limit(struct ifreq *ifr) ++{ ++ CNS3XXXRateLimitEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXRateLimitEntry)) ) ++ return -EFAULT; ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ RATE_CTRL_REG &= (~(0x7f << 8)); ++ RATE_CTRL_REG |= ( ctl.band_width << 8); ++ RATE_CTRL_REG &= (~(0x3)); ++ RATE_CTRL_REG |= ctl.base_rate; ++ return 0; ++ } ++ case 1: ++ { ++ RATE_CTRL_REG &= (~(0x7f << 16)); ++ RATE_CTRL_REG |= ( ctl.band_width << 16); ++ RATE_CTRL_REG &= (~(0x3 << 2)); ++ RATE_CTRL_REG |= (ctl.base_rate << 2); ++ return 0; ++ } ++ case 2: ++ { ++ RATE_CTRL_REG &= (~(0x7f << 24)); ++ RATE_CTRL_REG |= ( ctl.band_width << 24); ++ RATE_CTRL_REG &= (~(0x3 << 4)); ++ RATE_CTRL_REG |= (ctl.base_rate << 4); ++ return 0; ++ } ++ case 3: // port 0 extra dma ++ { ++ TC_CTRL_REG &= (~0x7f); ++ TC_CTRL_REG |= ctl.band_width; ++ RATE_CTRL_REG &= (~(0x3 << 6)); ++ RATE_CTRL_REG |= (ctl.base_rate << 6); ++ return 0; ++ } ++ default: ++ { ++ return -EFAULT; ++ } ++ } ++} ++ ++int get_rate_limit(struct ifreq *ifr) ++{ ++ CNS3XXXRateLimitEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXRateLimitEntry)) ) ++ return -EFAULT; ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ ctl.band_width = (RATE_CTRL_REG >> 8) & 0x7f; ++ ctl.base_rate = 
RATE_CTRL_REG & 0x3; ++ break; ++ } ++ case 1: ++ { ++ ctl.band_width = (RATE_CTRL_REG >> 16) & 0x7f; ++ ctl.base_rate = (RATE_CTRL_REG >> 2) & 0x3; ++ break; ++ } ++ case 2: ++ { ++ ctl.band_width = (RATE_CTRL_REG >> 24) & 0x7f; ++ ctl.base_rate = (RATE_CTRL_REG >> 4) & 0x3; ++ break; ++ } ++ case 3: // port 0 extra dma ++ { ++ ctl.band_width = (TC_CTRL_REG) & 0x7f; ++ ctl.base_rate = (RATE_CTRL_REG >> 6) & 0x3; ++ break; ++ } ++ default: ++ { ++ return -EFAULT; ++ } ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXRateLimitEntry)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_fc(struct ifreq *ifr) ++{ ++ CNS3XXXFCEntry ctl; ++ u32 port_offset[]={0x0c, 0x10, 0x18, 0x14}; // 0x14 is cpu port offset ++ u32 val=0; ++ ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXFCEntry)) ) ++ return -EFAULT; ++ ++ val = SWITCH_REG_VALUE(port_offset[ctl.port]); ++ if (ctl.port == 3) { // cpu port, only can set rx fc ++ val &= (~(1 << 31)); ++ if (ctl.fc_en) ++ val |= (1 << 31); ++ } else { ++ val &= (~(1 << 11)); // disable rx fc ++ val &= (~(1 << 12)); // disable tx fc ++ val |= (ctl.fc_en << 11); ++ } ++ ++ SWITCH_REG_VALUE(port_offset[ctl.port]) = val; ++ return CAVM_OK; ++} ++ ++int get_fc(struct ifreq *ifr) ++{ ++ CNS3XXXFCEntry ctl; ++ u32 port_offset[]={0x0c, 0x10, 0x18, 0x14}; // 0x14 is cpu port offset ++ u32 val=0; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXFCEntry)) ) ++ return -EFAULT; ++ ++ val = SWITCH_REG_VALUE(port_offset[ctl.port]); ++ if (ctl.port == 3) { // cpu port, only can set rx fc ++ ctl.fc_en = ((val >> 31) & 1); ++ } else { ++ ctl.fc_en = ((val >> 11) & 3); ++ ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXFCEntry)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_ivl(struct ifreq *ifr) ++{ ++ CNS3XXXIVLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXIVLEntry)) ) ++ return -EFAULT; ++ ++ cns3xxx_ivl(ctl.enable); ++ ++ return CAVM_OK; ++} ++ ++int 
get_ivl(struct ifreq *ifr) ++{ ++ CNS3XXXIVLEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXIVLEntry)) ) ++ return -EFAULT; ++ ++ ctl.enable = ((MAC_GLOB_CFG_REG >> 7) & 0x1); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXIVLEntry)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_wan_port(struct ifreq *ifr) ++{ ++ CNS3XXXWANPortEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXWANPortEntry)) ) ++ return -EFAULT; ++ VLAN_CFG &= (~(0x1f << 8)); ++ VLAN_CFG |= (ctl.wan_port << 8); ++ ++ return CAVM_OK; ++} ++int get_wan_port(struct ifreq *ifr) ++{ ++ CNS3XXXWANPortEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXWANPortEntry)) ) ++ return -EFAULT; ++ ++ ctl.wan_port = ((VLAN_CFG >> 8) & 0x1f); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXWANPortEntry)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_pvid(struct ifreq *ifr) ++{ ++ CNS3XXXPVIDEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPVIDEntry)) ) ++ return -EFAULT; ++ cns3xxx_set_pvid(ctl.which_port, ctl.pvid); ++ ++ return CAVM_OK; ++} ++ ++int get_pvid(struct ifreq *ifr) ++{ ++ CNS3XXXPVIDEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPVIDEntry)) ) ++ return -EFAULT; ++ ++ ctl.pvid = cns3xxx_get_pvid(ctl.which_port); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXPVIDEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_qa(struct ifreq *ifr) ++{ ++ CNS3XXXQAEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXQAEntry)) ) ++ return -EFAULT; ++ ++ MAC_GLOB_CFG_EXT_REG &= ~(0x7 << 27); ++ MAC_GLOB_CFG_EXT_REG |= (ctl.qa << 27); ++ ++ return CAVM_OK; ++} ++ ++int get_qa(struct ifreq *ifr) ++{ ++ CNS3XXXQAEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXQAEntry)) ) ++ return -EFAULT; ++ ++ ctl.qa = (MAC_GLOB_CFG_EXT_REG >> 27) & 0x7; ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, 
sizeof(CNS3XXXQAEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int get_packet_max_len(struct ifreq *ifr) ++{ ++ CNS3XXXMaxLenEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXMaxLenEntry)) ) ++ return -EFAULT; ++ ++ ctl.max_len = (PHY_AUTO_ADDR_REG >> 30) & 0x3; ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXMaxLenEntry)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int set_packet_max_len(struct ifreq *ifr) ++{ ++ CNS3XXXMaxLenEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXMaxLenEntry)) ) ++ return -EFAULT; ++ ++ PHY_AUTO_ADDR_REG &= (~(3 << 30)); ++ PHY_AUTO_ADDR_REG |= (ctl.max_len << 30); ++ ++ return CAVM_OK; ++} ++ ++int set_udp_range(struct ifreq *ifr) ++{ ++ CNS3XXXUdpRangeEtypeControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXUdpRangeEtypeControl)) ) ++ return -EFAULT; ++ ++ switch (conf.udp_range_num) ++ { ++ case 0: ++ { ++ UDP_RANGE0_REG = 0; ++ UDP_RANGE0_REG |= conf.port_start; ++ UDP_RANGE0_REG |= (conf.port_end << 16); ++ break; ++ } ++ case 1: ++ { ++ UDP_RANGE1_REG = 0; ++ UDP_RANGE1_REG |= conf.port_start; ++ UDP_RANGE1_REG |= (conf.port_end << 16); ++ break; ++ } ++ case 2: ++ { ++ UDP_RANGE2_REG = 0; ++ UDP_RANGE2_REG |= conf.port_start; ++ UDP_RANGE2_REG |= (conf.port_end << 16); ++ break; ++ } ++ case 3: ++ { ++ UDP_RANGE3_REG = 0; ++ UDP_RANGE3_REG |= conf.port_start; ++ UDP_RANGE3_REG |= (conf.port_end << 16); ++ break; ++ } ++ } ++ ++ return CAVM_OK; ++} ++ ++int get_udp_range(struct ifreq *ifr) ++{ ++ CNS3XXXUdpRangeEtypeControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXUdpRangeEtypeControl)) ) ++ return -EFAULT; ++ ++ switch (conf.udp_range_num) ++ { ++ case 0: ++ { ++ conf.port_start = (UDP_RANGE0_REG & 0xffff); ++ conf.port_end = ((UDP_RANGE0_REG >> 16 )& 0xffff); ++ break; ++ } ++ case 1: ++ { ++ conf.port_start = (UDP_RANGE1_REG & 0xffff); ++ conf.port_end = ((UDP_RANGE1_REG >> 16 )& 0xffff); ++ break; ++ } ++ case 
2: ++ { ++ conf.port_start = (UDP_RANGE2_REG & 0xffff); ++ conf.port_end = ((UDP_RANGE2_REG >> 16 )& 0xffff); ++ break; ++ } ++ case 3: ++ { ++ conf.port_start = (UDP_RANGE3_REG & 0xffff); ++ conf.port_end = ((UDP_RANGE3_REG >> 16 )& 0xffff); ++ break; ++ } ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXEtypeControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int get_etype(struct ifreq *ifr) ++{ ++ CNS3XXXEtypeControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXEtypeControl)) ) ++ return -EFAULT; ++ switch (conf.etype_num) ++ { ++ case 0: ++ { ++ conf.val = (ETYPE1_ETYPE0_REG & 0xffff); ++ conf.pri = (PRIO_ETYPE_UDP_REG & 0x7); ++ break; ++ } ++ case 1: ++ { ++ conf.val = ((ETYPE1_ETYPE0_REG >> 16 )& 0xffff); ++ conf.pri = ((PRIO_ETYPE_UDP_REG >> 4) & 0x7); ++ break; ++ } ++ case 2: ++ { ++ conf.val = (ETYPE3_ETYPE2_REG & 0xffff); ++ conf.pri = ((PRIO_ETYPE_UDP_REG >> 8) & 0x7); ++ break; ++ } ++ case 3: ++ { ++ conf.val = ((ETYPE3_ETYPE2_REG >> 16 )& 0xffff); ++ conf.pri = ((PRIO_ETYPE_UDP_REG >> 12) & 0x7); ++ break; ++ } ++ } ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXEtypeControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++} ++ ++int set_etype(struct ifreq *ifr) ++{ ++ CNS3XXXEtypeControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXEtypeControl)) ) ++ return -EFAULT; ++ switch (conf.etype_num) ++ { ++ case 0: ++ { ++ ETYPE1_ETYPE0_REG &= (~0xffff); ++ ETYPE1_ETYPE0_REG |= conf.val; ++ ++ PRIO_ETYPE_UDP_REG &= (~7); ++ PRIO_ETYPE_UDP_REG |= (conf.pri); ++ break; ++ } ++ case 1: ++ { ++ ETYPE1_ETYPE0_REG &= (~(0xffff << 16)); ++ ETYPE1_ETYPE0_REG |= (conf.val << 16); ++ ++ PRIO_ETYPE_UDP_REG &= (~(7 << 4)); ++ PRIO_ETYPE_UDP_REG |= (conf.pri << 4); ++ break; ++ } ++ case 2: ++ { ++ ETYPE3_ETYPE2_REG &= (~0xffff); ++ ETYPE3_ETYPE2_REG |= conf.val; ++ ++ PRIO_ETYPE_UDP_REG &= (~(7 << 8)); ++ PRIO_ETYPE_UDP_REG |= (conf.pri << 8); ++ break; ++ } ++ case 3: ++ { ++ 
ETYPE3_ETYPE2_REG &= (~(0xffff << 16)); ++ ETYPE3_ETYPE2_REG |= (conf.val << 16); ++ ++ PRIO_ETYPE_UDP_REG &= (~(7 << 12)); ++ PRIO_ETYPE_UDP_REG |= (conf.pri << 12); ++ break; ++ } ++ } ++ return CAVM_OK; ++} ++ ++int get_pri_ip_dscp(struct ifreq *ifr) ++{ ++ CNS3XXXPriIpDscpControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXPriIpDscpControl)) ) ++ return -EFAULT; ++ ++ if ( 0 <= conf.ip_dscp_num && conf.ip_dscp_num <= 7) { ++ conf.pri = ((PRIO_IPDSCP_7_0_REG >> (conf.ip_dscp_num * 4)) & 0x7); ++ } else if ( 8 <= conf.ip_dscp_num && conf.ip_dscp_num <= 15) { ++ conf.pri = ((PRIO_IPDSCP_15_8_REG >> ((conf.ip_dscp_num-8) * 4)) & 0x7); ++ } else if ( 16 <= conf.ip_dscp_num && conf.ip_dscp_num <= 23) { ++ conf.pri = ((PRIO_IPDSCP_23_16_REG >> ((conf.ip_dscp_num-16) * 4)) & 0x7); ++ } else if ( 24 <= conf.ip_dscp_num && conf.ip_dscp_num <= 31) { ++ conf.pri = ((PRIO_IPDSCP_31_24_REG >> ((conf.ip_dscp_num-24) * 4)) & 0x7); ++ } else if ( 32 <= conf.ip_dscp_num && conf.ip_dscp_num <= 39) { ++ conf.pri = ((PRIO_IPDSCP_39_32_REG >> ((conf.ip_dscp_num-32) * 4)) & 0x7); ++ } else if ( 40 <= conf.ip_dscp_num && conf.ip_dscp_num <= 47) { ++ conf.pri = ((PRIO_IPDSCP_47_40_REG >> ((conf.ip_dscp_num-40) * 4)) & 0x7); ++ } else if ( 48 <= conf.ip_dscp_num && conf.ip_dscp_num <= 55) { ++ conf.pri = ((PRIO_IPDSCP_55_48_REG >> ((conf.ip_dscp_num-48) * 4)) & 0x7); ++ } else if ( 56 <= conf.ip_dscp_num && conf.ip_dscp_num <= 63) { ++ conf.pri = ((PRIO_IPDSCP_63_56_REG >> ((conf.ip_dscp_num-56) * 4)) & 0x7); ++ } else { ++ return CAVM_ERR; ++ } ++ ++ ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXPriIpDscpControl)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++ ++int set_pri_ip_dscp(struct ifreq *ifr) ++{ ++ CNS3XXXPriIpDscpControl conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXPriIpDscpControl)) ) ++ return -EFAULT; ++ ++ if ( 0 <= conf.ip_dscp_num && conf.ip_dscp_num <= 7) { ++ PRIO_IPDSCP_7_0_REG &= (~(0x7 << (conf.ip_dscp_num * 
4) ) ); ++ PRIO_IPDSCP_7_0_REG |= (conf.pri << (conf.ip_dscp_num * 4)); ++ } else if ( 8 <= conf.ip_dscp_num && conf.ip_dscp_num <= 15) { ++ PRIO_IPDSCP_15_8_REG &= (~(0x7 << ((conf.ip_dscp_num-8) * 4) ) ); ++ PRIO_IPDSCP_15_8_REG |= (conf.pri << ((conf.ip_dscp_num-8) * 4)); ++ } else if ( 16 <= conf.ip_dscp_num && conf.ip_dscp_num <= 23) { ++ PRIO_IPDSCP_23_16_REG &= (~(0x7 << ((conf.ip_dscp_num-16) * 4) ) ); ++ PRIO_IPDSCP_23_16_REG |= (conf.pri << ((conf.ip_dscp_num-16) * 4)); ++ ++ } else if ( 24 <= conf.ip_dscp_num && conf.ip_dscp_num <= 31) { ++ PRIO_IPDSCP_31_24_REG &= (~(0x7 << ((conf.ip_dscp_num-24) * 4) ) ); ++ PRIO_IPDSCP_31_24_REG |= (conf.pri << ((conf.ip_dscp_num-24) * 4)); ++ ++ } else if ( 32 <= conf.ip_dscp_num && conf.ip_dscp_num <= 39) { ++ PRIO_IPDSCP_39_32_REG &= (~(0x7 << ((conf.ip_dscp_num-32) * 4) ) ); ++ PRIO_IPDSCP_39_32_REG |= (conf.pri << ((conf.ip_dscp_num-32) * 4)); ++ ++ } else if ( 40 <= conf.ip_dscp_num && conf.ip_dscp_num <= 47) { ++ PRIO_IPDSCP_47_40_REG &= (~(0x7 << ((conf.ip_dscp_num-40) * 4) ) ); ++ PRIO_IPDSCP_47_40_REG |= (conf.pri << ((conf.ip_dscp_num-40) * 4)); ++ } else if ( 48 <= conf.ip_dscp_num && conf.ip_dscp_num <= 55) { ++ PRIO_IPDSCP_55_48_REG &= (~(0x7 << ((conf.ip_dscp_num-48) * 4) ) ); ++ PRIO_IPDSCP_55_48_REG |= (conf.pri << ((conf.ip_dscp_num-48) * 4)); ++ } else if ( 56 <= conf.ip_dscp_num && conf.ip_dscp_num <= 63) { ++ PRIO_IPDSCP_63_56_REG &= (~(0x7 << ((conf.ip_dscp_num-56) * 4) ) ); ++ PRIO_IPDSCP_63_56_REG |= (conf.pri << ((conf.ip_dscp_num-56) * 4)); ++ } else { ++ return CAVM_ERR; ++ } ++ return CAVM_OK; ++} ++ ++ ++int bcm53115M_reg_read_ioctl(struct ifreq *ifr) ++{ ++ int bcm53115M_reg_read(int page, int offset, u8 *buf, int len); ++ CNS3XXXBCM53115M conf; ++ int __init_or_module gpio_direction_output(unsigned int pin, unsigned int state); ++ ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXBCM53115M)) ) ++ return -EFAULT; ++ printk("conf.page: %x\n", conf.page); ++ printk("conf.offset: 
%x\n", conf.offset); ++ printk("conf.data_len: %x\n", conf.data_len); ++ switch (conf.data_len) ++ { ++ case 1: ++ { ++ bcm53115M_reg_read(conf.page, conf.offset, (u8 *)&conf.u8_val, 1); ++ printk("conf.u8_val: %x\n", conf.u8_val); ++ break; ++ } ++ case 2: ++ { ++ bcm53115M_reg_read(conf.page, conf.offset, (u8 *)&conf.u16_val, 2); ++ printk("conf.u16_val: %x\n", conf.u16_val); ++ break; ++ } ++ case 4: ++ { ++ bcm53115M_reg_read(conf.page, conf.offset, (u8 *)&conf.u32_val, 4); ++ printk("conf.u32_val: %x\n", conf.u32_val); ++ break; ++ } ++ default: ++ { ++ printk("[kernel mode]: don't support date length: %d\n", conf.data_len); ++ } ++ } ++ ++ ++ ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXBCM53115M)) ) ++ return -EFAULT; ++ return CAVM_OK; ++} ++ ++int bcm53115M_reg_write_ioctl(struct ifreq *ifr) ++{ ++ int bcm53115M_reg_write(int page, int offset, u8 *buf, int len); ++ CNS3XXXBCM53115M conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXBCM53115M)) ) ++ return -EFAULT; ++ ++ switch (conf.data_len) ++ { ++ case 1: ++ { ++ bcm53115M_reg_write(conf.page, conf.offset, (u8 *)&conf.u8_val, 1); ++ break; ++ } ++ case 2: ++ { ++ bcm53115M_reg_write(conf.page, conf.offset, (u8 *)&conf.u16_val, 2); ++ break; ++ } ++ case 4: ++ { ++ bcm53115M_reg_write(conf.page, conf.offset, (u8 *)&conf.u32_val, 4); ++ break; ++ } ++ default: ++ { ++ printk("[kernel mode]: don't support date length: %d\n", conf.data_len); ++ } ++ } ++ return CAVM_OK; ++} ++ ++#if 0 ++int get_rxring(struct ifreq *ifr) ++{ ++ CNS3XXXRingStatus conf; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXRingStatus)) ) ++ return -EFAULT; ++ conf.rx_ring=g_ring_info.rx_ring; ++ conf.tx_ring=0; ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXRingStatus)) ) ++ return -EFAULT; ++} ++#endif ++ ++int dump_mib_counter(struct ifreq *ifr) ++{ ++ CNS3XXXMIBCounter conf; ++ int addr=0,i=0; ++ ++ if (copy_from_user(&conf, ifr->ifr_data, sizeof(CNS3XXXMIBCounter)) ) ++ return 
-EFAULT; ++ ++ for (addr=0x300; addr <= 0x334 ; addr+=4) ++ conf.mib[i++]=SWITCH_REG_VALUE(addr); ++ for (addr=0x400; addr <= 0x434 ; addr+=4) ++ conf.mib[i++]=SWITCH_REG_VALUE(addr); ++ for (addr=0x600; addr <= 0x634 ; addr+=4) ++ conf.mib[i++]=SWITCH_REG_VALUE(addr); ++ // cpu mib counter ++ for (addr=0x500; addr <= 0x528 ; addr+=4) ++ conf.mib[i++]=SWITCH_REG_VALUE(addr); ++ conf.mib_len=i; ++ if (copy_to_user(ifr->ifr_data, &conf, sizeof(CNS3XXXMIBCounter)) ) ++ return -EFAULT; ++ return 0; ++} ++ ++// reference e100.c ++int cns3xxx_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ++{ ++ CNS3XXXIoctlCmd ioctl_cmd; ++ ++ //printk("cns3xxx_do_ioctl begin\n"); ++ ++ if (cmd != SIOCDEVPRIVATE) { ++ return -EOPNOTSUPP; ++ } ++ if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(CNS3XXXIoctlCmd))) ++ return -EFAULT; ++ ++ //printk("ioctl_cmd: %d\n", ioctl_cmd); ++ switch (ioctl_cmd) { ++ case CNS3XXX_ARP_REQUEST_SET: ++ { ++ CNS3XXXArpRequestControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXArpRequestControl)) ) ++ return -EFAULT; ++ ++ (ctl.val==0) ? (MAC_GLOB_CFG_REG &= (~(1 << 23)) ): (MAC_GLOB_CFG_REG |= (1 << 23) ); ++ ++ } ++ ++ case CNS3XXX_ARP_REQUEST_GET: ++ { ++ CNS3XXXArpRequestControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXArpRequestControl)) ) ++ return -EFAULT; ++ ++ ctl.val = ((MAC_GLOB_CFG_REG >> 23) & 1); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXArpRequestControl)) ) ++ return -EFAULT; ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_HOL_PREVENT_SET: ++ { ++ CNS3XXXHOLPreventControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXHOLPreventControl)) ) ++ return -EFAULT; ++ (ctl.enable == 1) ? 
(TC_CTRL_REG |= (1 << 29)) : (TC_CTRL_REG &= (~(1 << 29))) ; ++ ++ return CAVM_OK; ++ } ++ case CNS3XXX_HOL_PREVENT_GET: ++ { ++ CNS3XXXHOLPreventControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXHOLPreventControl)) ) ++ return -EFAULT; ++ ++ ctl.enable = ((TC_CTRL_REG >> 29) & 0x1); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXHOLPreventControl)) ) ++ return -EFAULT; ++ return CAVM_OK; ++ } ++ ++ // for S component or C conponent ++ case CNS3XXX_BRIDGE_SET: ++ { ++ CNS3XXXBridgeControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXBridgeControl)) ) ++ return -EFAULT; ++ (ctl.type == 1) ? (VLAN_CFG |= (1 << 1)) : (VLAN_CFG &= (~(1 << 1))) ; ++ ++ ++ } ++ case CNS3XXX_BRIDGE_GET: ++ { ++ CNS3XXXBridgeControl ctl; ++ ++ ctl.type = ((VLAN_CFG >> 1) & 0x1); ++ printk("[kernel mode] ctl.type: %d\n", ctl.type); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXBridgeControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_PORT_NEIGHBOR_SET: ++ { ++ CNS3XXXPortNeighborControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPortNeighborControl)) ) ++ return -EFAULT; ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ (ctl.type == 1) ? (VLAN_CFG |= (1 << 4)) : (VLAN_CFG &= (~(1 << 4))) ; ++ return 0; ++ } ++ case 1: ++ { ++ (ctl.type == 1) ? (VLAN_CFG |= (1 << 5)) : (VLAN_CFG &= (~(1 << 5))) ; ++ return 0; ++ } ++ case 2: ++ { ++ (ctl.type == 1) ? (VLAN_CFG |= (1 << 7)) : (VLAN_CFG &= (~(1 << 7))) ; ++ return 0; ++ } ++ case 3: // cpu port ++ { ++ (ctl.type == 1) ? 
(VLAN_CFG |= (1 << 6)) : (VLAN_CFG &= (~(1 << 6))) ; ++ return 0; ++ } ++ default: ++ return -EFAULT; ++ } ++ ++ } ++ ++ case CNS3XXX_PORT_NEIGHBOR_GET: ++ { ++ CNS3XXXPortNeighborControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPortNeighborControl)) ) ++ return -EFAULT; ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ ctl.type = ((VLAN_CFG >> 4 ) & 0x1); ++ break; ++ } ++ case 1: ++ { ++ ctl.type = ((VLAN_CFG >> 5 ) & 0x1); ++ break; ++ } ++ case 2: ++ { ++ ctl.type = ((VLAN_CFG >> 7 ) & 0x1); ++ break; ++ } ++ case 3: // cpu port ++ { ++ ctl.type = ((VLAN_CFG >> 6 ) & 0x1); ++ break; ++ } ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXPortNeighborControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_VLAN_TABLE_LOOKUP: ++ { ++ CNS3XXXVLANTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXVLANTableEntry)) ) ++ return -EFAULT; ++ if (cns3xxx_vlan_table_lookup(&ctl.entry) == CAVM_NOT_FOUND) { ++ return CAVM_NOT_FOUND; ++ } ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXVLANTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ case CNS3XXX_VLAN_TABLE_READ: ++ { ++ CNS3XXXVLANTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXVLANTableEntry)) ) ++ { ++ return -EFAULT; ++ } ++ cns3xxx_vlan_table_read(&ctl.entry); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXVLANTableEntry))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ case CNS3XXX_VLAN_TABLE_ADD: ++ { ++ CNS3XXXVLANTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXVLANTableEntry)) ) ++ return -EFAULT; ++ cns3xxx_vlan_table_add(&ctl.entry); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXVLANTableEntry))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ ++ case CNS3XXX_ARL_TABLE_ADD: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ printk("[kernel mode] CNS3XXX_ARL_TABLE_ADD\n"); ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ 
return -EFAULT; ++ cns3xxx_arl_table_add(&ctl.entry); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ ++ ++ case CNS3XXX_ARL_TABLE_DEL: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ cns3xxx_arl_table_invalid(&ctl.entry); ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ case CNS3XXX_VLAN_TABLE_DEL: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ cns3xxx_arl_table_invalid(&ctl.entry); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ ++ case CNS3XXX_ARL_TABLE_SEARCH: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ if (cns3xxx_arl_table_search(&ctl.entry) == CAVM_NOT_FOUND){ ++ printk("[kernel mode] not found\n"); ++ return CAVM_NOT_FOUND; ++ } ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ case CNS3XXX_ARL_IS_TABLE_END: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ if (cns3xxx_is_arl_table_end() == CAVM_ERR) ++ return CAVM_ERR; ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_ARL_TABLE_SEARCH_AGAIN: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ if (cns3xxx_arl_table_search_again(&ctl.entry) == CAVM_NOT_FOUND) ++ return CAVM_NOT_FOUND; ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ ++ case 
CNS3XXX_ARL_TABLE_FLUSH: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ ++ cns3xxx_arl_table_flush(); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ ++ ++ ++ case CNS3XXX_ARL_TABLE_LOOKUP: ++ { ++ CNS3XXXARLTableEntry ctl; ++ ++ ++ printk("[kernel mode] in CNS3XXX_ARL_TABLE_LOOKUP\n"); ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXARLTableEntry)) ) ++ return -EFAULT; ++ if (cns3xxx_arl_table_lookup(&ctl.entry) == CAVM_NOT_FOUND) ++ return CAVM_NOT_FOUND; ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXARLTableEntry))) ++ return -EFAULT; ++ ++ return CAVM_FOUND; ++ } ++ ++ case CNS3XXX_TC_SET: ++ { ++ CNS3XXXTrafficClassControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXTrafficClassControl)) ) ++ return -EFAULT; ++ TC_CTRL_REG &= (~(0x3 << 30)); ++ TC_CTRL_REG |= (ctl.tc << 30); ++ return CAVM_OK; ++ } ++ case CNS3XXX_TC_GET: ++ { ++ CNS3XXXTrafficClassControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXTrafficClassControl)) ) ++ return -EFAULT; ++ ++ ctl.tc = ((TC_CTRL_REG >> 30) & 0x3); ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXTrafficClassControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_PRI_CTRL_SET: ++ { ++ CNS3XXXPriCtrlControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPriCtrlControl)) ) ++ return -EFAULT; ++ ++ switch (ctl.which_port) ++ { ++ case 0: ++ { ++ MAC0_PRI_CTRL_REG &= (~(0x7 << 24)); ++ MAC0_PRI_CTRL_REG &= (~(0xf << 18)); ++ ++ MAC0_PRI_CTRL_REG |= (ctl.port_pri << 24); ++ ++ MAC0_PRI_CTRL_REG |= (ctl.ether_pri_en << 18); ++ MAC0_PRI_CTRL_REG |= (ctl.vlan_pri_en << 19); ++ MAC0_PRI_CTRL_REG |= (ctl.dscp_pri_en << 20); ++ MAC0_PRI_CTRL_REG |= (ctl.udp_pri_en << 21); ++ break; ++ } ++ case 1: ++ { ++ MAC1_PRI_CTRL_REG &= (~(0x7 << 24)); ++ MAC1_PRI_CTRL_REG &= 
(~(0xf << 18)); ++ ++ MAC1_PRI_CTRL_REG |= (ctl.port_pri << 24); ++ ++ MAC1_PRI_CTRL_REG |= (ctl.ether_pri_en << 18); ++ MAC1_PRI_CTRL_REG |= (ctl.vlan_pri_en << 19); ++ MAC1_PRI_CTRL_REG |= (ctl.dscp_pri_en << 20); ++ MAC1_PRI_CTRL_REG |= (ctl.udp_pri_en << 21); ++ break; ++ } ++ case 2: ++ { ++ MAC2_PRI_CTRL_REG &= (~(0x7 << 24)); ++ MAC2_PRI_CTRL_REG &= (~(0xf << 18)); ++ ++ MAC2_PRI_CTRL_REG |= (ctl.port_pri << 24); ++ ++ MAC2_PRI_CTRL_REG |= (ctl.ether_pri_en << 18); ++ MAC2_PRI_CTRL_REG |= (ctl.vlan_pri_en << 19); ++ MAC2_PRI_CTRL_REG |= (ctl.dscp_pri_en << 20); ++ MAC2_PRI_CTRL_REG |= (ctl.udp_pri_en << 21); ++ break; ++ } ++ case 3: // cpu ++ { ++ printk("[kernel mode] CPU_PRI_CTRL_REG: %#x\n", CPU_PRI_CTRL_REG); ++ CPU_PRI_CTRL_REG &= (~(0x7 << 24)); ++ CPU_PRI_CTRL_REG &= (~(0xf << 18)); ++ ++ CPU_PRI_CTRL_REG |= (ctl.port_pri << 24); ++ ++ CPU_PRI_CTRL_REG |= (ctl.ether_pri_en << 18); ++ CPU_PRI_CTRL_REG |= (ctl.vlan_pri_en << 19); ++ CPU_PRI_CTRL_REG |= (ctl.dscp_pri_en << 20); ++ CPU_PRI_CTRL_REG |= (ctl.udp_pri_en << 21); ++ break; ++ } ++ } ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_PRI_CTRL_GET: ++ { ++ CNS3XXXPriCtrlControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXPriCtrlControl)) ) ++ return -EFAULT; ++ ++ ++ if (copy_to_user(ifr->ifr_data, &ctl, sizeof(CNS3XXXPriCtrlControl)) ) ++ return -EFAULT; ++ ++ return CAVM_OK; ++ } ++ ++ case CNS3XXX_DMA_RING_CTRL_SET: ++ { ++ CNS3XXXDmaRingCtrlControl ctl; ++ ++ if (copy_from_user(&ctl, ifr->ifr_data, sizeof(CNS3XXXDmaRingCtrlControl)) ) ++ return -EFAULT; ++ ++ (ctl.ts_double_ring_en == 0) ? DMA_RING_CTRL_REG &= (~(0x1 << 16)) : (DMA_RING_CTRL_REG |= (ctl.ts_double_ring_en << 16)); ++ (ctl.fs_double_ring_en == 0) ? DMA_RING_CTRL_REG &= (~(0x1 << 0)) : (DMA_RING_CTRL_REG |= (ctl.fs_double_ring_en << 0)); ++ (ctl.fs_pkt_allocate == 0) ? 
DMA_RING_CTRL_REG &= (~(0x1 << 1)) : (DMA_RING_CTRL_REG |= (ctl.fs_pkt_allocate << 1)); ++ } ++ ++ case CNS3XXX_PRI_IP_DSCP_SET: ++ { ++ return set_pri_ip_dscp(ifr); ++ } ++ case CNS3XXX_PRI_IP_DSCP_GET: ++ { ++ return get_pri_ip_dscp(ifr); ++ } ++ ++ case CNS3XXX_ETYPE_SET: ++ { ++ return set_etype(ifr); ++ } ++ case CNS3XXX_ETYPE_GET: ++ { ++ return get_etype(ifr); ++ } ++ ++ case CNS3XXX_UDP_RANGE_SET: ++ { ++ return set_udp_range(ifr); ++ } ++ case CNS3XXX_UDP_RANGE_GET: ++ { ++ return get_udp_range(ifr); ++ } ++ ++ case CNS3XXX_RATE_LIMIT_SET: ++ { ++ return set_rate_limit(ifr); ++ } ++ case CNS3XXX_RATE_LIMIT_GET: ++ { ++ return get_rate_limit(ifr); ++ } ++ case CNS3XXX_QUEUE_WEIGHT_SET: ++ { ++ return set_queue_weight(ifr); ++ } ++ case CNS3XXX_QUEUE_WEIGHT_GET: ++ { ++ return get_queue_weight(ifr); ++ } ++ ++ case CNS3XXX_FC_RLS_SET: ++ { ++ return set_fc_rls(ifr); ++ } ++ case CNS3XXX_FC_RLS_GET: ++ { ++ return get_fc_rls(ifr); ++ } ++ ++ case CNS3XXX_FC_SET_SET: ++ { ++ return set_fc_set(ifr); ++ } ++ case CNS3XXX_FC_SET_GET: ++ { ++ return get_fc_set(ifr); ++ } ++ ++ case CNS3XXX_SARL_RLS_SET: ++ { ++ return set_sarl_rls(ifr); ++ } ++ case CNS3XXX_SARL_RLS_GET: ++ { ++ return get_sarl_rls(ifr); ++ } ++ ++ case CNS3XXX_SARL_SET_SET: ++ { ++ return set_sarl_set(ifr); ++ } ++ case CNS3XXX_SARL_SET_GET: ++ { ++ return get_sarl_set(ifr); ++ } ++ ++ case CNS3XXX_SARL_OQ_SET: ++ { ++ return set_sarl_oq(ifr); ++ } ++ case CNS3XXX_SARL_OQ_GET: ++ { ++ return get_sarl_oq(ifr); ++ } ++ ++ case CNS3XXX_SARL_ENABLE_SET: ++ { ++ return set_sarl_enable(ifr); ++ } ++ case CNS3XXX_SARL_ENABLE_GET: ++ { ++ return get_sarl_enable(ifr); ++ } ++ ++ case CNS3XXX_FC_SET: ++ { ++ return set_fc(ifr); ++ } ++ case CNS3XXX_FC_GET: ++ { ++ return get_fc(ifr); ++ } ++ ++ case CNS3XXX_IVL_SET: ++ { ++ return set_ivl(ifr); ++ } ++ case CNS3XXX_IVL_GET: ++ { ++ return get_ivl(ifr); ++ } ++ ++ case CNS3XXX_WAN_PORT_SET: ++ { ++ return set_wan_port(ifr); ++ } ++ case 
CNS3XXX_WAN_PORT_GET: ++ { ++ return get_wan_port(ifr); ++ } ++ ++ case CNS3XXX_PVID_SET: ++ { ++ return set_pvid(ifr); ++ } ++ case CNS3XXX_PVID_GET: ++ { ++ return get_pvid(ifr); ++ } ++ ++ case CNS3XXX_QA_GET: ++ { ++ return get_qa(ifr); ++ } ++ case CNS3XXX_QA_SET: ++ { ++ return set_qa(ifr); ++ } ++ ++ case CNS3XXX_PACKET_MAX_LEN_GET: ++ { ++ return get_packet_max_len(ifr); ++ } ++ case CNS3XXX_PACKET_MAX_LEN_SET: ++ { ++ return set_packet_max_len(ifr); ++ } ++ ++ case CNS3XXX_BCM53115M_REG_READ: ++ { ++ return bcm53115M_reg_read_ioctl(ifr); ++ } ++ case CNS3XXX_BCM53115M_REG_WRITE: ++ { ++ return bcm53115M_reg_write_ioctl(ifr); ++ } ++ ++#if 0 ++ case CNS3XXX_RXRING_STATUS: ++ { ++ return get_rxring(ifr); ++ } ++#endif ++ case CNS3XXX_DUMP_MIB_COUNTER: ++ { ++ return dump_mib_counter(ifr); ++ } ++ ++ ++ default: ++ { ++ printk("[kernel mode] don't match any command\n"); ++ break; ++ } ++ ++ } // end switch (ioctl_cmd) ++ return 0; ++} ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++static int cns3xxx_poll(struct napi_struct *napi, int budget) ++{ ++ ++ CNS3XXXPrivate *sp = container_of(napi, CNS3XXXPrivate, napi); ++ int work_done = 0; ++ int work_to_do = budget; // define minima value ++ ++ cns3xxx_receive_packet(sp, 0, &work_done, work_to_do); ++ ++ budget -= work_done; ++ ++ if (work_done) { ++ if (test_bit(0, (unsigned long *)&sp->is_qf) == 1){ ++ clear_bit(0, (unsigned long *)&sp->is_qf); ++ enable_rx_dma(sp->ring_index, 1); ++ return 1; ++ } ++ } else { ++ //netif_rx_complete(napi_dev, &sp->napi); ++ napi_complete(napi); ++#ifdef CNS3XXX_USE_MASK ++ cns3xxx_write_pri_mask(0xf0); ++#else ++ if (sp->ring_index == 0) ++ cns3xxx_enable_irq(FSRC_RING0_INTERRUPT_ID); ++ else ++ cns3xxx_enable_irq(FSRC_RING1_INTERRUPT_ID); ++#endif ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++static struct net_device_stats *cns3xxx_get_stats(struct net_device *dev) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ ++ return &priv->stats; ++} ++ ++static int cns3xxx_change_mtu(struct 
net_device *dev, int new_mtu) ++{ ++ if (new_mtu < cns3xxx_min_mtu() || new_mtu > cns3xxx_max_mtu()) ++ return -EINVAL; ++ ++ dev->mtu = new_mtu; ++ ++ return 0; ++} ++ ++static void cns3xxx_timeout(struct net_device *dev) ++{ ++ //star_gsw_enable(dev); ++ netif_wake_queue(dev); ++ dev->trans_start = jiffies; ++} ++ ++#ifdef LINUX2631 ++static const struct net_device_ops cns3xxx_netdev_ops = { ++ .ndo_open = cns3xxx_open, ++ .ndo_stop = cns3xxx_close, ++ .ndo_start_xmit = cns3xxx_send_packet, ++ //.ndo_validate_addr = eth_validate_addr, ++ //.ndo_set_multicast_list = cns3xxx_set_multicast_list, ++ .ndo_set_mac_address = cns3xxx_set_mac_addr, ++ .ndo_change_mtu = cns3xxx_change_mtu, ++ .ndo_do_ioctl = cns3xxx_do_ioctl, ++ .ndo_tx_timeout = cns3xxx_timeout, ++ .ndo_get_stats = cns3xxx_get_stats, ++ ++#if defined(CNS3XXX_VLAN_8021Q) ++ .ndo_vlan_rx_register = cns3xxx_vlan_rx_register, ++ //.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, ++ .ndo_vlan_rx_kill_vid = cns3xxx_vlan_rx_kill_vid, ++#endif ++ ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ .ndo_poll_controller = cns3xxx_netpoll, ++#endif ++}; ++#endif // LINUX2631 ++ ++static int __init cns3xxx_probe(RingInfo ring_info) ++{ ++ void cns3xxx_set_ethtool_ops(struct net_device *netdev); ++ ++ int netdev_size = sizeof(net_device_prive)/sizeof(NetDevicePriv); ++ int i=0, err=0; ++ struct net_device *netdev=0; ++ CNS3XXXPrivate *priv=0; ++ struct sockaddr sock_addr; ++ ++ for (i=0 ; i < netdev_size ; ++i) { ++ if (init_port & (1 << i)) { ++ ++ netdev = alloc_etherdev(sizeof(CNS3XXXPrivate)); ++ if (!netdev) { ++ err = -ENOMEM; ++ goto err_alloc_etherdev; ++ } ++ if (net_device_prive[i].name) ++ strcpy(netdev->name, net_device_prive[i].name); ++ ++ ++ net_dev_array[net_device_prive[i].vlan_tag] = netdev; ++ if (intr_netdev==0) ++ intr_netdev = netdev; ++ ++ SET_NETDEV_DEV(netdev, NULL); ++ priv = netdev_priv(netdev); ++ spin_lock_init(&priv->lock); ++ memset(priv, 0, sizeof(CNS3XXXPrivate)); ++ ++#if 1 ++ priv->num_rx_queues = 
ring_info.num_rx_queues; ++ priv->num_tx_queues = ring_info.num_tx_queues; ++ priv->rx_ring = ring_info.rx_ring; ++ priv->tx_ring = ring_info.tx_ring; ++#endif ++ ++ priv->net_device_priv = &net_device_prive[i]; ++ ++ // set netdev MAC address ++ memcpy(sock_addr.sa_data, net_device_prive[i].mac, 6); ++ cns3xxx_set_mac_addr(netdev, &sock_addr); ++ ++#ifdef LINUX2631 ++ netdev->netdev_ops = &cns3xxx_netdev_ops; ++#endif ++ ++ cns3xxx_set_ethtool_ops(netdev); ++#ifdef LINUX2627 ++ //netdev->base_addr = IO_ADDRESS(GSW_BASE_ADDR); ++ netdev->base_addr = 0; ++ netdev->open = cns3xxx_open; ++ netdev->stop = cns3xxx_close; ++ netdev->hard_start_xmit = cns3xxx_send_packet; ++ //netdev->hard_start_xmit = 0; ++ netdev->do_ioctl = cns3xxx_do_ioctl; ++ netdev->change_mtu = cns3xxx_change_mtu; ++ ++ //netdev->get_stats = cns3xxx_get_stats; ++ netdev->watchdog_timeo = 5 * HZ; // ref e1000_main.c ++ netdev->tx_timeout = cns3xxx_timeout; ++ netdev->set_mac_address = cns3xxx_set_mac_addr; ++#endif ++ ++#if defined(CNS3XXX_TX_HW_CHECKSUM) ++ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG); ++ //netdev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG); ++#endif ++ ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++ //netif_napi_add(netdev, &priv->napi, cns3xxx_poll, CNS3XXX_NAPI_WEIGHT); ++#endif ++ ++#if defined(CNS3XXX_VLAN_8021Q) ++ // do not let 8021Q module insert vlan tag ++ // can use the snippet code to get vlan tage ++ // if (priv->vlgrp && vlan_tx_tag_present(skb)) ++ // vlan_tag = cpu_to_be16(vlan_tx_tag_get(skb)); ++#ifdef CNS3XXX_8021Q_HW_TX ++ // hardware support insert VLAN tag on TX path ++ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; ++#else ++ netdev->features |= NETIF_F_HW_VLAN_RX; // remove NETIF_F_HW_VLAN_TX flag that 8021Q module to insert vlan tag. 
++#endif ++ ++ //netdev->vlan_rx_register = cns3xxx_vlan_rx_register; ++ //netdev->vlan_rx_kill_vid = cns3xxx_vlan_rx_kill_vid; ++#endif ++ ++ ++ err = register_netdev(netdev); ++ if (err) { ++ goto err_register_netdev; ++ } ++ ++ netif_carrier_off(netdev); ++ netdev = 0; ++ } ++ } // for (i=0 ; i < netdev_size ; ++i) ++ ++ return 0; ++ ++ ++err_register_netdev: ++ free_netdev(netdev); ++ ++err_alloc_etherdev: ++ return err; ++} ++ ++int cns3xxx_gsw_config_mac_port0(void) ++{ ++ INIT_PORT0_PHY ++ INIT_PORT0_MAC ++ PORT0_LINK_DOWN ++ return 0; ++} ++ ++int cns3xxx_gsw_config_mac_port1(void) ++{ ++ INIT_PORT1_PHY ++ INIT_PORT1_MAC ++ PORT1_LINK_DOWN ++ return 0; ++} ++ ++int cns3xxx_gsw_config_mac_port2(void) ++{ ++ INIT_PORT2_PHY ++ INIT_PORT2_MAC ++ PORT2_LINK_DOWN ++ return 0; ++} ++ ++static int cns3xxx_notify_reboot(struct notifier_block *nb, unsigned long event, void *ptr) ++{ ++ // stop the DMA ++ enable_rx_dma(0, 0); ++ enable_tx_dma(0, 0); ++ enable_rx_dma(1, 0); ++ enable_tx_dma(1, 0); ++ ++ // disable Port 0 ++ enable_port(0, 0); ++ enable_port(1, 0); ++ enable_port(2, 0); ++ enable_port(3, 0); ++ return NOTIFY_DONE; ++} ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++static struct net_device *init_napi_dev(struct net_device *ndev, const RingInfo *ring_info) ++{ ++ CNS3XXXPrivate *priv; ++ ++ ndev = alloc_etherdev(sizeof(CNS3XXXPrivate)); ++ if (!ndev) { ++ BUG(); ++ } ++ priv = netdev_priv(ndev); ++ memset(priv, 0, sizeof(CNS3XXXPrivate)); ++ ++ //priv = netdev_priv(napi_dev); ++ priv->num_rx_queues = ring_info->num_rx_queues; ++ priv->num_tx_queues = ring_info->num_tx_queues; ++ priv->rx_ring = ring_info->rx_ring; ++ priv->tx_ring = ring_info->tx_ring; ++ //priv->is_qf=0; // because of memset, so need not the line ++ ++ netif_napi_add(ndev, &priv->napi , cns3xxx_poll, CNS3XXX_NAPI_WEIGHT); ++ dev_hold(ndev); ++ set_bit(__LINK_STATE_START, &ndev->state); ++ ++ return ndev; ++} ++#endif ++ ++ ++void cns3xxx_config_intr(void) ++{ ++ u32 v=0xffffffff; ++ ++ 
get_interrupt_type(FSRC_RING0_INTERRUPT_ID, &v); ++#if 1 ++ set_interrupt_type(FSRC_RING0_INTERRUPT_ID, RISING_EDGE); ++ get_interrupt_type(FSRC_RING0_INTERRUPT_ID, &v); ++ ++ get_interrupt_type(FSRC_RING1_INTERRUPT_ID, &v); ++ set_interrupt_type(FSRC_RING1_INTERRUPT_ID, RISING_EDGE); ++ get_interrupt_type(FSRC_RING1_INTERRUPT_ID, &v); ++ ++ get_interrupt_type(FSQF_RING0_INTERRUPT_ID, &v); ++ set_interrupt_type(FSQF_RING0_INTERRUPT_ID, RISING_EDGE); ++ get_interrupt_type(FSQF_RING0_INTERRUPT_ID, &v); ++ ++ get_interrupt_type(FSQF_RING1_INTERRUPT_ID, &v); ++ set_interrupt_type(FSQF_RING1_INTERRUPT_ID, RISING_EDGE); ++ get_interrupt_type(FSQF_RING1_INTERRUPT_ID, &v); ++ ++ #ifdef CNS3XXX_USE_MASK ++ get_interrupt_pri(FSRC_RING0_INTERRUPT_ID, &v); ++ set_interrupt_pri(FSRC_RING0_INTERRUPT_ID, 0xc); ++ get_interrupt_pri(FSRC_RING0_INTERRUPT_ID, &v); ++ ++ get_interrupt_pri(FSRC_RING1_INTERRUPT_ID, &v); ++ set_interrupt_pri(FSRC_RING1_INTERRUPT_ID, 0xc); ++ get_interrupt_pri(FSRC_RING1_INTERRUPT_ID, &v); ++ ++ get_interrupt_pri(FSQF_RING1_INTERRUPT_ID, &v); ++ set_interrupt_pri(FSQF_RING1_INTERRUPT_ID, 0xc); ++ get_interrupt_pri(FSQF_RING1_INTERRUPT_ID, &v); ++ ++ #ifndef CONFIG_CNS3XXX_NAPI ++ set_interrupt_pri(FSQF_RING0_INTERRUPT_ID, 0xc); ++ #endif ++ ++ ++ #endif // CNS3XXX_USE_MASK ++#endif ++} ++ ++static int __devinit cns3xxx_init(struct platform_device *pdev) ++{ ++ // when tx_ring/rx_ring alloc memory, ++ // don't free them until cns3xxx_exit_module ++ ++ struct eth_plat_info *plat = pdev->dev.platform_data; ++ init_port = plat->ports; ++ memcpy(cpu_vlan_table_entry.my_mac, plat->cpu_hwaddr, ETH_ALEN); ++#if defined (CONFIG_CNS3XXX_SPPE) ++ memcpy(net_device_prive[3].mac, plat->cpu_hwaddr, ETH_ALEN); ++#endif ++ ++ RingInfo ring_info; ++ int i=0; ++ //spin_lock_init(&star_gsw_send_lock); ++ ++ ++#ifdef CNS3XXX_DOUBLE_RX_RING ++ ring_info.num_rx_queues = 2; ++#else ++ ring_info.num_rx_queues = 1; ++#endif ++ ++#ifdef CNS3XXX_DOUBLE_TX_RING ++ 
ring_info.num_tx_queues = 2; ++#else ++ ring_info.num_tx_queues = 1; ++#endif ++ ++ ring_info.rx_ring = kcalloc(ring_info.num_rx_queues, sizeof(RXRing), GFP_KERNEL); ++ if (!ring_info.rx_ring) ++ return -ENOMEM; ++ ++ for (i=0 ; i < ring_info.num_rx_queues ; ++i) { ++ memset(ring_info.rx_ring + i, 0, sizeof(RXRing)); ++ } ++ ++ ++ ring_info.tx_ring = kcalloc(ring_info.num_tx_queues, sizeof(TXRing), GFP_KERNEL); ++ ++ ++ if (!ring_info.tx_ring) ++ return -ENOMEM; ++ ++ for (i=0 ; i < ring_info.num_tx_queues ; ++i) { ++ memset(ring_info.tx_ring + i, 0, sizeof(TXRing)); ++ } ++ ++ ++ g_ring_info = ring_info; ++ ++ cns3xxx_gsw_hw_init(); ++ ++#ifdef CONFIG_FPGA ++ // GIGA mode disable ++ MAC0_CFG_REG &= (~(1<<16)); ++ MAC1_CFG_REG &= (~(1<<16)); ++ MAC2_CFG_REG &= (~(1<<16)); ++#endif ++ ++ if ((init_port & 1) == 1) { ++ memcpy(vlan_table_entry[0].my_mac, plat->eth0_hwaddr, ETH_ALEN); ++ memcpy(arl_table_entry[0].mac, plat->eth0_hwaddr, ETH_ALEN); ++ memcpy(net_device_prive[0].mac, plat->eth0_hwaddr, ETH_ALEN); ++ cns3xxx_gsw_config_mac_port0(); ++ } ++ ++ if (((init_port >> 1) & 1) == 1) { ++ memcpy(vlan_table_entry[1].my_mac, plat->eth1_hwaddr, ETH_ALEN); ++ memcpy(arl_table_entry[1].mac, plat->eth1_hwaddr, ETH_ALEN); ++ memcpy(net_device_prive[1].mac, plat->eth1_hwaddr, ETH_ALEN); ++ cns3xxx_gsw_config_mac_port1(); ++ } ++ ++ if (((init_port >> 2) & 1) == 1) { ++ memcpy(vlan_table_entry[2].my_mac, plat->eth2_hwaddr, ETH_ALEN); ++ memcpy(arl_table_entry[2].mac, plat->eth2_hwaddr, ETH_ALEN); ++ memcpy(net_device_prive[2].mac, plat->eth2_hwaddr, ETH_ALEN); ++ cns3xxx_gsw_config_mac_port2(); ++ } ++ ++ cns3xxx_probe(ring_info); ++ cns3xxx_config_intr(); ++ ++#ifdef CNS3XXX_VLAN_8021Q ++#ifdef CNS3XXX_NIC_MODE_8021Q ++ cns3xxx_nic_mode(1); ++#endif ++#endif ++ spin_lock_init(&tx_lock); ++ spin_lock_init(&rx_lock); ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++ napi_dev = init_napi_dev(napi_dev, &ring_info); ++ #ifdef CNS3XXX_DOUBLE_RX_RING ++ r1_napi_dev = init_napi_dev(r1_napi_dev, 
&ring_info); ++ #endif ++#endif ++ ++ register_reboot_notifier(&cns3xxx_notifier_reboot); ++ clear_fs_dma_state(0); ++ ++ if (ring_info.num_rx_queues == 2) { ++ // enable RX dobule ring ++ DMA_RING_CTRL_REG |= 1; ++ } ++ ++ if (ring_info.num_tx_queues == 2 ) { ++ // enable TX dobule ring ++ DMA_RING_CTRL_REG |= (1 << 16); ++ } ++ ++ ++ return 0; ++} ++ ++static int __devexit cns3xxx_remove(struct platform_device *pdev) ++{ ++ int i=0; ++ ++#if 1 ++ for (i=0 ; i < NETDEV_SIZE ; ++i) { ++ CNS3XXXPrivate *priv = 0; ++ ++ if (net_dev_array[i]){ ++ priv = netdev_priv(net_dev_array[i]); ++ ++ kfree(priv->tx_ring); ++ priv->tx_ring = 0; ++ ++ kfree(priv->rx_ring); ++ priv->rx_ring = 0; ++ ++ unregister_netdev(net_dev_array[i]); ++ free_netdev(net_dev_array[i]); ++ } ++ ++ ++#if 0 ++ sprintf(netdev_name, "eth%d", i); ++ netdev=__dev_get_by_name(&init_net, netdev_name); ++ // if no unregister_netdev and free_netdev, ++ // after remove module, ifconfig will hang. ++ #if 1 ++ if (netdev) { ++ unregister_netdev(netdev); ++ free_netdev(netdev); ++ } ++#endif ++ #endif ++ } ++#endif ++ ++#ifdef CONFIG_CNS3XXX_NAPI ++ free_netdev(napi_dev); ++ #ifdef CNS3XXX_DOUBLE_RX_RING ++ free_netdev(r1_napi_dev); ++ #endif ++#endif ++ ++ ++#if 0 ++ //star_gsw_buffer_free(); ++#endif ++ unregister_reboot_notifier(&cns3xxx_notifier_reboot); ++} ++ ++ ++// this snippet code ref 8139cp.c ++#if defined(CNS3XXX_VLAN_8021Q) ++void cns3xxx_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->vlgrp = grp; ++ spin_unlock_irqrestore(&priv->lock, flags); ++} ++ ++void cns3xxx_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ++{ ++ CNS3XXXPrivate *priv = netdev_priv(dev); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ // reference: linux-2.6.24-current/drivers/netvia-velocity.c ++ vlan_group_set_device(priv->vlgrp, vid, NULL); ++ 
//priv->vlgrp->vlan_devices[vid] = NULL; ++ spin_unlock_irqrestore(&priv->lock, flags); ++} ++ ++#endif ++ ++static struct platform_driver drv = { ++ .driver.name = "cns3xxx-net", ++ .probe = cns3xxx_init, ++ .remove = cns3xxx_remove, ++}; ++ ++static int __init cns3xxx_init_module(void) ++{ ++ return platform_driver_register(&drv); ++} ++ ++static void __exit cns3xxx_exit_module(void) ++{ ++ platform_driver_unregister(&drv); ++} ++ ++MODULE_AUTHOR("Cavium Networks, "); ++MODULE_DESCRIPTION("CNS3XXX Switch Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_VERSION); ++ ++module_init(cns3xxx_init_module); ++module_exit(cns3xxx_exit_module); ++ +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_phy.c +@@ -0,0 +1,1968 @@ ++/******************************************************************************* ++ * ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++1* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++#include "cns3xxx_phy.h" ++#include "cns3xxx_symbol.h" ++ ++ ++#if defined(LINUX_KERNEL) ++#include "cns3xxx_tool.h" ++#include // for CAVM_OK ... 
macro ++#include ++#include "cns3xxx_config.h" ++#else // u-boot ++#include ++#include "cns3xxx_switch_type.h" ++#define printk printf ++#endif ++ ++int cns3xxx_phy_reset(u8 phy_addr) ++{ ++ u16 phy_data=0; ++ ++ if (cns3xxx_read_phy(phy_addr, 0, &phy_data) != CAVM_OK) ++ return CAVM_ERR; ++ phy_data |= (0x1 << 15); ++ if (cns3xxx_write_phy(phy_addr, 0, phy_data) != CAVM_OK) ++ return CAVM_ERR; ++ ++ return CAVM_OK; ++} ++ ++// mac_port: 0, 1, 2 ++int cns3xxx_enable_mac_clock(u8 mac_port, u8 en) ++{ ++ switch (mac_port) ++ { ++ case 0: ++ { ++ (en==1)?(PHY_AUTO_ADDR_REG |= 1 << 7) :(PHY_AUTO_ADDR_REG &= (~(1 << 7)) ); ++ break; ++ } ++ case 1: ++ { ++ (en==1)?(PHY_AUTO_ADDR_REG |= (1 << 15)) :(PHY_AUTO_ADDR_REG &= (~(1 << 15)) ); ++ break; ++ } ++ case 2: ++ { ++ (en==1)?(PHY_AUTO_ADDR_REG |= (1 << 23)) :(PHY_AUTO_ADDR_REG &= (~(1 << 23)) ); ++ break; ++ } ++ } ++ ++ return CAVM_OK; ++} ++ ++// dis: 1 disable ++// dis: 0 enable ++int cns3xxx_phy_auto_polling_enable(u8 port, u8 en) ++{ ++ u8 phy_addr[]={5, 13, 21}; ++ ++ PHY_AUTO_ADDR_REG &= (~(1 << phy_addr[port])); ++ if (en) { ++ PHY_AUTO_ADDR_REG |= (1 << phy_addr[port]); ++ } ++ return CAVM_OK; ++} ++ ++// dis: 1 disable ++// dis: 0 enable ++int cns3xxx_mdc_mdio_disable(u8 dis) ++{ ++ ++ PHY_CTRL_REG &= (~(1 << 7)); ++ if (dis) { ++ PHY_CTRL_REG |= (1 << 7); ++ } ++ return CAVM_OK; ++} ++ ++ ++static int cns3xxx_phy_auto_polling_conf(int mac_port, u8 phy_addr) ++{ ++ if ( (mac_port < 0) || (mac_port > 2) ) { ++ return CAVM_ERR; ++ } ++ ++ switch (mac_port) ++ { ++ case 0: ++ { ++ PHY_AUTO_ADDR_REG &= (~0x1f); ++ PHY_AUTO_ADDR_REG |= phy_addr; ++ break; ++ } ++ case 1: ++ { ++ PHY_AUTO_ADDR_REG &= (~(0x1f << 8)); ++ PHY_AUTO_ADDR_REG |= (phy_addr << 8); ++ break; ++ } ++ case 2: ++ { ++ PHY_AUTO_ADDR_REG &= (~(0x1f << 16)); ++ PHY_AUTO_ADDR_REG |= (phy_addr << 16); ++ break; ++ } ++ } ++ cns3xxx_phy_auto_polling_enable(mac_port, 1); ++ return CAVM_OK; ++} ++ ++ ++ ++int cns3xxx_read_phy(u8 phy_addr, u8 phy_reg, 
u16 *read_data) ++{ ++ int delay=0; ++ u32 volatile tmp = PHY_CTRL_REG; ++ ++ PHY_CTRL_REG |= (1 << 15); // clear "command completed" bit ++ // delay ++ for (delay=0; delay<10; delay++); ++ tmp &= (~0x1f); ++ tmp |= phy_addr; ++ ++ tmp &= (~(0x1f << 8)); ++ tmp |= (phy_reg << 8); ++ ++ tmp |= (1 << 14); // read command ++ ++ PHY_CTRL_REG = tmp; ++ ++ // wait command complete ++ while ( ((PHY_CTRL_REG >> 15) & 1) == 0); ++ ++ *read_data = (PHY_CTRL_REG >> 16); ++ ++ PHY_CTRL_REG |= (1 << 15); // clear "command completed" bit ++ ++ return CAVM_OK; ++} ++ ++int cns3xxx_write_phy(u8 phy_addr, u8 phy_reg, u16 write_data) ++{ ++ int delay=0; ++ u32 tmp = PHY_CTRL_REG; ++ ++ PHY_CTRL_REG |= (1 << 15); // clear "command completed" bit ++ // delay ++ for (delay=0; delay<10; delay++); ++ ++ tmp &= (~(0xffff << 16)); ++ tmp |= (write_data << 16); ++ ++ tmp &= (~0x1f); ++ tmp |= phy_addr; ++ ++ tmp &= (~(0x1f << 8)); ++ tmp |= (phy_reg << 8); ++ ++ tmp |= (1 << 13); // write command ++ ++ PHY_CTRL_REG = tmp; ++ ++ // wait command complete ++ while ( ((PHY_CTRL_REG >> 15) & 1) == 0); ++ ++ return CAVM_OK; ++} ++ ++// port 0,1,2 ++void cns3xxx_rxc_dly(u8 port, u8 val) ++{ ++ switch (port) ++ { ++ case 0: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 4)); ++ SLK_SKEW_CTRL_REG |= (val << 4); ++ break; ++ } ++ case 1: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 12)); ++ SLK_SKEW_CTRL_REG |= (val << 12); ++ break; ++ } ++ case 2: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 20)); ++ SLK_SKEW_CTRL_REG |= (val << 20); ++ break; ++ } ++ } ++} ++ ++// port 0,1,2 ++void cns3xxx_txc_dly(u8 port, u8 val) ++{ ++ switch (port) ++ { ++ case 0: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 6)); ++ SLK_SKEW_CTRL_REG |= (val << 6); ++ break; ++ } ++ case 1: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 14)); ++ SLK_SKEW_CTRL_REG |= (val << 14); ++ break; ++ } ++ case 2: ++ { ++ SLK_SKEW_CTRL_REG &= (~(0x3 << 22)); ++ SLK_SKEW_CTRL_REG |= (val << 22); ++ break; ++ } ++ } ++} ++ ++void cns3xxx_mac2_gtxd_dly(u8 val) ++{ ++ 
SLK_SKEW_CTRL_REG &= (~(0x3 << 24)); ++ SLK_SKEW_CTRL_REG |= (val << 24); ++} ++ ++// VITESSE suggest use isolate bit. ++int vsc8601_power_down(int phy_addr, int y) ++{ ++ u16 phy_data = 0; ++ /* set isolate bit instead of powerdown */ ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ if (y==1) // set isolate ++ phy_data |= (0x1 << 10); ++ if (y==0) // unset isolate ++ phy_data &= (~(0x1 << 10)); ++ cns3xxx_write_phy(phy_addr, 0, phy_data); ++ ++ return 0; ++} ++ ++ ++// port : 0 => port0 ; port : 1 => port1 ++// y = 1 ; disable AN ++void disable_AN(int port, int y) ++{ ++ u32 mac_port_config=0; ++ ++ switch (port) ++ { ++ case 0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ ++ // disable PHY's AN ++ if (y==1) ++ { ++ mac_port_config &= ~(0x1 << 7); ++ } ++ ++ // enable PHY's AN ++ if (y==0) ++ { ++ mac_port_config |= (0x1 << 7); ++ } ++ ++ switch (port) ++ { ++ case 0: ++ { ++ MAC0_CFG_REG = mac_port_config; ++ break; ++ } ++ case 1: ++ { ++ MAC1_CFG_REG = mac_port_config; ++ break; ++ } ++ case 2: ++ { ++ MAC2_CFG_REG = mac_port_config; ++ break; ++ } ++ } ++} ++ ++int cns3xxx_std_phy_power_down(int phy_addr, int y) ++{ ++ u16 phy_data = 0; ++ // power-down or up the PHY ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ if (y==1) // down ++ phy_data |= (0x1 << 11); ++ if (y==0) // up ++ phy_data &= (~(0x1 << 11)); ++ cns3xxx_write_phy(phy_addr, 0, phy_data); ++ ++ phy_data=0; ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ ++ return 0; ++} ++ ++ ++#if defined(LINUX_KERNEL) ++int cns3xxx_spi_tx_rx_n(u32 tx_data, u32 *rx_data, u32 tx_channel, u32 tx_eof_flag) ++{ ++ u8 cns3xxx_spi_tx_rx(u8 tx_channel, u8 tx_eof, u32 tx_data, u32 * rx_data); ++ ++ return cns3xxx_spi_tx_rx(tx_channel, tx_eof_flag, tx_data, rx_data); ++} ++ ++int bcm53115M_reg_read(int page, int offset, u8 *buf, int len) ++{ ++ u32 ch = 
BCM53115_SPI_CHANNEL; ++ u8 cmd_byte; ++ u32 dumy_word; ++ u32 spi_status; ++ int i; ++ ++ /* ++ * Normal SPI Mode (Command Byte) ++ * Bit7 Bit6 Bit5 Bit4 Bit3 Bit2 Bit1 Bit0 ++ * 0 1 1 Mode=0 CHIP_ID2 ID1 ID0(lsb) Rd/Wr(0/1) ++ * ++ */ ++ ++ /* Normal Read Operation */ ++ /* 1. Issue a normal read command(0x60) to poll the SPIF bit in the ++ SPI status register(0XFE) to determine the operation can start */ ++ do ++ { ++ cmd_byte = 0x60; ++ cns3xxx_spi_tx_rx_n(cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0xFE, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0x00, &spi_status, ch, 1); ++ udelay(100); ++ }while ((spi_status >> ROBO_SPIF_BIT) & 1) ; // wait SPI bit to 0 ++ ++ /* 2. Issue a normal write command(0x61) to write the register page value ++ into the SPI page register(0xFF) */ ++ cmd_byte = 0x61; ++ cns3xxx_spi_tx_rx_n(cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0xFF, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(page, &dumy_word, ch, 1); ++ ++ /* 3. Issue a normal read command(0x60) to setup the required RobiSwitch register ++ address */ ++ cmd_byte = 0x60; ++ cns3xxx_spi_tx_rx_n(cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(offset, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0x00, &dumy_word, ch, 1); ++ ++ /* 4. Issue a normal read command(0x60) to poll the RACK bit in the ++ SPI status register(0XFE) to determine the completion of read */ ++ do ++ { ++ cmd_byte = 0x60; ++ cns3xxx_spi_tx_rx_n(cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0xFE, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0x00, &spi_status, ch, 1); ++ udelay(100); ++ }while (((spi_status >> ROBO_RACK_BIT) & 1) == 0); // wait RACK bit to 1 ++ ++ /* 5. 
Issue a normal read command(0x60) to read the specific register's conternt ++ placed in the SPI data I/O register(0xF0) */ ++ cmd_byte = 0x60; ++ cns3xxx_spi_tx_rx_n(cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0xF0, &dumy_word, ch, 0); ++ // read content ++ for (i=0; i> ROBO_SPIF_BIT) & 1) ; // wait SPI bit to 0 ++ ++ /* 2. Issue a normal write command(0x61) to write the register page value ++ into the SPI page register(0xFF) */ ++ cmd_byte = 0x61; ++ cns3xxx_spi_tx_rx_n((u32)cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(0xFF, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(page, &dumy_word, ch, 1); ++ ++ /* 3. Issue a normal write command(0x61) and write the address of the accessed ++ register followed by the write content starting from a lower byte */ ++ cmd_byte = 0x61; ++ cns3xxx_spi_tx_rx_n((u32)cmd_byte, &dumy_word, ch, 0); ++ cns3xxx_spi_tx_rx_n(offset, &dumy_word, ch, 0); ++ // write content ++ for (i=0; ivid; ++ bcm53115M_reg_write(0x05, 0x81, (u8*)&wval, 2); ++ ++ // fill table content ++ dwval = 0; ++ dwval |= (v->forward_map & 0x1FF); ++ dwval |= ((v->untag_map& 0x1FF) << 9); ++ bcm53115M_reg_write(0x05, 0x83, (u8*)&wval, 4); ++ ++ // write cmd ++ bval = VLAN_WRITE_CMD; ++ bval |= (1 << VLAN_START_BIT); ++ bcm53115M_reg_write(0x05, 0x80, (u8*)&bval, 1); ++ ++ // wait cmd complete ++ while(1) { ++ bcm53115M_reg_read(0x05, 0x80, (u8*)&bval, 1); ++ if (((bval >> VLAN_START_BIT) & 1) == 0) break; ++ } ++ ++ return CAVM_OK; ++} ++ ++typedef struct bcm_port_cfg_t ++{ ++ u8 link; ++ u8 fdx; ++ BCM_PORT_SPEED speed; ++ u8 rx_flw_ctrl; ++ u8 tx_flw_ctrl; ++ u8 ow; ++}bcm_port_cfg; ++ ++ ++ ++int bcm53115M_mac_port_config(int port, bcm_port_cfg *cfg) ++{ ++ u8 bval = 0; ++ int page, addr; ++ ++ if (cfg->link) bval |= (1<<0); ++ if (cfg->fdx) bval |= (1<<1); ++ bval |= ((cfg->speed&0x3) << 2); ++ if (cfg->rx_flw_ctrl) bval |= (1<<4); ++ if (cfg->tx_flw_ctrl) bval |= (1<<5); ++ ++ if (port == BCM_PORT_IMP) { ++ bval |= (1<<7); // Use content of this 
register ++ page = 0x00; ++ addr = 0x0E; ++ }else { ++ page = 0x00; ++ addr = 0x58+port; ++ } ++ ++ bcm53115M_reg_write(page, addr, &bval, 1); ++ ++ return 0; ++} ++ ++int bcm53115M_init_internal_phy(void) ++{ ++ int p, page; ++ u16 wval; ++ ++ for (p=BCM_PORT_0; p<=BCM_PORT_4; p++) { ++ page = 0x10+p; ++ ++ // reset phy ++ bcm53115M_reg_read(page, 0x00, (u8*)&wval, 2); ++ wval |= 0x8000; ++ bcm53115M_reg_write(page, 0x00, (u8*)&wval, 2); ++ ++ // config auto-nego & all advertisement ++ bcm53115M_reg_read(page, 0x00, (u8*)&wval, 2); ++ wval |= (1<<12); // auto-nego ++ bcm53115M_reg_write(page, 0x00, (u8*)&wval, 2); ++ ++ bcm53115M_reg_read(page, 0x08, (u8*)&wval, 2); ++ wval |= 0x1E0; // advertisement all ++ bcm53115M_reg_write(page, 0x08, (u8*)&wval, 2); ++ ++ // 1000BASE-T ++ bcm53115M_reg_read(page, 0x12, (u8*)&wval, 2); ++ wval &= ~(1<<12); // automatic master/slave configuration ++ wval |= 0x300; // 1000-base full/half advertisements ++ bcm53115M_reg_write(page, 0x12, (u8*)&wval, 2); ++ } ++ ++ return 0; ++} ++ ++int bcm53115M_led_init(void) ++{ ++ u16 led_func, bval, wval; ++ ++ /* LED function 1G/ACT, 100M/ACT, 10M/ACT, not used */ ++ led_func = 0x2C00; ++ bcm53115M_reg_write(0x00, 0x10, (u8*)&led_func, 2); ++ bcm53115M_reg_write(0x00, 0x12, (u8*)&led_func, 2); ++ ++ /* LED map enable */ ++ wval = 0x1F; // port0~4 ++ bcm53115M_reg_write(0x00, 0x16, (u8*)&wval, 2); ++ ++ /* LED mode map */ ++ wval = 0x1F; // led auto mode ++ bcm53115M_reg_write(0x00, 0x18, (u8*)&wval, 2); ++ bcm53115M_reg_write(0x00, 0x1A, (u8*)&wval, 2); ++ ++ /* LED enable */ ++ bcm53115M_reg_read(0x00, 0x0F, (u8*)&bval, 1); ++ bval |= 0x80; ++ bcm53115M_reg_write(0x00, 0x0F, (u8*)&bval, 1); ++ ++ return 0; ++} ++ ++//#define BCM53115M_DUMMY_SWITCH ++#define BCM53115M_DISABLE_LEARNING ++ ++int bcm53115M_init(u8 mac_port, u16 phy_addr) ++{ ++ u32 u32_val=0; ++ u16 u16_val=0; ++ u8 bval=0; ++ int i=0; ++ bcm53115M_vlan_entry v_ent; ++ bcm_port_cfg pc; ++ u8 page=0, offset=0; ++ ++ 
printk("bcm53115M init\n"); ++ ++ memset(&v_ent, 0, sizeof(bcm53115M_vlan_entry)); ++ ++ // gpio B pin 18 ++ gpio_direction_output(50, 0); ++ bcm53115M_init_mac(0, 0); ++ bcm53115M_init_mac(1, 1); ++ ++ // read device id ++ bcm53115M_reg_read(0x02, 0x30, (u8*)&u32_val, 4); ++ printk("bcm53115M device id:(0x%x)\r\n", u32_val); ++ ++ if (u32_val != 0x53115) { ++ printk("bad device id(0x%x)\r\n", u32_val); ++ return -1; ++ } ++ ++ u16_val=0; ++ // read phy id ++ bcm53115M_reg_read(0x10, 0x04, (u8 *)&u16_val, 2); ++ printk("bcm53115M phy id_1:(0x%x)\r\n", u16_val); ++ ++ if (u16_val != 0x143) { ++ printk("bad phy id1(0x%x)\r\n", u16_val); ++ return CAVM_ERR; ++ } ++ ++ u16_val=0; ++ // read phy id2 ++ bcm53115M_reg_read(0x10, 0x06, (u8 *)&u16_val, 2); ++ printk("bcm53115M phy id_2:(0x%x)\r\n", u16_val); ++ ++#ifdef BCM53115M_DUMMY_SWITCH ++ bval=0; ++ bcm53115M_reg_read(0x00, 0x0e, (u8 *)&bval, 1); ++ printk("bcm53115M page:0 addr:0x0e ## %x\n", bval); ++ bval |= (1 << 7); ++ bval |= 1; ++ ++ bval = 0x8b; ++ bval |= (1 << 5); ++ bval |= (1 << 4); ++ printk("bval : %x\n", bval); ++ bcm53115M_reg_write(0x00, 0x0e, (u8 *)&bval, 1); ++ bcm53115M_reg_read(0x00, 0x0e, (u8 *)&bval, 1); ++ printk("bcm53115M page:0 addr:0x0e ## %x\n", bval); ++ ++ /* Unmanagement mode */ ++ // Switch Mode. 
Page 00h,Address 0Bh ++ bval = 0x06; // forward enable, unmanaged mode ++ //bval = 0x3; // forward enable, managed mode ++ bcm53115M_reg_write(0x0, 0xb, &bval, 1); ++ bcm53115M_reg_read(0x0, 0xb, (u8 *)&bval, 1); ++ printk("bcm53115M page:0 addr:0xb ## %x\n", bval); ++ ++ page=0x0; ++ offset=0x5d; // port 5 ++ bval=0x7b; ++ bcm53115M_reg_write(page, offset, (u8 *)&bval, 1); ++ bcm53115M_reg_read(page, offset, (u8 *)&bval, 1); ++ ++ printk("bcm53115M page:%x addr:%x ## %x\n", page, offset, bval); ++ ++ page=0x0; ++ offset=0x58; // port 0 ++ bval=0x7b; ++ bcm53115M_reg_write(page, offset, (u8 *)&bval, 1); ++ bcm53115M_reg_read(page, offset, (u8 *)&bval, 1); ++ printk("bcm53115M page:%x addr:%x ## %x\n", page, offset, bval); ++ ++#ifdef CONFIG_CNS3XXX_JUMBO_FRAME ++ printk("enable BCM53115 jumbo frame\n"); ++ ++ page=0x40; ++ offset=0x01; ++ u32_val=0x013f; // enable 0-5 port and IMP port jumbo frame. MAX frame: 9720 bytes. ++ bcm53115M_reg_write(page, offset, (u8 *)&u32_val, 4); ++ bcm53115M_reg_read(page, offset, (u8 *)&u32_val, 4); ++ printk("bcm53115M page:%x addr:%x ## %x\n", page, offset, u32_val); ++ ++#if 0 ++ page=0x40; ++ offset=0x05; ++ u16_val=0; ++ bcm53115M_reg_write(page, offset, (u8 *)&u16_val, 2); ++#endif ++ ++#endif ++ ++#else // !BCM53115M_DUMMY_SWITCH ++ /* Loop detection disable */ ++ bcm53115M_reg_read(0x72, 0x00, (u8 *)&u16_val, 2); ++ u16_val &= ~(0x3<<11); ++ bcm53115M_reg_write(0x72, 0x00, (u8 *)&u16_val, 2); ++ ++ ++ /* VLAN forwarding mask */ ++ // Bit8 IMP port, Bits[5:0] correspond to ports[5:0] ++ // port 0 <-> port IMP ++ u16_val = 0x103; ++ bcm53115M_reg_write(0x31, 0x0, (u8 *)&u16_val, 2); // port 0 ++ u16_val = 0x103; ++ bcm53115M_reg_write(0x31, 0x10, (u8 *)&u16_val, 2); // IMP ++ ++ ++ // port 4 <-> port 5 ++ u16_val = 0x3c; ++ bcm53115M_reg_write(0x31, 0x08, (u8 *)&u16_val, 2); // port 4 ++ u16_val = 0x3c; ++ bcm53115M_reg_write(0x31, 0x0A, (u8 *)&u16_val, 2); // port 5 ++ ++ ++ // others <-> none ++ u16_val = 0x00; ++ 
bcm53115M_reg_write(0x31, 0x02, (u8 *)&u16_val, 2); // port 1 ++ bcm53115M_reg_write(0x31, 0x04, (u8 *)&u16_val, 2); // port 2 ++ bcm53115M_reg_write(0x31, 0x06, (u8 *)&u16_val, 2); // port 3 ++ ++ // port 1 <-> port IMP ++ u16_val = 0x103; ++ bcm53115M_reg_write(0x31, 0x2, (u8 *)&u16_val, 2); // port 1 ++ ++ // port 2 <-> port 5 ++ u16_val = 0x3c; ++ bcm53115M_reg_write(0x31, 0x4, (u8 *)&u16_val, 2); // port 2 ++ ++ // port 3 <-> port 5 ++ u16_val = 0x3c; ++ bcm53115M_reg_write(0x31, 0x6, (u8 *)&u16_val, 2); // port 3 ++ ++ /* Create VLAN1 for default port pvid */ ++#if 0 ++ v_ent.vid = 1; ++ v_ent.forward_map = 0x13F; // all ports ++ robo_write_vlan(&v_ent); ++#endif ++ ++ /* Unmanagement mode */ ++ // Switch Mode. Page 00h,Address 0Bh ++ bval = 0x02; // forward enable, unmanaged mode ++ bcm53115M_reg_write(0x0, 0xb, &bval, 1); ++ ++ /* Init port5 & IMP (test giga mode first) */ ++ // IMP port control. Page 00h,Address 08h ++ bval = 0x1C; // RX UCST/MCST/BCST enable ++ bcm53115M_reg_write(0x0, 0x8, &bval, 1); ++ ++ offset=0x5d; // port 5 ++ bval=0x7b; ++ bcm53115M_reg_write(page, offset, (u8 *)&bval, 1); ++ bcm53115M_reg_read(page, offset, (u8 *)&bval, 1); ++ ++ // Speed, dulplex......etc ++ // setting in Gsw_Configure_Gsw_Hardware() ++ ++ // Mgmt configuration, Page 02h, Address 00h ++ bval = 0; ++ bcm53115M_reg_write(0x02, 0x00, &bval, 1); ++ // BRCM header, Page 02h, Address 03h ++ bval = 0; // without additional header information ++ bcm53115M_reg_write(0x02, 0x03, &bval, 1); ++ ++ /* Init front ports, port0-4 */ ++ // MAC ++ pc.speed = BCM_PORT_1G; ++ pc.link = 0; // link detect by robo_port_update() ++ pc.ow = 0; ++ for (i=BCM_PORT_0; i<=BCM_PORT_4; i++) ++ bcm53115M_mac_port_config(i, &pc); ++ // Internal Phy ++ bcm53115M_init_internal_phy(); ++ ++ /* Enable all port, STP_STATE=No spanning tree, TX/RX enable */ ++ // Page 00h, Address 00h-05h ++ bval = 0x0; ++ for (i=0; i<=5; i++) ++ bcm53115M_reg_write(0x0, i, &bval, 1); ++ ++ // Disable broadcast storm 
control due to h/w strap pin BC_SUPP_EN ++ // Page 41h, Address 10h-13h, bit28&22 ++ ++ // for port 0 ~ 5 ++ for (i=0 ; i <= 0x14; i+=4) { ++ bcm53115M_reg_read(0x41, 0x10+i, (u8 *)&u32_val, 4); ++ u32_val &= ~((1<<28) | (1<<22)); ++ bcm53115M_reg_write(0x41, 0x10+i, (u8 *)&u32_val, 4); ++ } ++ ++ // for IMP port ++ bcm53115M_reg_read(0x41, 0x30, (u8 *)&u32_val, 4); ++ u32_val &= ~((1<<28) | (1<<22)); ++ bcm53115M_reg_write(0x41, 0x30, (u8 *)&u32_val, 4); ++ ++ /* Misc */ ++ // led ++ bcm53115M_led_init(); ++ // multicast fwd rule, Page 00h, Address 2Fh ++ bval = 0; ++ bcm53115M_reg_write(0x00, 0x2F, &bval, 1); ++ ++#ifdef BCM53115M_DISABLE_LEARNING ++ // disable learning ++ page=0x00; ++ offset=0x3c; ++ u16_val=0x13f; ++ bcm53115M_reg_write(page, offset, (u8 *)&u16_val, 2); ++ bcm53115M_reg_read(page, offset, (u8 *)&u16_val, 2); ++ ++ page=0x02; ++ offset=0x06; ++ u32_val=4; ++ bcm53115M_reg_write(page, offset, (u8 *)&u32_val, 4); ++#endif ++#endif ++ return CAVM_OK; ++} ++#endif // defined(LINUX_KERNEL) ++ ++//#define MAC2_RGMII ++#define CNS3XXX_MAC2_IP1001_GIGA_MODE ++ ++void icp_ip1001_init_mac(u8 mac_port, u16 phy_addr) ++{ ++ u32 mac_port_config = 0; ++ u8 mac_addr[]={0x0c, 0x10, 0x18}; ++ ++ cns3xxx_enable_mac_clock(mac_port, 1); ++ ++ mac_port_config = SWITCH_REG_VALUE(mac_addr[mac_port]); ++ ++ //cns3xxx_txc_dly(mac_port, 2); ++ //cns3xxx_rxc_dly(mac_port, 2); ++ //SLK_SKEW_CTRL_REG ++#if 1 ++ ++ // enable GMII, MII, reverse MII ++ mac_port_config &= (~(1 << 15)); ++ ++#ifdef MAC2_RGMII ++ mac_port_config |= (1 << 15); ++#endif ++ ++ // TXC check disable ++ //mac_port_config &= (~(1 << 13)); ++ ++ // disable GIGA mode ++ mac_port_config &= (~(1<<16)); ++ ++#ifdef CNS3XXX_MAC2_IP1001_GIGA_MODE ++ // enable GIGA mode ++ mac_port_config |= (1<<16); ++ ++ //mac_port_config |= (1<<19); ++#endif ++ ++ // disable PHY's AN ++ mac_port_config &= (~(0x1 << 7)); ++ ++ // enable PHY's AN ++ mac_port_config |= (0x1 << 7); ++#else ++ // disable PHY's AN ++ 
mac_port_config &= (~(0x1 << 7)); ++ // disable GIGA mode ++ mac_port_config &= (~(1<<16)); ++ ++ // force 100Mbps ++ mac_port_config &= (~(0x3 << 8)); ++ mac_port_config |= (0x1 << 8); ++ ++ // force duplex ++ mac_port_config |= (0x1 << 10); ++ ++ // TX flow control off ++ mac_port_config &= (~(0x1 << 12)); ++ ++ // RX flow control off ++ mac_port_config &= (~(0x1 << 11)); ++ ++#if 0 ++ // TX flow control on ++ mac_port_config |= (0x1 << 12); ++ ++ // RX flow control on ++ mac_port_config |= (0x1 << 11); ++#endif ++ ++ // enable GMII, MII, reverse MII ++ mac_port_config &= (~(1 << 15)); ++#endif ++ SWITCH_REG_VALUE(mac_addr[mac_port]) = mac_port_config; ++ ++ // If mac port AN turns on, auto polling needs to turn on. ++ cns3xxx_phy_auto_polling_conf(mac_port, phy_addr); ++ ++} ++ ++int icp_ip1001_init(u8 mac_port, u8 phy_addr) ++{ ++ u16 phy_data = 0; ++ ++ printk("mac_port: %d ## phy_addr: %d\n", mac_port, phy_addr); ++ cns3xxx_mdc_mdio_disable(0); ++ ++#if 0 ++ // GMII2 high speed drive strength ++ IOCDA_REG &= ((~3 << 10)); ++ IOCDA_REG |= (1 << 10); ++#endif ++ IOCDA_REG = 0x55555800; ++ ++ phy_data = get_phy_id(phy_addr); // should be 0x243 ++ ++ printk("ICPLUS IP 1001 phy id : %x\n", phy_data); ++ ++ if (phy_data != 0x0243) { ++ printk("wrong phy id!!\n"); ++ return CAVM_ERR; ++ } ++ ++ ++ cns3xxx_phy_reset(phy_addr); ++ ++ icp_ip1001_init_mac(mac_port, phy_addr); ++ ++ // read advertisement register ++ cns3xxx_read_phy(phy_addr, 0x4, &phy_data); ++ ++ // enable PAUSE frame capability ++ phy_data |= (0x1 << 10); ++ ++ phy_data &= (~(0x1 << 5)); ++ phy_data &= (~(0x1 << 6)); ++ phy_data &= (~(0x1 << 7)); ++ phy_data &= (~(0x1 << 8)); ++ ++#if 1 ++ phy_data |= (0x1 << 5); ++ phy_data |= (0x1 << 6); ++ phy_data |= (0x1 << 7); ++ phy_data |= (0x1 << 8); ++#endif ++ ++ cns3xxx_write_phy(phy_addr, 0x4, phy_data); ++ ++ cns3xxx_read_phy(phy_addr, 9, &phy_data); ++ ++ phy_data &= (~(1<<8)); // remove advertise 1000 half duples ++ phy_data &= (~(1<<9)); // remove 
advertise 1000 full duples ++#ifdef CNS3XXX_MAC2_IP1001_GIGA_MODE ++ //phy_data |= (1<<8); // add advertise 1000 half duples ++ phy_data |= (1<<9); // add advertise 1000 full duples ++#endif ++ cns3xxx_write_phy(phy_addr, 9, phy_data); ++ ++ cns3xxx_read_phy(phy_addr, 9, &phy_data); ++ ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ // AN enable ++ phy_data |= (0x1 << 12); ++ cns3xxx_write_phy(phy_addr, 0, phy_data); ++ ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ // restart AN ++ phy_data |= (0x1 << 9); ++ cns3xxx_write_phy(phy_addr, 0, phy_data); ++ return 0; ++} ++ ++#define PHY_CONTROL_REG_ADDR 0x00 ++#define PHY_AN_ADVERTISEMENT_REG_ADDR 0x04 ++ ++int icp_101a_init_mac(u8 port, u8 phy_addr) ++{ ++ u32 mac_port_config = 0; ++ ++ cns3xxx_enable_mac_clock(port, 1); ++ ++ switch (port) ++ { ++ case 0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ // enable GMII, MII, reverse MII ++ mac_port_config &= (~(1 << 15)); ++ ++ // disable PHY's AN, use force mode ++ mac_port_config &= (~(0x1 << 7)); ++#ifdef CONFIG_FPGA_FORCE ++ ++ // force 100Mbps ++ mac_port_config &= (~(0x3 << 8)); ++ mac_port_config |= (0x1 << 8); ++ ++ // force duplex ++ mac_port_config |= (0x1 << 10); ++ ++ // TX flow control on ++ mac_port_config |= (0x1 << 12); ++ ++ // RX flow control on ++ mac_port_config |= (0x1 << 11); ++ ++ // Turn off GSW_PORT_TX_CHECK_EN_BIT ++ mac_port_config &= (~(0x1 << 13)); ++#else ++ // enable PHY's AN ++ mac_port_config |= (0x1 << 7); ++ // If mac port AN turns on, auto polling needs to turn on. 
++ cns3xxx_phy_auto_polling_conf(port, phy_addr); ++#endif ++ // normal MII ++ mac_port_config &= (~(1 << 14)); ++ ++ ++ switch (port) ++ { ++ case 0: ++ { ++ MAC0_CFG_REG = mac_port_config; ++ break; ++ } ++ case 1: ++ { ++ MAC1_CFG_REG = mac_port_config; ++ break; ++ } ++ case 2: ++ { ++ MAC2_CFG_REG = mac_port_config; ++ break; ++ } ++ } ++ ++ ++ return CAVM_OK; ++} ++ ++int icp_101a_init(u8 mac_port, u8 phy_addr) ++{ ++ u32 mac_port_config=0; ++ u16 phy_data = 0; ++ ++ cns3xxx_mdc_mdio_disable(0); ++ cns3xxx_phy_reset(phy_addr); ++ ++ phy_data = get_phy_id(mac_port); ++ if (phy_data != 0x0243) { ++ printk("ICPLUS 101A phy id should be 0x243, but the phy id is : %x\n", phy_data); ++ return CAVM_ERR; ++ } ++ printk("phy id : %x\n", phy_data); ++ printk("init IC+101A\n"); ++ ++ icp_101a_init_mac(mac_port, phy_addr); ++ ++ // read advertisement register ++ cns3xxx_read_phy(phy_addr, 0x4, &phy_data); ++ ++ // enable PAUSE frame capability ++ phy_data |= (0x1 << 10); ++ ++ cns3xxx_write_phy(phy_addr, 0x4, phy_data); ++ ++#ifndef CONFIG_FPGA_FORCE ++ ++ switch (mac_port) ++ { ++ case 0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++#if 0 ++ if (!(mac_port_config & (0x1 << 5))) { ++ if (cns3xxx_read_phy (port, PHY_AN_ADVERTISEMENT_REG_ADDR, &phy_data) == CAVM_ERR) ++ { ++ //PDEBUG("\n PORT%d, enable local flow control capability Fail\n", port); ++ return CAVM_ERR; ++ } ++ else ++ { ++ // enable PAUSE frame capability ++ phy_data |= (0x1 << 10); ++ ++ if (cns3xxx_write_phy (port, PHY_AN_ADVERTISEMENT_REG_ADDR, phy_data) == CAVM_ERR) ++ { ++ //PDEBUG("\nPORT%d, enable PAUSE frame capability Fail\n", port); ++ return CAVM_ERR; ++ } ++ } ++ } ++#endif ++ ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ // an enable ++ phy_data |= (0x1 << 12); ++ ++ // restart AN ++ phy_data |= (0x1 << 9); ++ cns3xxx_write_phy(phy_addr, 0, 
phy_data); ++ ++ while (1) ++ { ++ //PDEBUG ("\n Polling PHY%d AN \n", port); ++ cns3xxx_read_phy (phy_data, 0, &phy_data); ++ ++ if (phy_data & (0x1 << 9)) { ++ continue; ++ } else { ++ //PDEBUG ("\n PHY%d AN restart is complete \n", port); ++ break; ++ } ++ } ++ ++#endif ++ ++ return CAVM_OK; ++} ++ ++int cns3xxx_config_VSC8601_mac(u8 port) ++{ ++ u32 mac_port_config = 0; ++ ++ switch (port) ++ { ++ case 0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ switch (port) ++ { ++ case 0: ++ { ++ MAC0_CFG_REG = mac_port_config; ++ break; ++ } ++ case 1: ++ { ++ MAC1_CFG_REG = mac_port_config; ++ break; ++ } ++ case 2: ++ { ++ MAC2_CFG_REG = mac_port_config; ++ break; ++ } ++ } ++ return CAVM_OK; ++} ++ ++u16 get_phy_id(u8 phy_addr) ++{ ++ u16 read_data; ++ ++ cns3xxx_read_phy(phy_addr, 2, &read_data); ++ ++ return read_data; ++} ++ ++u32 get_vsc8601_recv_err_counter(u8 phy_addr) ++{ ++ u16 read_data=0; ++ cns3xxx_read_phy(phy_addr, 19, &read_data); ++ return read_data; ++} ++ ++u32 get_crc_good_counter(u8 phy_addr) ++{ ++ u16 read_data=0; ++ ++ // enter extended register mode ++ cns3xxx_write_phy(phy_addr, 31, 0x0001); ++ ++ cns3xxx_read_phy(phy_addr, 18, &read_data); ++ ++ // back to normal register mode ++ cns3xxx_write_phy(phy_addr, 31, 0x0000); ++ ++ return read_data; ++} ++ ++int cns3xxx_config_VSC8601(u8 mac_port, u8 phy_addr) ++{ ++ u16 phy_data=0; ++ u32 mac_port_config=0; ++ //u8 tx_skew=1, rx_skew=1; ++ u16 phy_id=0; ++ ++ cns3xxx_mdc_mdio_disable(0); ++ ++ cns3xxx_read_phy(phy_addr, 0, &phy_data); ++ // software reset ++ phy_data |= (0x1 << 15); ++ cns3xxx_write_phy(phy_addr, 0, phy_data); ++ udelay(10); ++ ++ phy_id = get_phy_id(phy_addr); ++ if (phy_id != 0x143) { ++ return CAVM_ERR; ++ } ++ ++ switch (mac_port) ++ { ++ case 0: ++ { ++ mac_port_config = MAC0_CFG_REG; ++ break; ++ } ++ case 1: ++ { ++ 
mac_port_config = MAC1_CFG_REG; ++ break; ++ } ++ case 2: ++ { ++ mac_port_config = MAC2_CFG_REG; ++ break; ++ } ++ } ++ ++ cns3xxx_enable_mac_clock(mac_port, 1); ++ //phy_auto_polling(mac_port, phy_addr); ++ ++ // enable RGMII-PHY mode ++ mac_port_config |= (0x1 << 15); ++ ++ // If mac AN turns on, auto polling needs to turn on. ++ // enable PHY's AN ++ mac_port_config |= (0x1 << 7); ++ cns3xxx_phy_auto_polling_conf(mac_port, phy_addr); ++ ++ // enable GSW MAC port 0 ++ mac_port_config &= ~(0x1 << 18); ++ ++ // normal MII ++ mac_port_config &= (~(1 << 14)); ++ ++ switch (mac_port) ++ { ++ case 0: ++ { ++ MAC0_CFG_REG = mac_port_config; ++ printk("8601 MAC0_CFG_REG: %x\n", MAC0_CFG_REG); ++ break; ++ } ++ case 1: ++ { ++ MAC1_CFG_REG = mac_port_config; ++ printk("8601 MAC1_CFG_REG: %x\n", MAC1_CFG_REG); ++ break; ++ } ++ case 2: ++ { ++ MAC2_CFG_REG = mac_port_config; ++ break; ++ } ++ } ++ ++ cns3xxx_write_phy(phy_addr, 0x18, 0xf1e7); ++ cns3xxx_write_phy(phy_addr, 0x1c, 0x8e00); ++ cns3xxx_write_phy(phy_addr, 0x10, 0x20); ++ cns3xxx_write_phy(phy_addr, 0x1c, 0xa41f); ++ cns3xxx_write_phy(phy_addr, 0x1c, 0xb41a); ++ cns3xxx_write_phy(phy_addr, 0x1c, 0xb863); ++ cns3xxx_write_phy(phy_addr, 0x17, 0xf04); ++ cns3xxx_write_phy(phy_addr, 0x15, 0x1); ++ cns3xxx_write_phy(phy_addr, 0x17, 0x0); ++ ++ return CAVM_OK; ++} ++ ++ ++ ++#ifdef CONFIG_LIBRA ++void icp_175c_all_phy_power_down(int y) ++{ ++ int i=0; ++ ++ for (i=0 ; i < 5 ; ++i) ++ std_phy_power_down(i, y); ++ ++} ++ ++static int star_gsw_config_icplus_175c_phy4(void) ++{ ++ u16 phy_data = 0, phy_data2 = 0; ++ u32 volatile ii, jj; ++ u8 phy_speed_dup = 0, phy_flowctrl = 0; ++ u32 volatile reg; ++ u8 gsw_mac_0_phy_addr = 0; ++ u8 gsw_mac_1_phy_addr = 1; ++ ++ ++ printk("config IC+175C\n"); ++ /* ++ * Configure MAC port 0 ++ * For IP175C Switch setting ++ * Force 100Mbps, and full-duplex, and flow control on ++ */ ++ reg = GSW_MAC_PORT_0_CONFIG_REG; ++ ++ // disable PHY's AN ++ reg &= ~(0x1 << 7); ++ ++ // disable 
RGMII-PHY mode ++ reg &= ~(0x1 << 15); ++ ++ // force speed = 100Mbps ++ reg &= ~(0x3 << 8); ++ reg |= (0x1 << 8); ++ ++ // force full-duplex ++ reg |= (0x1 << 10); ++ ++ // force Tx/Rx flow-control on ++ reg |= (0x1 << 11) | (0x1 << 12); ++ ++ GSW_MAC_PORT_0_CONFIG_REG = reg; ++ ++ ++ for (ii = 0; ii < 0x2000; ii++) ++ { ++ reg = GSW_MAC_PORT_0_CONFIG_REG; ++ ++ if ((reg & 0x1) && !(reg & 0x2)) ++ { ++ /* ++ * enable MAC port 0 ++ */ ++ reg &= ~(0x1 << 18); ++ ++ ++ /* ++ * enable the forwarding of unknown, multicast and broadcast packets to CPU ++ */ ++ reg &= ~((0x1 << 25) | (0x1 << 26) | (0x1 << 27)); ++ ++ /* ++ * include unknown, multicast and broadcast packets into broadcast storm ++ */ ++ reg |= ((0x1 << 29) | (0x1 << 30) | ((u32)0x1 << 31)); ++ ++ GSW_MAC_PORT_0_CONFIG_REG = reg; ++ ++ break; ++ } ++ else ++ { ++ for (jj = 0; jj < 0x1000; jj++); ++ ++ ++ if ((ii % 4) == 0) ++ printk("\rCheck MAC/PHY 0 Link Status : |"); ++ else if ((ii % 4) == 1) ++ printk("\rCheck MAC/PHY 0 Link Status : /"); ++ else if ((ii % 4) == 2) ++ printk("\rCheck MAC/PHY 0 Link Status : -"); ++ else if ((ii % 4) == 3) ++ printk("\rCheck MAC/PHY 0 Link Status : \\"); ++ } ++ } ++ ++ ++ if (!(reg & 0x1) || (reg & 0x2)) ++ { ++ /* ++ * Port 0 PHY link down or no TXC in Port 0 ++ */ ++ printk("\rCheck MAC/PHY 0 Link Status : DOWN!\n"); ++ ++ return -1; ++ } ++ else ++ { ++ printk("\rCheck MAC/PHY 0 Link Status : UP!\n"); ++ } ++ ++ ++ ++ /* ++ * Configure MAC port 1 ++ */ ++ reg = GSW_MAC_PORT_0_CONFIG_REG; ++ ++ // disable MAC's AN ++ reg &= ~(0x1 << 7); ++ ++ GSW_MAC_PORT_0_CONFIG_REG = reg; ++ ++ ++ /* enable flow control on (PAUSE frame) */ ++ star_gsw_read_phy(gsw_mac_1_phy_addr, 0x4, &phy_data); ++ ++ phy_data |= (0x1 << 10); ++ ++ star_gsw_write_phy(gsw_mac_1_phy_addr, 0x4, phy_data); ++ ++#if 1 ++ /* 2007/12/18 Jerry ++ The software reset of IC+ 175C won't reset MII register 29, 30, 31. 
++ Router Control Register: bit 7 (TAG_VLAN_EN) is a VLAN related filed which affect vlan setting. ++ Router Control Register: bit 3 (ROUTER_EN) enable router function at MII port. ++ We set them to default to let U-boot properly work. ++ */ ++ phy_data = 0x1001; ++ star_gsw_write_phy(30, 9, phy_data); ++#endif ++ /* restart PHY auto neg. */ ++ star_gsw_read_phy(gsw_mac_1_phy_addr, 0x0, &phy_data); ++ ++ phy_data |= (0x1 << 9) | (0x1 << 12); ++ ++ star_gsw_write_phy(gsw_mac_1_phy_addr, 0x0, phy_data); ++ ++ ++ ++ /* wait for PHY auto neg. complete */ ++ for (ii = 0; ii < 0x20; ii++) ++ { ++ star_gsw_read_phy(gsw_mac_1_phy_addr, 0x1, &phy_data); ++ ++ if ((phy_data & (0x1 << 2)) && (phy_data & (0x1 << 5))) ++ { ++ break; ++ } ++ else ++ { ++ if ((ii % 4) == 0) ++ printk("\rCheck MAC/PHY 1 Link Status : |"); ++ else if ((ii % 4) == 1) ++ printk("\rCheck MAC/PHY 1 Link Status : /"); ++ else if ((ii % 4) == 2) ++ printk("\rCheck MAC/PHY 1 Link Status : -"); ++ else if ((ii % 4) == 3) ++ printk("\rCheck MAC/PHY 1 Link Status : \\"); ++ } ++ } ++ ++ ++ if (ii >= 0x20) ++ { ++ printk("\rCheck MAC/PHY 1 Link Status : DOWN!\n"); ++ ++ return -1; ++ } ++ else ++ { ++ printk("\rCheck MAC/PHY 1 Link Status : UP!\n"); ++ } ++ ++ ++ star_gsw_read_phy(gsw_mac_1_phy_addr, 0x4, &phy_data); ++ ++ star_gsw_read_phy(gsw_mac_1_phy_addr, 0x5, &phy_data2); ++ ++ ++ if (phy_data & 0x0400) //FC on ++ { ++ //printk(""); ++ phy_flowctrl = 1; ++ } ++ else ++ { ++ // printk(""); ++ phy_flowctrl = 0; ++ } ++ ++ ++ phy_speed_dup = 0; ++ ++ if ((phy_data & 0x0100) && (phy_data2 & 0x0100)) //100F ++ { ++ // printk("<100F>"); ++ phy_speed_dup |= (0x1 << 3); //set bit3 for 100F ++ } ++ else if ((phy_data & 0x0080) && (phy_data2 & 0x0080)) //100F ++ { ++ // printk("<100H>"); ++ phy_speed_dup |= (0x1 << 2); ++ } ++ else if ((phy_data & 0x0040) && (phy_data2 & 0x0040)) //100F ++ { ++ // printk("<10F>"); ++ phy_speed_dup |= (0x1 << 1); ++ } ++ else if ((phy_data & 0x0020) && (phy_data2 & 0x0020)) //100F 
++ { ++ // printk("<10H>"); ++ phy_speed_dup |= 0x1; ++ } ++ ++ ++ /* ++ * Configure MAC port 1 in forced setting subject to the current PHY status ++ */ ++ reg = GSW_MAC_PORT_1_CONFIG_REG; ++ ++ reg &= ~(0x1 << 7); //AN off ++ ++ reg &= ~(0x3 << 8); ++ ++ if (phy_speed_dup & 0x0C) //100 ++ { ++ //printk(""); ++ reg |= (0x01 << 8); ++ } ++ else if (phy_speed_dup & 0x03) //10 ++ { ++ //printk(""); ++ reg |= (0x00 << 8); ++ } ++ ++ reg &= ~(0x1 << 11); ++ ++ if (phy_flowctrl) //FC on ++ { ++ //printk(""); ++ reg |= (0x1 << 11); ++ } ++ else ++ { ++ //printk(""); ++ reg |= (0x0 << 11); ++ } ++ ++ reg &= ~(0x1 << 10); ++ ++ if ((phy_speed_dup & 0x2) || (phy_speed_dup & 0x8)) //FullDup ++ { ++ //printk(""); ++ reg |= (0x1 << 10); ++ } ++ else //HalfDup ++ { ++ //printk(""); ++ reg |= (0x0 << 10); //Half ++ } ++ ++ GSW_MAC_PORT_1_CONFIG_REG = reg; ++ ++ ++ /* ++ * Check MAC port 1 link status ++ */ ++ for (ii = 0; ii < 0x1000; ii++) ++ { ++ reg = GSW_MAC_PORT_1_CONFIG_REG; ++ ++ if ((reg & 0x1) && !(reg & 0x2)) ++ { ++ /* ++ * enable MAC port 1 ++ */ ++ reg &= ~(0x1 << 18); ++ ++ /* ++ * enable the forwarding of unknown, multicast and broadcast packets to CPU ++ */ ++ reg &= ~((0x1 << 25) | (0x1 << 26) | (0x1 << 27)); ++ ++ /* ++ * include unknown, multicast and broadcast packets into broadcast storm ++ */ ++ reg |= ((0x1 << 29) | (0x1 << 30) | ((u32)0x1 << 31)); ++ ++ GSW_MAC_PORT_1_CONFIG_REG = reg; ++ ++ return 0; ++ } ++ } ++ ++ ++ if (ii > 0x1000) ++ { ++ /* ++ * Port 1 PHY link down or no TXC in Port 1 ++ */ ++ printk("\rCheck MAC/PHY 1 Link Status : DOWN!\n"); ++ ++ return -1; ++ } ++ return 0; ++} ++#endif ++ ++#if 0 ++static int star_gsw_config_VSC8201(u8 mac_port, u8 phy_addr) // include cicada 8201 ++{ ++ //u32 mac_port_base = 0; ++ u32 mac_port_config=0; ++ u16 phy_reg; ++ int i; ++ ++ printk("\nconfigure VSC8201\n"); ++ //PDEBUG("mac port : %d phy addr : %d\n", mac_port, phy_addr); ++ /* ++ * Configure MAC port 0 ++ * For Cicada CIS8201 single PHY ++ */ ++ 
if (mac_port == 0) { ++ //PDEBUG("port 0\n"); ++ mac_port_config = GSW_MAC_PORT_0_CONFIG_REG; ++ } ++ if (mac_port == 1) { ++ //PDEBUG("port 1\n"); ++ mac_port_config = GSW_MAC_PORT_1_CONFIG_REG; ++ } ++ ++ star_gsw_set_phy_addr(mac_port, phy_addr); ++ //star_gsw_set_phy_addr(1, 1); ++ ++ //mac_port_config = __REG(mac_port_base); ++ ++ // enable PHY's AN ++ mac_port_config |= (0x1 << 7); ++ ++ // enable RGMII-PHY mode ++ mac_port_config |= (0x1 << 15); ++ ++ // enable GSW MAC port 0 ++ mac_port_config &= ~(0x1 << 18); ++ ++ if (mac_port == 0) { ++ //PDEBUG("port 0\n"); ++ GSW_MAC_PORT_0_CONFIG_REG = mac_port_config; ++ } ++ if (mac_port == 1) { ++ //PDEBUG("port 1\n"); ++ GSW_MAC_PORT_1_CONFIG_REG = mac_port_config; ++ } ++ ++ /* ++ * Configure Cicada's CIS8201 single PHY ++ */ ++#ifdef CONFIG_STAR9100_SHNAT_PCI_FASTPATH ++ /* near-end loopback mode */ ++ star_gsw_read_phy(phy_addr, 0x0, &phy_reg); ++ phy_reg |= (0x1 << 14); ++ star_gsw_write_phy(phy_addr, 0x0, phy_reg); ++#endif ++ ++ star_gsw_read_phy(phy_addr, 0x1C, &phy_reg); ++ ++ // configure SMI registers have higher priority over MODE/FRC_DPLX, and ANEG_DIS pins ++ phy_reg |= (0x1 << 2); ++ ++ star_gsw_write_phy(phy_addr, 0x1C, phy_reg); ++ ++ star_gsw_read_phy(phy_addr, 0x17, &phy_reg); ++ ++ // enable RGMII MAC interface mode ++ phy_reg &= ~(0xF << 12); ++ phy_reg |= (0x1 << 12); ++ ++ // enable RGMII I/O pins operating from 2.5V supply ++ phy_reg &= ~(0x7 << 9); ++ phy_reg |= (0x1 << 9); ++ ++ star_gsw_write_phy(phy_addr, 0x17, phy_reg); ++ ++ star_gsw_read_phy(phy_addr, 0x4, &phy_reg); ++ ++ // Enable symmetric Pause capable ++ phy_reg |= (0x1 << 10); ++ ++ star_gsw_write_phy(phy_addr, 0x4, phy_reg); ++ ++ ++ ++ if (mac_port == 0) { ++ //PDEBUG("port 0\n"); ++ mac_port_config = GSW_MAC_PORT_0_CONFIG_REG; ++ } ++ if (mac_port == 1) { ++ //PDEBUG("port 1\n"); ++ mac_port_config = GSW_MAC_PORT_1_CONFIG_REG; ++ } ++ ++ ++ ++ ++ ++ ++ ++ // enable PHY's AN ++ mac_port_config |= (0x1 << 7); ++ ++ if (mac_port 
== 0) { ++ //PDEBUG("port 0\n"); ++ GSW_MAC_PORT_0_CONFIG_REG = mac_port_config; ++ } ++ if (mac_port == 1) { ++ //PDEBUG("port 1\n"); ++ GSW_MAC_PORT_1_CONFIG_REG = mac_port_config; ++ } ++ ++ /* ++ * Enable PHY1 AN restart bit to restart PHY1 AN ++ */ ++ star_gsw_read_phy(phy_addr, 0x0, &phy_reg); ++ ++ phy_reg |= (0x1 << 9) | (0x1 << 12); ++ ++ star_gsw_write_phy(phy_addr, 0x0, phy_reg); ++ ++ /* ++ * Polling until PHY0 AN restart is complete ++ */ ++ for (i = 0; i < 0x1000; i++) { ++ star_gsw_read_phy(phy_addr, 0x1, &phy_reg); ++ ++ if ((phy_reg & (0x1 << 5)) && (phy_reg & (0x1 << 2))) { ++ printk("0x1 phy reg: %x\n", phy_reg); ++ break; ++ } else { ++ udelay(100); ++ } ++ } ++ ++ if (mac_port == 0) { ++ //PDEBUG("port 0\n"); ++ mac_port_config = GSW_MAC_PORT_0_CONFIG_REG; ++ } ++ if (mac_port == 1) { ++ //PDEBUG("port 1\n"); ++ mac_port_config = GSW_MAC_PORT_1_CONFIG_REG; ++ } ++ ++ if (((mac_port_config & 0x1) == 0) || (mac_port_config & 0x2)) { ++ printk("Check MAC/PHY%s Link Status : DOWN!\n", (mac_port == 0 ? "0" : "1")); ++ } else { ++ printk("Check MAC/PHY%s Link Status : UP!\n", (mac_port == 0 ? 
"0" : "1")); ++ /* ++ * There is a bug for CIS8201 PHY operating at 10H mode, and we use the following ++ * code segment to work-around ++ */ ++ star_gsw_read_phy(phy_addr, 0x05, &phy_reg); ++ ++ if ((phy_reg & (0x1 << 5)) && (!(phy_reg & (0x1 << 6))) && (!(phy_reg & (0x1 << 7))) && (!(phy_reg & (0x1 << 8)))) { /* 10H,10F/100F/100H off */ ++ star_gsw_read_phy(phy_addr, 0x0a, &phy_reg); ++ ++ if ((!(phy_reg & (0x1 << 10))) && (!(phy_reg & (0x1 << 11)))) { /* 1000F/1000H off */ ++ star_gsw_read_phy(phy_addr, 0x16, &phy_reg); ++ ++ phy_reg |= (0x1 << 13) | (0x1 << 15); // disable "Link integrity check(B13)" & "Echo mode(B15)" ++ ++ star_gsw_write_phy(phy_addr, 0x16, phy_reg); ++ } ++ } ++ } ++ ++ if (mac_port == 0) { ++ // adjust MAC port 0 RX/TX clock skew ++ GSW_BIST_RESULT_TEST_0_REG &= ~((0x3 << 24) | (0x3 << 26)); ++ GSW_BIST_RESULT_TEST_0_REG |= ((0x2 << 24) | (0x2 << 26)); ++ } ++ ++ if (mac_port == 1) { ++ // adjust MAC port 1 RX/TX clock skew ++ GSW_BIST_RESULT_TEST_0_REG &= ~((0x3 << 28) | (0x3 << 30)); ++ GSW_BIST_RESULT_TEST_0_REG |= ((0x2 << 28) | (0x2 << 30)); ++ } ++ ++ return 0; ++} ++ ++ ++ ++ ++static void star_gsw_config_VSC8X01() ++{ ++ u16 phy_id = 0; ++ ++#ifdef CONFIG_DORADO2 ++ star_gsw_set_phy_addr(1,1); ++ star_gsw_read_phy(1, 0x02, &phy_id); ++ // printk("phy id = %X\n", phy_id); ++ if (phy_id == 0x000F) //VSC8201 ++ star_gsw_config_VSC8201(1,1); ++ else ++ star_gsw_config_VSC8601(1,1); ++#else ++#ifdef CONFIG_LEO ++ star_gsw_set_phy_addr(0,0); ++ star_gsw_read_phy(0, 0x02, &phy_id); ++ // printk("phy id = %X\n", phy_id); ++ if (phy_id == 0x000F) //VSC8201 ++ star_gsw_config_VSC8201(0,0); ++ else ++ star_gsw_config_VSC8601(0,0); ++#endif ++#endif ++} ++#endif ++ ++#if defined(CONFIG_DORADO) || defined(CONFIG_DORADO2) ++static int star_gsw_config_port0_VSC7385(void) ++{ ++ u32 mac_port_config=0; ++ int i; ++ ++ printk("config VSC7385\n"); ++ ++ mac_port_config = GSW_MAC_PORT_0_CONFIG_REG; ++ ++ // disable PHY's AN ++ mac_port_config &= ~(0x1 
<< 7); ++ ++ // enable RGMII-PHY mode ++ mac_port_config |= (0x1 << 15); ++ ++ // force speed = 1000Mbps ++ mac_port_config &= ~(0x3 << 8); ++ mac_port_config |= (0x2 << 8); ++ ++ // force full-duplex ++ mac_port_config |= (0x1 << 10); ++ ++ // force Tx/Rx flow-control on ++ mac_port_config |= (0x1 << 11) | (0x1 << 12); ++ ++ GSW_MAC_PORT_0_CONFIG_REG = mac_port_config; ++ ++ udelay(1000); ++ ++ for (i = 0; i < 50000; i++) { ++ mac_port_config = GSW_MAC_PORT_0_CONFIG_REG; ++ if ((mac_port_config & 0x1) && !(mac_port_config & 0x2)) { ++ break; ++ } else { ++ udelay(100); ++ } ++ } ++ ++ if (!(mac_port_config & 0x1) || (mac_port_config & 0x2)) { ++ printk("MAC0 PHY Link Status : DOWN!\n"); ++ return -1; ++ } else { ++ printk("MAC0 PHY Link Status : UP!\n"); ++ } ++ ++ // enable MAC port 0 ++ mac_port_config &= ~(0x1 << 18); ++ ++ // disable SA learning ++ mac_port_config |= (0x1 << 19); ++ ++ // forward unknown, multicast and broadcast packets to CPU ++ mac_port_config &= ~((0x1 << 25) | (0x1 << 26) | (0x1 << 27)); ++ ++ // storm rate control for unknown, multicast and broadcast packets ++ mac_port_config |= (0x1 << 29) | (0x1 << 30) | ((u32)0x1 << 31); ++ ++ GSW_MAC_PORT_0_CONFIG_REG = mac_port_config; ++ ++ // disable MAC port 1 ++ mac_port_config = GSW_MAC_PORT_1_CONFIG_REG; ++ mac_port_config |= (0x1 << 18); ++ GSW_MAC_PORT_1_CONFIG_REG = mac_port_config; ++ ++ // adjust MAC port 0 /RX/TX clock skew ++ GSW_BIST_RESULT_TEST_0_REG &= ~((0x3 << 24) | (0x3 << 26)); ++ GSW_BIST_RESULT_TEST_0_REG |= ((0x2 << 24) | (0x2 << 26)); ++ ++ return 0; ++} ++#endif +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_phy.h +@@ -0,0 +1,82 @@ ++/******************************************************************************* ++ * ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the 
License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++1* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++#ifndef CNS3XXX_PHY_H ++#define CNS3XXX_PHY_H ++ ++#define LINUX_KERNEL // if don't define LINUX_KERNEL, mean u-boot ++ ++#if defined(LINUX_KERNEL) ++#include ++#include ++#else // u-boot ++#define __init_or_module ++#include "cns3xxx_symbol.h" ++#endif ++ ++void disable_AN(int port, int y); ++ ++u16 get_phy_id(u8 phy_addr); ++int cns3xxx_std_phy_power_down(int phy_addr, int y); ++u32 get_vsc8601_recv_err_counter(u8 phy_addr); ++u32 get_crc_good_counter(u8 phy_addr); ++int cns3xxx_config_VSC8601(u8 mac_port, u8 phy_addr); ++int vsc8601_power_down(int phy_addr, int y); ++int icp_101a_init(u8 mac_port, u8 phy_addr); ++int bcm53115M_init(u8 mac_port, u16 phy_addr); ++int icp_ip1001_init(u8 mac_port, u8 phy_addr); ++ ++int cns3xxx_phy_auto_polling_enable(u8 port, u8 en); ++ ++int cns3xxx_read_phy(u8 phy_addr, u8 phy_reg, u16 *read_data); ++int cns3xxx_write_phy(u8 phy_addr, u8 phy_reg, u16 write_data); ++ ++// wrap cns3xxx_spi_tx_rx() for argument order ++int cns3xxx_spi_tx_rx_n(u32 tx_data, u32 *rx_data, u32 tx_channel, u32 tx_eof_flag); ++ ++// for bcm53115M ++#define ROBO_SPIF_BIT 7 ++#define BCM53115_SPI_CHANNEL 1 ++#define ROBO_RACK_BIT 5 ++ ++#define VLAN_START_BIT 7 ++#define VLAN_WRITE_CMD 0 ++ ++//#define BCM_PORT_1G 2 ++typedef enum 
++{ ++ BCM_PORT_10M = 0, ++ BCM_PORT_100M, ++ BCM_PORT_1G, ++}BCM_PORT_SPEED; ++ ++#define BCM_PORT_0 0 ++#define BCM_PORT_1 1 ++#define BCM_PORT_2 2 ++#define BCM_PORT_3 3 ++#define BCM_PORT_4 4 ++#define BCM_PORT_5 5 ++#define BCM_PORT_IMP 6 ++ ++#endif // end #ifndef CNS3XXX_PHY_H +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_sppe_hook.c +@@ -0,0 +1,39 @@ ++/****************************************************************************** ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#if defined(CONFIG_CNS3XXX_SPPE) ++#include ++#include ++ ++int sppe_hook_ready = 0; ++int (*sppe_func_hook)(SPPE_PARAM *param) = NULL; ++int sppe_pci_fp_ready = 0; ++int (*sppe_pci_fp_hook)(SPPE_PARAM *param) = NULL; ++ ++EXPORT_SYMBOL(sppe_hook_ready); ++EXPORT_SYMBOL(sppe_func_hook); ++EXPORT_SYMBOL(sppe_pci_fp_ready); ++EXPORT_SYMBOL(sppe_pci_fp_hook); ++ ++#endif //#if defined(CONFIG_CNS3XXX_SPPE) ++ +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_symbol.h +@@ -0,0 +1,317 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++// the symbol define memory map register. 
++ ++#ifndef CNS3XXX_SYMBOL_H ++#define CNS3XXX_SYMBOL_H ++ ++#define DRV_VERSION "Cavium CNS3XXX Switch Driver-0.0.1" ++ ++ ++#define LINUX_KERNEL // if don't define LINUX_KERNEL, mean u-boot ++ ++#if defined(LINUX_KERNEL) ++// linux kernel ++#include ++ ++#define SWITCH_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_SWITCH_BASE_VIRT+offset))) ++#define PMU_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_PM_BASE_VIRT+offset))) ++#define MISC_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3XXX_MISC_BASE_VIRT+offset))) ++ ++ ++#define NETDEV_SIZE 4097+3 ++ ++#define PORT0_NETDEV_INDEX NETDEV_SIZE-3 ++#define PORT1_NETDEV_INDEX NETDEV_SIZE-2 ++#define PORT2_NETDEV_INDEX NETDEV_SIZE-1 ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++#define FP_NETDEV_INDEX NETDEV_SIZE-4 ++#endif ++ ++#define PORT0_NETDEV net_dev_array[PORT0_NETDEV_INDEX] ++#define PORT1_NETDEV net_dev_array[PORT1_NETDEV_INDEX] ++#define PORT2_NETDEV net_dev_array[PORT2_NETDEV_INDEX] ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++#define FP_NETDEV net_dev_array[FP_NETDEV_INDEX] ++#endif ++ ++#else // u-boot ++#include // for u8, u32 ++ ++#include "cns3000.h" ++#define CAVM_OK 0 ++#define CAVM_ERR 1 ++#define CAVM_NOT_FOUND 2 ++#define CAVM_FOUND 3 ++#define CAVM_FAIL -1 // use minus ++ ++#define SWITCH_REG_VALUE(addr) (*((volatile unsigned int *)(CNS3000_VEGA_SWITCH_BASE+addr))) ++#define PMU_REG_VALUE(addr) (*((volatile unsigned int *)(CNS3000_VEGA_PM_BASE+addr))) ++#define MISC_REG_VALUE(offset) (*((volatile unsigned int *)(CNS3000_VEGA_MISC_BASE+offset))) ++ ++#endif ++ ++// for VLAN and ARL table MB_PMAP ++#define MAC_PORT0_PMAP 1 ++#define MAC_PORT1_PMAP (1 << 1) ++#define MAC_PORT2_PMAP (1 << 4) ++#define CPU_PORT_PMAP (1 << 2) ++ ++ ++ ++// memory map register definition ++ ++//#define PHY_CTRL_REG (*(u32 volatile*(0xff))) ++#define PHY_CTRL_REG SWITCH_REG_VALUE(0x0) ++#define PHY_AUTO_ADDR_REG SWITCH_REG_VALUE(0x04) ++ ++#define MAC_GLOB_CFG_REG SWITCH_REG_VALUE(0x08) ++#define 
MAC_GLOB_CFG_EXT_REG SWITCH_REG_VALUE(0xf4) ++#define MAC0_CFG_REG SWITCH_REG_VALUE(0x0c) ++#define MAC1_CFG_REG SWITCH_REG_VALUE(0x10) ++#define MAC2_CFG_REG SWITCH_REG_VALUE(0x18) ++#define CPU_CFG_REG SWITCH_REG_VALUE(0x14) ++ ++#define MAC0_PRI_CTRL_REG SWITCH_REG_VALUE(0x1c) ++#define MAC1_PRI_CTRL_REG SWITCH_REG_VALUE(0x20) ++#define CPU_PRI_CTRL_REG SWITCH_REG_VALUE(0x24) ++#define HNAT_PRI_CTRL_REG SWITCH_REG_VALUE(0x28) ++#define MAC2_PRI_CTRL_REG SWITCH_REG_VALUE(0x2c) ++ ++#define MAC0_PRI_CTRL_EXT_REG SWITCH_REG_VALUE(0x30) ++ ++#define ETYPE1_ETYPE0_REG SWITCH_REG_VALUE(0x34) ++#define ETYPE3_ETYPE2_REG SWITCH_REG_VALUE(0x38) ++ ++#define UDP_RANGE0_REG SWITCH_REG_VALUE(0x3c) ++#define UDP_RANGE1_REG SWITCH_REG_VALUE(0x40) ++#define UDP_RANGE2_REG SWITCH_REG_VALUE(0x44) ++#define UDP_RANGE3_REG SWITCH_REG_VALUE(0x48) ++ ++ ++#define PRIO_ETYPE_UDP_REG SWITCH_REG_VALUE(0x4c) ++ ++#define PRIO_IPDSCP_7_0_REG SWITCH_REG_VALUE(0x50) ++#define PRIO_IPDSCP_15_8_REG SWITCH_REG_VALUE(0x54) ++#define PRIO_IPDSCP_23_16_REG SWITCH_REG_VALUE(0x58) ++#define PRIO_IPDSCP_31_24_REG SWITCH_REG_VALUE(0x5c) ++#define PRIO_IPDSCP_39_32_REG SWITCH_REG_VALUE(0x60) ++#define PRIO_IPDSCP_47_40_REG SWITCH_REG_VALUE(0x64) ++#define PRIO_IPDSCP_55_48_REG SWITCH_REG_VALUE(0x68) ++#define PRIO_IPDSCP_63_56_REG SWITCH_REG_VALUE(0x6c) ++ ++#define TC_CTRL_REG SWITCH_REG_VALUE(0x70) ++#define RATE_CTRL_REG SWITCH_REG_VALUE(0x74) ++ ++#define FC_GLOB_THRS_REG SWITCH_REG_VALUE(0x78) ++#define FC_PORT_THRS_REG SWITCH_REG_VALUE(0x7c) ++#define MC_GLOB_THRS_REG SWITCH_REG_VALUE(0x80) ++#define DC_GLOB_THRS_REG SWITCH_REG_VALUE(0x84) ++ ++#define ARL_VLAN_CMD_REG SWITCH_REG_VALUE(0x88) ++ ++#define ARL_CTRL0_REG SWITCH_REG_VALUE(0x8c) ++#define ARL_CTRL1_REG SWITCH_REG_VALUE(0x90) ++#define ARL_CTRL2_REG SWITCH_REG_VALUE(0x94) ++ ++#define VLAN_CFG SWITCH_REG_VALUE(0x098) ++ ++#define MAC1_MAC0_PVID_REG SWITCH_REG_VALUE(0x9c) ++#define MAC2_CPU_PVID_REG SWITCH_REG_VALUE(0xa0) ++ ++#define 
VLAN_CTRL0_REG SWITCH_REG_VALUE(0xa4) ++#define VLAN_CTRL1_REG SWITCH_REG_VALUE(0xa8) ++#define VLAN_CTRL2_REG SWITCH_REG_VALUE(0xac) ++ ++#define SESSION_ID_1_0_REG SWITCH_REG_VALUE(0xb0) ++#define SESSION_ID_3_2_REG SWITCH_REG_VALUE(0xb4) ++#define SESSION_ID_5_4_REG SWITCH_REG_VALUE(0xb8) ++#define SESSION_ID_7_6_REG SWITCH_REG_VALUE(0xbc) ++#define SESSION_ID_9_8_REG SWITCH_REG_VALUE(0xc0) ++#define SESSION_ID_11_10_REG SWITCH_REG_VALUE(0xc4) ++#define SESSION_ID_13_12_REG SWITCH_REG_VALUE(0xc8) ++#define SESSION_ID_15_14_REG SWITCH_REG_VALUE(0xcc) ++ ++#define INTR_STAT_REG SWITCH_REG_VALUE(0xd0) ++#define INTR_MASK_REG SWITCH_REG_VALUE(0xd4) ++ ++#define SRAM_TEST_REG SWITCH_REG_VALUE(0xd8) ++ ++#define MEM_QUEUE_REG SWITCH_REG_VALUE(0xdc) ++ ++#define SARL_CTRL_REG SWITCH_REG_VALUE(0xe0) ++#define SARL_OQ_GTH_REG SWITCH_REG_VALUE(0xe4) ++#define SARL_OQ_YTH_REG SWITCH_REG_VALUE(0xe8) ++#define SARL_OQ_RTH_REG SWITCH_REG_VALUE(0xec) ++ ++#define SLK_SKEW_CTRL_REG SWITCH_REG_VALUE(0xf0) ++ ++#define DMA_RING_CTRL_REG SWITCH_REG_VALUE(0x100) ++ ++#define DMA_AUTO_POLL_CFG_REG SWITCH_REG_VALUE(0x104) ++ ++#define DELAY_INTR_CFG_REG SWITCH_REG_VALUE(0x108) ++ ++#define TS_DMA_CTRL0_REG SWITCH_REG_VALUE(0x110) ++#define TS_DESC_PTR0_REG SWITCH_REG_VALUE(0x114) ++#define TS_DESC_BASE_ADDR0_REG SWITCH_REG_VALUE(0x118) ++ ++#define FS_DMA_CTRL0_REG SWITCH_REG_VALUE(0x120) ++#define FS_DESC_PTR0_REG SWITCH_REG_VALUE(0x124) ++#define FS_DESC_BASE_ADDR0_REG SWITCH_REG_VALUE(0x128) ++ ++#define TS_DMA_CTRL1_REG SWITCH_REG_VALUE(0x130) ++#define TS_DESC_PTR1_REG SWITCH_REG_VALUE(0x134) ++#define TS_DESC_BASE_ADDR1_REG SWITCH_REG_VALUE(0x138) ++ ++#define FS_DMA_CTRL1_REG SWITCH_REG_VALUE(0x140) ++#define FS_DESC_PTR1_REG SWITCH_REG_VALUE(0x144) ++#define FS_DESC_BASE_ADDR1_REG SWITCH_REG_VALUE(0x148) ++ ++#define TS_DMA_STA_REG SWITCH_REG_VALUE(0x150) ++#define FS_DMA_STA_REG SWITCH_REG_VALUE(0x154) ++ ++#define TS_MRD_CMD_CNT_REG SWITCH_REG_VALUE(0x158) ++#define 
TS_MWT_CMD_CNT_REG SWITCH_REG_VALUE(0x15c) ++ ++#define FS_MRD_CMD_CNT_REG SWITCH_REG_VALUE(0x160) ++#define FS_MWT_CMD_CNT_REG SWITCH_REG_VALUE(0x164) ++ ++#define C_RXOKPKT_MAC0_REG SWITCH_REG_VALUE(0x300) ++#define C_RXOKBYTE_MAC0_REG SWITCH_REG_VALUE(0x304) ++#define C_RXRUNT_MAC0_REG SWITCH_REG_VALUE(0x308) ++#define C_RXLONG_MAC0_REG SWITCH_REG_VALUE(0x30c) ++#define C_RXDROP_MAC0_REG SWITCH_REG_VALUE(0x310) ++#define C_RXCRC_MAC0_REG SWITCH_REG_VALUE(0x314) ++#define C_RXARLDROP_MAC0_REG SWITCH_REG_VALUE(0x318) ++#define C_VIDROP_MAC0_REG SWITCH_REG_VALUE(0x31c) ++#define C_VEDROP_MAC0_REG SWITCH_REG_VALUE(0x320) ++#define C_RXRL_MAC0_REG SWITCH_REG_VALUE(0x324) ++#define C_RXPAUSE_MAC0_REG SWITCH_REG_VALUE(0x328) ++ ++#define C_TXOKPKT_MAC0_REG SWITCH_REG_VALUE(0x32c) ++#define C_TXOKBYTE_MAC0_REG SWITCH_REG_VALUE(0x330) ++#define C_TXPAUSECOL_MAC0_REG SWITCH_REG_VALUE(0x334) ++ ++#define C_RXOKPKT_MAC1_REG SWITCH_REG_VALUE(0x400) ++#define C_RXOKBYTE_MAC1_REG SWITCH_REG_VALUE(0x404) ++#define C_RXRUNT_MAC1_REG SWITCH_REG_VALUE(0x408) ++#define C_RXLONG_MAC1_REG SWITCH_REG_VALUE(0x40c) ++#define C_RXDROP_MAC1_REG SWITCH_REG_VALUE(0x410) ++#define C_RXCRC_MAC1_REG SWITCH_REG_VALUE(0x414) ++#define C_RXARLDROP_MAC1_REG SWITCH_REG_VALUE(0x418) ++#define C_VIDROP_MAC1_REG SWITCH_REG_VALUE(0x41c) ++#define C_VEDROP_MAC1_REG SWITCH_REG_VALUE(0x420) ++#define C_RXRL_MAC1_REG SWITCH_REG_VALUE(0x424) ++#define C_RXPAUSE_MAC1_REG SWITCH_REG_VALUE(0x428) ++ ++#define C_TXOKPKT_MAC1_REG SWITCH_REG_VALUE(0x42c) ++#define C_TXOKBYTE_MAC1_REG SWITCH_REG_VALUE(0x430) ++#define C_TXPAUSECOL_MAC1_REG SWITCH_REG_VALUE(0x434) ++ ++#define C_TSOKPKT_CPU_REG SWITCH_REG_VALUE(0x500) ++#define C_TSOKBYTE_CPU_REG SWITCH_REG_VALUE(0x504) ++#define C_TSRUNT_CPU_REG SWITCH_REG_VALUE(0x508) ++#define C_TSLONG_CPU_REG SWITCH_REG_VALUE(0x50c) ++#define C_TSNODSTDROP_CPU_REG SWITCH_REG_VALUE(0x510) ++#define C_TSARLDROP_CPU_REG SWITCH_REG_VALUE(0x514) ++#define C_TSVIDROP_CPU_REG 
SWITCH_REG_VALUE(0x518) ++#define C_TSVEDROP_CPU_REG SWITCH_REG_VALUE(0x51c) ++#define C_TSRL_CPU_REG SWITCH_REG_VALUE(0x520) ++ ++#define C_FSOKPKT_CPU_REG SWITCH_REG_VALUE(0x524) ++#define C_FSOKBYTE_CPU_REG SWITCH_REG_VALUE(0x528) ++ ++#define C_RXOKPKT_MAC2_REG SWITCH_REG_VALUE(0x600) ++#define C_RXOKBYTE_MAC2_REG SWITCH_REG_VALUE(0x604) ++#define C_RXRUNT_MAC2_REG SWITCH_REG_VALUE(0x608) ++#define C_RXLONG_MAC2_REG SWITCH_REG_VALUE(0x60c) ++#define C_RXDROP_MAC2_REG SWITCH_REG_VALUE(0x610) ++#define C_RXCRC_MAC2_REG SWITCH_REG_VALUE(0x614) ++#define C_RXARLDROP_MAC2_REG SWITCH_REG_VALUE(0x618) ++#define C_VIDROP_MAC2_REG SWITCH_REG_VALUE(0x61c) ++#define C_VEDROP_MAC2_REG SWITCH_REG_VALUE(0x620) ++#define C_RXRL_MAC2_REG SWITCH_REG_VALUE(0x624) ++#define C_RXPAUSE_MAC2_REG SWITCH_REG_VALUE(0x628) ++ ++#define C_TXOKPKT_MAC2_REG SWITCH_REG_VALUE(0x62c) ++#define C_TXOKBYTE_MAC2_REG SWITCH_REG_VALUE(0x630) ++#define C_TXPAUSECOL_MAC2_REG SWITCH_REG_VALUE(0x634) ++ ++#define C_TXOKPKT_MAC0_EXT_REG SWITCH_REG_VALUE(0x72c) ++#define C_TXOKBYTE_MAC0_EXT_REG SWITCH_REG_VALUE(0x730) ++ ++#define CLK_GATE_REG PMU_REG_VALUE(0x0) ++#define SOFT_RST_REG PMU_REG_VALUE(0x4) ++#define PLL_HM_PD_CTRL_REG PMU_REG_VALUE(0x1c) ++ ++#define GPIOB_PIN_EN_REG MISC_REG_VALUE(0x18) ++#define IOCDA_REG MISC_REG_VALUE(0x1c) ++ ++#define LEVEL_HIGH 0 ++#define RISING_EDGE 1 ++ ++#ifdef CONFIG_SILICON ++ ++#define STATUS_INTERRUPT_ID 49 ++ ++#define FSRC_RING0_INTERRUPT_ID 51 ++#define FSQF_RING0_INTERRUPT_ID 53 ++ ++#define FSRC_RING1_INTERRUPT_ID 55 ++#define FSQF_RING1_INTERRUPT_ID 57 ++ ++#define TSTC_RING0_INTERRUPT_ID 50 ++ ++#define TSTC_RING1_INTERRUPT_ID 54 ++ ++#define HNAT_INTERRUPT_ID 58 ++ ++#else ++ ++//#define STATUS_INTERRUPT_ID 49 ++#define STATUS_INTERRUPT_ID 38 ++//#define FSRC_RING0_INTERRUPT_ID 51 ++#define FSRC_RING0_INTERRUPT_ID 40 ++ ++#define TSQE_RING0_INTERRUPT_ID 52 ++ ++//#define FSQF_RING0_INTERRUPT_ID 53 ++#define FSQF_RING0_INTERRUPT_ID 42 ++ ++#define 
FSQF_RING1_INTERRUPT_ID 46 ++#define FSRC_RING1_INTERRUPT_ID 44 ++ ++//#define FSRC_RING1_INTERRUPT_ID 55 ++ ++#define TSTC_RING0_INTERRUPT_ID 39 ++#define TSTC_RING1_INTERRUPT_ID 43 ++ ++#define TSQE_RING1_INTERRUPT_ID 56 ++#define HNAT_INTERRUPT_ID 58 ++#endif // #ifdef CONFIG_SILICON ++ ++#endif +--- /dev/null ++++ b/drivers/net/cns3xxx/cns3xxx_tool.h +@@ -0,0 +1,898 @@ ++/******************************************************************************* ++ * ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. 
++ * ++ ********************************************************************************/ ++ ++#ifndef CNS3XXX_TOOL_H ++#define CNS3XXX_TOOL_H ++ ++#define PRINT_INFO printk ++ ++#if defined(__KERNEL__) ++ ++#include "cns3xxx.h" ++#include // for printk ++ ++#else // u-boot ++ ++#endif ++ ++#define SHOW_DEBUG_MESSAGE ++#ifdef SHOW_DEBUG_MESSAGE ++ ++extern int MSG_LEVEL; ++ ++#define NO_MSG 0 ++#define NORMAL_MSG 1 ++#define WARNING_MSG (1 << 1) ++#define CRITICAL_MSG (1 << 2) ++#define DUMP_RX_PKT_INFO (1 << 3) ++#define DUMP_TX_PKT_INFO (1 << 4) ++ ++#define DEBUG_MSG(msg_level, fmt, args...)\ ++{ \ ++ int i=0; \ ++\ ++ for(i=0 ; i < 3 ; ++i) { \ ++ if ((MSG_LEVEL & msg_level) >> i) \ ++ printk(KERN_INFO "*cns3xxx gsw debug* " fmt, ## args); \ ++ } \ ++} ++ ++#endif ++ ++#define GET_MAC_PORT_CFG(port, cfg) \ ++{ \ ++ switch (port) \ ++ { \ ++ case MAC_PORT0: \ ++ { \ ++ cfg = MAC0_CFG_REG; \ ++ break; \ ++ } \ ++ case MAC_PORT1: \ ++ { \ ++ cfg = MAC1_CFG_REG; \ ++ break; \ ++ } \ ++ case MAC_PORT2: \ ++ { \ ++ cfg = MAC2_CFG_REG; \ ++ break; \ ++ } \ ++ } \ ++} ++ ++#define SET_MAC_PORT_CFG(port, cfg) \ ++{ \ ++ switch (port) \ ++ { \ ++ case MAC_PORT0: \ ++ { \ ++ MAC0_CFG_REG = cfg; \ ++ break; \ ++ } \ ++ case MAC_PORT1: \ ++ { \ ++ MAC1_CFG_REG = cfg; \ ++ break; \ ++ } \ ++ case MAC_PORT2: \ ++ { \ ++ MAC2_CFG_REG = cfg; \ ++ break; \ ++ } \ ++ } \ ++} ++ ++#define between(x, start, end) ((x)>=(start) && (x)<=(end)) ++static inline void print_packet(unsigned char *data, int len) ++{ ++ int i,j; ++ ++ printk("packet length: %d%s:\n", len, len>128?"(only show the first 128 bytes)":""); ++#if 0 ++ if(len > 128) { ++ len = 128; ++ } ++#endif ++ for(i=0;len;) { ++ if(len >=16 ) { ++ for(j=0;j<16;j++) { ++ printk("%02x ", data[i++]); ++ } ++ printk("| "); ++ ++ i -= 16; ++ for(j=0;j<16;j++) { ++ if( between(data[i], 0x21, 0x7e) ) { ++ printk("%c", data[i++]); ++ } ++ else { ++ printk("."); ++ i++; ++ } ++ } ++ printk("\n"); ++ ++ len -= 16; ++ } ++ else { ++ /* 
last line */ ++ ++ for(j=0; jvid; ++ ARL_VLAN_CMD_REG |= (1 << 8); // look up vlan table command ++ ++ // wait for vlan command complete ++ while(( (ARL_VLAN_CMD_REG >> 9) & 1) == 0) ; ++ ++ if (!((ARL_VLAN_CMD_REG >> 10) & 1)) { ++ // not found any entry ++ return CAVM_NOT_FOUND; ++ } ++ ++ entry->valid = ((VLAN_CTRL0_REG >> 31) & 0x1); ++ entry->vid = ((VLAN_CTRL2_REG >> 31) & 0xfff); ++ entry->wan_side = ((VLAN_CTRL0_REG >> 30) & 0x1); ++ entry->etag_pmap = ((VLAN_CTRL0_REG >> 25) & 0x1f); ++ entry->mb_pmap = ((VLAN_CTRL0_REG >> 9) & 0x1f); ++ ++ entry->my_mac[0] = ((VLAN_CTRL1_REG >> 24) & 0xff); ++ entry->my_mac[1] = ((VLAN_CTRL1_REG >> 16) & 0xff); ++ entry->my_mac[2] = ((VLAN_CTRL1_REG >> 8) & 0xff); ++ entry->my_mac[3] = (VLAN_CTRL1_REG & 0xff); ++ ++ entry->my_mac[4] = ((VLAN_CTRL2_REG >> 24) & 0xff); ++ entry->my_mac[5] = ((VLAN_CTRL2_REG >> 16) & 0xff); ++ ++ return CAVM_FOUND; ++} ++ ++static inline int cns3xxx_vlan_table_read(VLANTableEntry *entry) ++{ ++ //printf("VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ ARL_VLAN_CMD_REG &= (~0x3f); ++ ARL_VLAN_CMD_REG |= (entry->vlan_index); ++ ARL_VLAN_CMD_REG |= (1 << 7); // read vlan table command ++ //printf("after read ARL_VLAN_CMD_REG: %x\n", ARL_VLAN_CMD_REG); ++ ++ // wait for vlan command complete ++ while(( (ARL_VLAN_CMD_REG >> 9) & 1) == 0) ; ++ ++ //printf("ARL_VLAN_CMD_REG: %x\n", ARL_VLAN_CMD_REG); ++ //printf("VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ ++ entry->valid = ((VLAN_CTRL0_REG >> 31) & 0x1); ++ entry->vid = ((VLAN_CTRL2_REG) & 0xfff); ++ entry->wan_side = ((VLAN_CTRL0_REG >> 30) & 0x1); ++ entry->etag_pmap = ((VLAN_CTRL0_REG >> 25) & 0x1f); ++ entry->mb_pmap = ((VLAN_CTRL0_REG >> 9) & 0x1f); ++ ++ entry->my_mac[0] = ((VLAN_CTRL1_REG >> 24) & 0xff); ++ entry->my_mac[1] = ((VLAN_CTRL1_REG >> 16) & 0xff); ++ entry->my_mac[2] = ((VLAN_CTRL1_REG >> 8) & 0xff); ++ entry->my_mac[3] = (VLAN_CTRL1_REG & 0xff); ++ ++ entry->my_mac[4] = ((VLAN_CTRL2_REG >> 24) & 0xff); ++ entry->my_mac[5] = 
((VLAN_CTRL2_REG >> 16) & 0xff); ++ ++ return CAVM_OK; ++ ++} ++ ++ ++// add a entry in the vlan table ++static inline int cns3xxx_vlan_table_add(VLANTableEntry *entry) ++{ ++ VLAN_CTRL0_REG = 0; ++ VLAN_CTRL1_REG = 0; ++ VLAN_CTRL2_REG = 0; ++ ++#if 0 ++ printk("a [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ printk("a [kernel mode] VLAN_CTRL1_REG: %x\n", VLAN_CTRL1_REG); ++ printk("a [kernel mode] VLAN_CTRL2_REG: %x\n", VLAN_CTRL2_REG); ++#endif ++ ++ //printk("vlan_index: %x\n", entry->vlan_index); ++ VLAN_CTRL0_REG |= (entry->valid << 31); ++ //DEBUG_MSG(NORMAL_MSG, "1 [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ VLAN_CTRL0_REG |= (entry->wan_side << 30); ++ //DEBUG_MSG(NORMAL_MSG, "2 [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ //printk("entry->etag_pmap: %x\n", entry->etag_pmap); ++ VLAN_CTRL0_REG |= (entry->etag_pmap << 25); ++ //DEBUG_MSG(NORMAL_MSG, "3 [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ //printk("entry->mb_pmap: %x\n", entry->mb_pmap); ++ VLAN_CTRL0_REG |= (entry->mb_pmap << 9); ++ //DEBUG_MSG(NORMAL_MSG, "4 [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ //printk("bb [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ ++ //printf("vlan index: %d ## add VLAN_CTRL0_REG: %x\n", entry->vlan_index, VLAN_CTRL0_REG); ++ ++ ++ VLAN_CTRL1_REG |= (entry->my_mac[0] << 24); ++ VLAN_CTRL1_REG |= (entry->my_mac[1] << 16); ++ VLAN_CTRL1_REG |= (entry->my_mac[2] << 8); ++ VLAN_CTRL1_REG |= (entry->my_mac[3]); ++ ++ VLAN_CTRL2_REG |= (entry->my_mac[4] << 24); ++ VLAN_CTRL2_REG |= (entry->my_mac[5] << 16); ++ VLAN_CTRL2_REG |= entry->vid; ++ ++#if 0 ++ printk("b [kernel mode] VLAN_CTRL0_REG: %x\n", VLAN_CTRL0_REG); ++ printk("b [kernel mode] VLAN_CTRL1_REG: %x\n", VLAN_CTRL1_REG); ++ printk("b [kernel mode] VLAN_CTRL2_REG: %x\n", VLAN_CTRL2_REG); ++#endif ++ ++ ARL_VLAN_CMD_REG &= (~0x3f); ++ ARL_VLAN_CMD_REG |= (entry->vlan_index); ++ ARL_VLAN_CMD_REG |= (1 << 6); // write vlan table command ++ ++ ++ 
//printf("after write ARL_VLAN_CMD_REG: %x\n", ARL_VLAN_CMD_REG); ++ ++ // wait for vlan command complete ++ while(( (ARL_VLAN_CMD_REG >> 9) & 1) == 0) ; ++ ++ return CAVM_OK; ++} ++ ++static inline void print_arl_table_entry(ARLTableEntry *entry) ++{ ++ printk("vid: %d\n", entry->vid); ++ printk("pmap: %#x\n", entry->pmap); ++ printk("age_field: %d\n", entry->age_field); ++ printk("vlan_mac: %d\n", entry->vlan_mac); ++ printk("filter: %d\n", entry->filter); ++ printk("mac addr: %x:%x:%x:%x:%x:%x\n", entry->mac[0], entry->mac[1],entry->mac[2],entry->mac[3],entry->mac[4],entry->mac[5]); ++ ++} ++ ++ ++static inline int cns3xxx_arl_table_lookup(ARLTableEntry *entry) ++{ ++ ARL_CTRL0_REG = 0; ++ ARL_CTRL1_REG = 0; ++ ARL_CTRL2_REG = 0; ++ ++ ARL_CTRL0_REG |= (entry->vid << 16); ++ ++ ARL_CTRL1_REG |= (entry->mac[0] << 24); ++ ARL_CTRL1_REG |= (entry->mac[1] << 16); ++ ARL_CTRL1_REG |= (entry->mac[2] << 8); ++ ARL_CTRL1_REG |= entry->mac[3]; ++ ++ ARL_CTRL2_REG |= (entry->mac[4] << 24); ++ ARL_CTRL2_REG |= (entry->mac[5] << 16); ++ ++ ARL_VLAN_CMD_REG |= (1 << 18); // arl table lookup command ++ ++ // wait arl command complete ++ while(( (ARL_VLAN_CMD_REG >> 21) & 1) == 0); ++ ++ if (( (ARL_VLAN_CMD_REG >> 23) & 1)) { ++ // found ++ ++ entry->vid = ((ARL_CTRL0_REG >> 16) & 0xfff); ++ entry->pmap = ((ARL_CTRL0_REG >> 9) & 0x1f); ++ ++ entry->age_field = ((ARL_CTRL2_REG >> 4 ) & 0x7); ++ entry->vlan_mac = ((ARL_CTRL2_REG >> 1 ) & 0x1); ++ entry->filter = (ARL_CTRL2_REG & 0x1); ++ } else { ++ // not found ++ return CAVM_NOT_FOUND; ++ } ++#if 0 ++ printk("[kernel mode] ARL_VLAN_CMD_REG : %#x\n", ARL_VLAN_CMD_REG); ++ printk("[kernel mode] ARL_CTRL0_REG : %#x\n", ARL_CTRL0_REG); ++ printk("[kernel mode] ARL_CTRL1_REG : %#x\n", ARL_CTRL1_REG); ++ printk("[kernel mode] ARL_CTRL2_REG : %#x\n", ARL_CTRL2_REG); ++#endif ++ ++ return CAVM_FOUND; ++} ++ ++static inline int cns3xxx_arl_table_search_again(ARLTableEntry *entry) ++{ ++ ARL_CTRL0_REG = 0; ++ ARL_CTRL1_REG = 0; ++ 
ARL_CTRL2_REG = 0; ++ ++ ARL_VLAN_CMD_REG |= (1 << 17); // arl table search again command ++ ++ // wait arl command complete ++ while(( (ARL_VLAN_CMD_REG >> 21) & 1) == 0); ++ ++ if ((ARL_VLAN_CMD_REG >> 23) & 1) { ++ ++ // found ++ #if 0 ++ printk("[kernel mode] ARL_VLAN_CMD_REG : %#x\n", ARL_VLAN_CMD_REG); ++ printk("[kernel mode] ARL_CTRL0_REG : %#x\n", ARL_CTRL0_REG); ++ printk("[kernel mode] ARL_CTRL1_REG : %#x\n", ARL_CTRL1_REG); ++ printk("[kernel mode] ARL_CTRL2_REG : %#x\n", ARL_CTRL2_REG); ++ #endif ++ entry->vid = ((ARL_CTRL0_REG >> 16) & 0xfff); ++ entry->pmap = ((ARL_CTRL0_REG >> 9) & 0x1f); ++ ++ entry->age_field = ((ARL_CTRL2_REG >> 4 ) & 0x7); ++ entry->vlan_mac = ((ARL_CTRL2_REG >> 1 ) & 0x1); ++ entry->filter = (ARL_CTRL2_REG & 0x1); ++ ++ entry->mac[0] = (ARL_CTRL1_REG >> 24); ++ entry->mac[1] = (ARL_CTRL1_REG >> 16); ++ entry->mac[2] = (ARL_CTRL1_REG >> 8); ++ entry->mac[3] = ARL_CTRL1_REG; ++ ++ entry->mac[4] = (ARL_CTRL2_REG >> 24); ++ entry->mac[5] = (ARL_CTRL2_REG >> 16); ++ ++ return CAVM_FOUND; ++ } else { ++ // not found ++ return CAVM_NOT_FOUND; ++ } ++} ++ ++static inline int cns3xxx_is_arl_table_end(void) ++{ ++ ARL_CTRL0_REG = 0; ++ ARL_CTRL1_REG = 0; ++ ARL_CTRL2_REG = 0; ++ ++ if (( (ARL_VLAN_CMD_REG >> 22) & 1)) { // search to table end ++ return CAVM_OK; ++ } else { ++ return CAVM_ERR; ++ } ++} ++ ++static inline int cns3xxx_arl_table_search(ARLTableEntry *entry) ++{ ++ ARL_CTRL0_REG = 0; ++ ARL_CTRL1_REG = 0; ++ ARL_CTRL2_REG = 0; ++ ++#if 0 ++ ARL_CTRL0_REG |= (entry->vid << 16); ++ ++ ARL_CTRL1_REG |= (entry->mac[0] << 24); ++ ARL_CTRL1_REG |= (entry->mac[1] << 16); ++ ARL_CTRL1_REG |= (entry->mac[2] << 8); ++ ARL_CTRL1_REG |= entry->mac[3]; ++ ++ ARL_CTRL2_REG |= (entry->mac[4] << 24); ++ ARL_CTRL2_REG |= (entry->mac[5] << 16); ++#endif ++ ARL_VLAN_CMD_REG |= (1 << 16); // arl table search start command ++ ++ // wait arl command complete ++ while(( (ARL_VLAN_CMD_REG >> 21) & 1) == 0); ++ ++ if (((ARL_VLAN_CMD_REG >> 23) & 1)) 
{ ++ // found ++ #if 0 ++ printk("[kernel mode] ARL_VLAN_CMD_REG : %#x\n", ARL_VLAN_CMD_REG); ++ printk("[kernel mode] ARL_CTRL0_REG : %#x\n", ARL_CTRL0_REG); ++ printk("[kernel mode] ARL_CTRL1_REG : %#x\n", ARL_CTRL1_REG); ++ printk("[kernel mode] ARL_CTRL2_REG : %#x\n", ARL_CTRL2_REG); ++ #endif ++ entry->vid = ((ARL_CTRL0_REG >> 16) & 0xfff); ++ entry->pmap = ((ARL_CTRL0_REG >> 9) & 0x1f); ++ ++ entry->age_field = ((ARL_CTRL2_REG >> 4 ) & 0x7); ++ entry->vlan_mac = ((ARL_CTRL2_REG >> 1 ) & 0x1); ++ entry->filter = (ARL_CTRL2_REG & 0x1); ++ ++ entry->mac[0] = (ARL_CTRL1_REG >> 24); ++ entry->mac[1] = (ARL_CTRL1_REG >> 16); ++ entry->mac[2] = (ARL_CTRL1_REG >> 8); ++ entry->mac[3] = ARL_CTRL1_REG; ++ ++ entry->mac[4] = (ARL_CTRL2_REG >> 24); ++ entry->mac[5] = (ARL_CTRL2_REG >> 16); ++ ++ return CAVM_FOUND; ++ } else { ++ // not found ++ return CAVM_NOT_FOUND; ++ } ++} ++ ++ ++// flush all age out entries except static entries ++static inline int cns3xxx_arl_table_flush(void) ++{ ++ ARL_VLAN_CMD_REG |= (1 << 20); // flush arl table command ++ ++ // wait arl command complete ++ while(( (ARL_VLAN_CMD_REG >> 21) & 1) == 0); ++ ++ ++ return CAVM_OK; ++} ++ ++ ++// add a entry in the arl table ++static inline int cns3xxx_arl_table_add(ARLTableEntry *entry) ++{ ++ ARL_CTRL0_REG = 0; ++ ARL_CTRL1_REG = 0; ++ ARL_CTRL2_REG = 0; ++ ++ entry->age_field = 7; // static entry ++ ARL_CTRL0_REG |= (entry->vid << 16); ++ ARL_CTRL0_REG |= (entry->pmap << 9); ++ ++ ARL_CTRL1_REG |= (entry->mac[0] << 24); ++ ARL_CTRL1_REG |= (entry->mac[1] << 16); ++ ARL_CTRL1_REG |= (entry->mac[2] << 8); ++ ARL_CTRL1_REG |= entry->mac[3]; ++ ++ ARL_CTRL2_REG |= (entry->mac[4] << 24); ++ ARL_CTRL2_REG |= (entry->mac[5] << 16); ++ ++ ARL_CTRL2_REG |= (entry->age_field << 4); ++ ARL_CTRL2_REG |= (entry->vlan_mac << 1); ++ ARL_CTRL2_REG |= (entry->filter); ++ ++ //printk("entry->age_field: %d\n", entry->age_field); ++ //printk("ARL_CTRL2_REG: %x\n", ARL_CTRL2_REG); ++ ++ ARL_VLAN_CMD_REG |= (1 << 19); 
// arl table write command ++ ++ // wait arl command complete ++ while(( (ARL_VLAN_CMD_REG >> 21) & 1) == 0); ++ ++ return CAVM_OK; ++} ++ ++// invalid a entry in the arl table ++static inline int cns3xxx_arl_table_invalid(ARLTableEntry *entry) ++{ ++ entry->age_field = 0; // invalid ++ return cns3xxx_arl_table_add(entry); ++} ++ ++// port: ++// 0 : mac port0 ++// 1 : mac port1 ++// 2 : mac port2 ++// 3 : cpu port ++static inline void cns3xxx_set_pvid(u8 port, u16 pvid) ++{ ++ switch (port) ++ { ++ case 0: ++ { ++ MAC1_MAC0_PVID_REG &= (~0x0fff); ++ MAC1_MAC0_PVID_REG |= pvid; ++ break; ++ } ++ case 1: ++ { ++ MAC1_MAC0_PVID_REG &= (~(0x0fff << 16)); ++ MAC1_MAC0_PVID_REG |= (pvid << 16); ++ break; ++ } ++ case 2: ++ { ++ MAC2_CPU_PVID_REG &= (~(0x0fff << 16)); ++ MAC2_CPU_PVID_REG |= (pvid << 16); ++ break; ++ } ++ case 3: // cpu port ++ { ++ MAC2_CPU_PVID_REG &= (~0x0fff); ++ MAC2_CPU_PVID_REG |= pvid; ++ break; ++ } ++ } ++ ++ ++} ++ ++static inline u16 cns3xxx_get_pvid(u8 port) ++{ ++ // 0, 1, 2, cpu port ++ u16 port_offset[]={0x9c, 0x9c, 0xa0, 0xa0}; ++ // 0, 1, 2, cpu port ++ u16 port_shift[]={0, 16, 16, 0}; ++ ++ return ((SWITCH_REG_VALUE(port_offset[port]) >> port_shift[port]) & 0xfff); ++} ++ ++// which : 0 or 1 ++// enable: 0 or 1 ++static inline int enable_rx_dma(u8 which, u8 enable) ++{ ++ if (which == 0 ) { ++ FS_DMA_CTRL0_REG = enable; ++ } else if (which == 1 ) { ++ FS_DMA_CTRL1_REG = enable; ++ } else { ++ return CAVM_ERR; ++ } ++ return CAVM_OK; ++} ++ ++ ++// which : 0 or 1 ++// enable: 0 or 1 ++static inline int enable_tx_dma(u8 which, u8 enable) ++{ ++ if (which == 0 ) { ++ TS_DMA_CTRL0_REG = enable; ++ } else if (which == 1 ) { ++ TS_DMA_CTRL1_REG = enable; ++ } else { ++ return CAVM_ERR; ++ } ++ return CAVM_OK; ++} ++ ++#define DUMP_TX_DESC_PROC(tx_desc, page, num) \ ++{ \ ++ num += sprintf(page + num,"sdp: %x\n", tx_desc->sdp); \ ++ num += sprintf(page + num,"sdl: %d\n", tx_desc->sdl); \ ++ num += sprintf(page + num,"tco: %d\n", 
tx_desc->tco); \ ++ num += sprintf(page + num,"uco: %d\n", tx_desc->uco); \ ++ num += sprintf(page + num,"ico: %d\n", tx_desc->ico); \ ++ num += sprintf(page + num,"pri: %d\n", tx_desc->pri); \ ++ num += sprintf(page + num,"fp: %d\n", tx_desc->fp); \ ++ num += sprintf(page + num,"fr: %d\n", tx_desc->fr); \ ++ num += sprintf(page + num,"interrupt: %d\n", tx_desc->interrupt); \ ++ num += sprintf(page + num,"lsd: %d\n", tx_desc->lsd); \ ++ num += sprintf(page + num,"fsd: %d\n", tx_desc->fsd); \ ++ num += sprintf(page + num,"eor: %d\n", tx_desc->eor); \ ++ num += sprintf(page + num,"cown: %d\n", tx_desc->cown); \ ++ \ ++ num += sprintf(page + num,"ctv: %d\n", tx_desc->ctv); \ ++ num += sprintf(page + num,"stv: %d\n", tx_desc->stv); \ ++ num += sprintf(page + num,"sid: %d\n", tx_desc->sid); \ ++ num += sprintf(page + num,"inss: %d\n", tx_desc->inss); \ ++ num += sprintf(page + num,"dels: %d\n", tx_desc->dels); \ ++ num += sprintf(page + num,"pmap: %d\n", tx_desc->pmap); \ ++ num += sprintf(page + num,"mark: %d\n", tx_desc->mark); \ ++ num += sprintf(page + num,"ewan: %d\n", tx_desc->ewan); \ ++ num += sprintf(page + num,"fewan: %d\n", tx_desc->fewan); \ ++ \ ++ num += sprintf(page + num,"c_vid: %d\n", tx_desc->c_vid); \ ++ num += sprintf(page + num,"c_cfs: %d\n", tx_desc->c_cfs); \ ++ num += sprintf(page + num,"c_pri: %d\n", tx_desc->c_pri); \ ++ num += sprintf(page + num,"s_vid: %d\n", tx_desc->s_vid); \ ++ num += sprintf(page + num,"s_dei: %d\n", tx_desc->s_dei); \ ++ num += sprintf(page + num,"s_pri: %d\n", tx_desc->s_pri); \ ++} ++ ++static inline void dump_tx_desc(TXDesc volatile *tx_desc) ++{ ++ printk("sdp: %x\n", tx_desc->sdp); ++ printk("sdl: %d\n", tx_desc->sdl); ++ printk("tco: %d\n", tx_desc->tco); ++ printk("uco: %d\n", tx_desc->uco); ++ printk("ico: %d\n", tx_desc->ico); ++ printk("pri: %d\n", tx_desc->pri); ++ printk("fp: %d\n", tx_desc->fp); ++ printk("fr: %d\n", tx_desc->fr); ++ printk("interrupt: %d\n", tx_desc->interrupt); ++ printk("lsd: %d\n", 
tx_desc->lsd); ++ printk("fsd: %d\n", tx_desc->fsd); ++ printk("eor: %d\n", tx_desc->eor); ++ printk("cown: %d\n", tx_desc->cown); ++ ++ printk("ctv: %d\n", tx_desc->ctv); ++ printk("stv: %d\n", tx_desc->stv); ++ printk("sid: %d\n", tx_desc->sid); ++ printk("inss: %d\n", tx_desc->inss); ++ printk("dels: %d\n", tx_desc->dels); ++ printk("pmap: %d\n", tx_desc->pmap); ++ printk("mark: %d\n", tx_desc->mark); ++ printk("ewan: %d\n", tx_desc->ewan); ++ printk("fewan: %d\n", tx_desc->fewan); ++ ++ printk("c_vid: %d\n", tx_desc->c_vid); ++ printk("c_cfs: %d\n", tx_desc->c_cfs); ++ printk("c_pri: %d\n", tx_desc->c_pri); ++ printk("s_vid: %d\n", tx_desc->s_vid); ++ printk("s_dei: %d\n", tx_desc->s_dei); ++ printk("s_pri: %d\n", tx_desc->s_pri); ++} ++ ++static inline void dump_rx_desc(RXDesc volatile *rx_desc) ++{ ++ ++ printk("rx_desc: %p\n", rx_desc); ++ //printk("rx_desc: %p ## cown: %d\n", rx_desc, rx_desc->cown); ++ //printk("rx_desc phy addr : %x\n", (u32)page_to_dma(NULL, rx_desc) ); ++#if 0 ++ int i=0; ++ for (i=0; i < 8 ; ++4) { ++ u32 rx_desc_data = *((u32 *)(rx_desc+i)); ++ printk("%d: %#x\n", i, rx_desc_data); ++ } ++#endif ++ ++ printk("sdp: %x\n", rx_desc->sdp); ++ ++ printk("sdl: %d\n", rx_desc->sdl); ++#if 1 ++ printk("l4f: %d\n", rx_desc->l4f); ++ printk("ipf: %d\n", rx_desc->ipf); ++ printk("prot: %d\n", rx_desc->prot); ++ printk("hr: %d\n", rx_desc->hr); ++ printk("lsd: %d\n", rx_desc->lsd); ++ printk("fsd: %d\n", rx_desc->fsd); ++ printk("eor: %d\n", rx_desc->eor); ++#endif ++ printk("cown: %d\n", rx_desc->cown); ++ ++#if 1 ++ printk("ctv: %d\n", rx_desc->ctv); ++ printk("stv: %d\n", rx_desc->stv); ++ printk("unv: %d\n", rx_desc->unv); ++ printk("iwan: %d\n", rx_desc->iwan); ++ printk("exdv: %d\n", rx_desc->exdv); ++ printk("sp: %d\n", rx_desc->sp); ++ printk("crc_err: %d\n", rx_desc->crc_err); ++ printk("un_eth: %d\n", rx_desc->un_eth); ++ printk("tc: %d\n", rx_desc->tc); ++ printk("ip_offset: %d\n", rx_desc->ip_offset); ++ ++ printk("c_vid: %d\n", 
rx_desc->c_vid); ++ printk("c_cfs: %d\n", rx_desc->c_cfs); ++ printk("c_pri: %d\n", rx_desc->c_pri); ++ printk("s_vid: %d\n", rx_desc->s_vid); ++ printk("s_dei: %d\n", rx_desc->s_dei); ++ printk("s_pri: %d\n", rx_desc->s_pri); ++#endif ++} ++ ++static inline void dump_all_rx_ring(const RXRing *rx_ring, u8 r_index) ++{ ++ int i=0; ++ ++ RXBuffer volatile *rx_buf = get_rx_ring_head(rx_ring); ++ ++ printk("all rx ring: %d\n", r_index); ++ for (i=0 ; i < get_rx_ring_size(rx_ring) ; ++i) { ++ printk("%d ## rx_buf: %p ## rx_buf->rx_desc: %p\n", i, rx_buf, rx_buf->rx_desc); ++ dump_rx_desc(rx_buf->rx_desc); ++ ++rx_buf; ++ } ++} ++ ++static inline void rx_dma_suspend(u8 enable) ++{ ++#if 1 ++ DMA_AUTO_POLL_CFG_REG &= (~0x00000001); ++ if (enable == 1) ++ DMA_AUTO_POLL_CFG_REG |= 1; ++#endif ++} ++ ++ ++// clear: 0 normal ++// clear: 1 clear ++static inline void clear_fs_dma_state(u8 clear) ++{ ++ DMA_RING_CTRL_REG &= (~(1 << 31)); ++ if (clear==1) { ++ DMA_RING_CTRL_REG |= (1 << 31); ++ } ++} ++ ++// enable: 1 -> IVL ++// enable: 0 -> SVL ++static inline void cns3xxx_ivl(u8 enable) ++{ ++ // SVL ++ MAC_GLOB_CFG_REG &= (~(0x1 << 7)); ++ if (enable == 1) ++ MAC_GLOB_CFG_REG |= (0x1 << 7); ++} ++ ++static inline void cns3xxx_nic_mode(u8 enable) ++{ ++ VLAN_CFG &= (~(1<<15)); ++ if (enable == 1) ++ VLAN_CFG |= (1<<15); ++} ++ ++ ++void gic_mask_irq(unsigned int irq); ++void gic_unmask_irq(unsigned int irq); ++extern void __iomem *gic_cpu_base_addr; ++ ++ ++static inline void cns3xxx_disable_irq(u32 irq) ++{ ++#ifdef CONFIG_SMP ++ disable_irq_nosync(irq); ++#else ++ disable_irq(irq); ++#endif ++ //gic_mask_irq(irq); ++} ++ ++static inline void cns3xxx_enable_irq(u32 irq) ++{ ++ enable_irq(irq); ++ //gic_unmask_irq(irq); ++} ++ ++static inline int cns3xxx_get_tx_hw_index(u8 ring_index) ++{ ++ if (ring_index == 0) { ++ return (TS_DESC_PTR0_REG - TS_DESC_BASE_ADDR0_REG) / sizeof (TXDesc); ++ } else if (ring_index == 1) { ++ return (TS_DESC_PTR1_REG - TS_DESC_BASE_ADDR1_REG) / 
sizeof (TXDesc); ++ } else { ++ return CAVM_ERR; ++ } ++} ++ ++static inline TXBuffer* get_tx_buffer_by_index(TXRing *tx_ring, int i) ++{ ++ int index = i; ++ ++ index = ((index + get_tx_ring_size(tx_ring) )% get_tx_ring_size(tx_ring)); ++ ++ return tx_ring->head + index; ++} ++ ++static inline int cns3xxx_is_untag_packet(const RXDesc *rx_desc) ++{ ++ return rx_desc->crc_err; ++} ++ ++ ++#ifdef CONFIG_SWITCH_BIG_ENDIAN ++static inline void swap_rx_desc(RXDesc *org_desc, RXDesc *new_desc) ++{ ++ int i=0; ++ void *org_p = org_desc; ++ void *new_p = new_desc; ++ ++ for (i=0; i < 16 ; i+=4) { ++ u32 rx_desc_data = 0; ++ u32 swab_rx_desc_data = 0; ++ ++ rx_desc_data = *((volatile u32 *)(org_p+i)); ++ swab_rx_desc_data = ___swab32(rx_desc_data); ++ ++ *((volatile u32 *)(new_p+i)) = swab_rx_desc_data; ++ } ++} ++ ++static inline void swap_tx_desc(TXDesc *org_desc, TXDesc *new_desc) ++{ ++ int i=0; ++ void *org_p = org_desc; ++ void *new_p = new_desc; ++ ++ for (i=0; i < 16 ; i+=4) { ++ u32 desc_data = *((volatile u32 *)(org_p+i)); ++ u32 swab_desc_data = ___swab32(desc_data); ++ ++ *((volatile u32 *)(new_p+i)) = swab_desc_data; ++ } ++} ++ ++#endif ++ ++ ++static inline int cns3xxx_min_mtu(void) ++{ ++ return 64; ++} ++ ++static inline int cns3xxx_max_mtu(void) ++{ ++ int max_len[]={1518, 1522, 1536, 9600}; ++ ++ return max_len[((PHY_AUTO_ADDR_REG >> 30) & 0x3)]; ++} ++ ++#endif // CNS3XXX_TOOL_H +--- /dev/null ++++ b/drivers/net/cns3xxx/fpga.h +@@ -0,0 +1,306 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++// This macro or function divide two part, ++// one is initial state, another is in netdev open (ifconfig up) function. ++ ++#ifndef FPGA_H ++#define FPGA_H ++ ++#include ++ ++#include "cns3xxx_config.h" ++#include "cns3xxx_phy.h" ++ ++//#define FGPA ++ ++ ++#ifdef CONFIG_FPGA ++// init phy or switch chip ++#define INIT_PORT0_PHY cns3xxx_config_VSC8601(0,0); ++#define INIT_PORT1_PHY cns3xxx_config_VSC8601(1,1); ++#define INIT_PORT2_PHY icp_101a_init(2, 2); ++//#define INIT_PORT1_PHY ++ ++// configure mac0/mac1 register ++#define INIT_PORT0_MAC ++#define INIT_PORT1_MAC ++#define INIT_PORT2_MAC ++//#define INIT_PORT1_MAC ++ ++#define PORT0_LINK_DOWN vsc8601_power_down(0, 1); ++#define PORT0_LINK_UP vsc8601_power_down(0, 0); ++ ++#define PORT1_LINK_DOWN vsc8601_power_down(1, 1); ++#define PORT1_LINK_UP vsc8601_power_down(1, 0); ++ ++#define PORT2_LINK_DOWN cns3xxx_std_phy_power_down(2, 1); ++#define PORT2_LINK_UP cns3xxx_std_phy_power_down(2, 0); ++ ++ ++ ++#define MODEL "VEGA FPGA" ++ ++static int rc_port0 = 0; // rc means reference counting, determine port open/close. 
++ ++ ++// enable port ++// link down ++static inline void open_port0(void) ++{ ++ if (rc_port0 == 0) { ++ enable_port(0, 1); ++ PRINT_INFO("open mac port 0\n"); ++ // link up ++ PORT0_LINK_UP ++ } else { ++ PRINT_INFO("port 0 already open\n");\ ++ } ++ ++rc_port0; ++} ++ ++static inline void close_port0(void) ++{ ++ --rc_port0; ++ if (rc_port0 == 0) { ++ // link down ++ PORT0_LINK_DOWN ++ enable_port(0, 0); ++ PRINT_INFO("close mac port 0\n");\ ++ } ++} ++ ++static inline void open_port1(void) ++{ ++ ++ enable_port(1, 1); ++ PRINT_INFO("open mac port 1\n"); ++ // link up ++ PORT1_LINK_UP ++} ++ ++static inline void close_port1(void) ++{ ++ enable_port(1, 0); ++ PRINT_INFO("close mac port 1\n"); ++ // link down ++ PORT1_LINK_DOWN ++} ++ ++static inline void open_port2(void) ++{ ++ ++ enable_port(2, 1); ++ PRINT_INFO("open mac port 2\n"); ++ // link up ++ PORT2_LINK_UP ++} ++ ++static inline void close_port2(void) ++{ ++ enable_port(2, 0); ++ PRINT_INFO("close mac port 2\n"); ++ // link down ++ PORT2_LINK_DOWN ++} ++ ++static u8 my_vlan0_mac[] = {0x00, 0x11, 0x22, 0x33, 0x55, 0x00}; ++static u8 my_vlan1_mac[] = {0x00, 0x11, 0x22, 0x33, 0x55, 0x11}; ++static u8 my_vlan2_mac[] = {0x00, 0x11, 0xbb, 0xcc, 0xdd, 0x70}; ++static u8 my_vlan3_mac[] = {0x00, 0x11, 0xbb, 0xcc, 0xdd, 0x80}; ++ ++ ++ ++ ++// CNS3XXX_NIC_MODE_8021Q, CNS3XXX_NON_NIC_MODE_8021Q, CNS3XXX_VLAN_BASE_MODE and ++// CNS3XXX_PORT_BASE_MODE, only one macro can be defined ++ ++#ifdef CNS3XXX_VLAN_8021Q ++ #ifndef CNS3XXX_NIC_MODE_8021Q ++ #define CNS3XXX_NON_NIC_MODE_8021Q ++ #endif ++#else ++ //#define CNS3XXX_VLAN_BASE_MODE ++ #define CNS3XXX_PORT_BASE_MODE ++#endif ++ ++#ifdef CNS3XXX_PORT_BASE_MODE ++ ++#define PORT0_PVID 0x1 ++#define PORT1_PVID 0x2 ++#define PORT2_PVID 3 ++#define CPU_PVID 5 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; ++ ++static 
VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac; ++ //{0, 1, 1, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {1, 1, PORT0_PVID, 0, 0, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, 0, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, 0, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++ //{2, 1, 4, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}, // for cpu ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ //{CPU_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan8_mac, 7, 0, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan9_mac, 7, 0, 0}, ++ //{CPU_PVID, 0x4, my_vlan2_mac, 7, 1, 0}, ++ //{CPU_PVID, MAC_PORT2_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++static NetDevicePriv net_device_prive[]= { ++ /* pmap, is_wan, s-tag, vlan_tag or pvid, rx_func_ptr, tx_func_ptr, open_ptr, close_ptr, which port, mac, VLANTableEntry, ARLTableEntry, NICSetting, netdev s-tag, name */ ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 2, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++ ++#endif ++ ++#ifdef CNS3XXX_NON_NIC_MODE_8021Q ++//#error "8021Q" ++#define PORT0_PVID 50 ++#define PORT1_PVID 60 ++#define 
PORT2_PVID 70 ++#define CPU_PVID 80 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++//#define CONFIG_CNS3XXX_VLAN_BASE ++//#define CONFIG_HAVE_VLAN_TAG ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; // for cpu ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac;C_PORT2_PMAP ++ {1, 1, PORT0_PVID, 0, CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 0, CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++ ++// if used 8021Q, use PORT0_NETDEV_INDEX, don't use VID ++static NetDevicePriv net_device_prive[]= { ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 0, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++#endif ++ ++ ++ ++#ifdef CNS3XXX_NIC_MODE_8021Q ++//#error "8021Q" ++#define PORT0_PVID 1 ++#define PORT1_PVID 2 ++#define PORT2_PVID 9 ++#define CPU_PVID 5 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++//#define CONFIG_CNS3XXX_VLAN_BASE ++//#define CONFIG_HAVE_VLAN_TAG ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | 
MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; // for cpu ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac;C_PORT2_PMAP ++ {1, 1, PORT0_PVID, 1, MAC_PORT0_PMAP|CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, MAC_PORT1_PMAP|CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, MAC_PORT2_PMAP|CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++ ++// if used 8021Q, use PORT0_NETDEV_INDEX, don't use VID ++static NetDevicePriv net_device_prive[]= { ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 0, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++#endif ++ ++#ifdef CNS3XXX_VLAN_BASE_MODE ++//#error "vlan_base" ++// vlan configuration ++ ++#define PORT0_PVID 1 ++#define PORT1_PVID 2 ++#define PORT2_PVID 3 ++#define CPU_PVID 5 ++#define CONFIG_CNS3XXX_VLAN_BASE ++#define CONFIG_HAVE_VLAN_TAG ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; // for cpu ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; 
valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac; ++ {1, 1, PORT0_PVID, 0, MAC_PORT0_PMAP | CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, MAC_PORT1_PMAP | CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, MAC_PORT2_PMAP | CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++static NetDevicePriv net_device_prive[]= { ++ /* pmap, is_wan, gid, vlan_tag or pvid, rx_func_ptr, tx_func_ptr, open_ptr, close_ptr, which port, mac, VLANTableEntry, ARLTableEntry, NICSetting, netdev name */ ++ {MAC_PORT0_PMAP, 0, 1, PORT0_PVID, rx_port_base, tx_vlan_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 0, PORT1_PVID, rx_port_base, tx_vlan_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_PVID, rx_port_base, tx_vlan_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++#endif ++ ++#endif // CONFIG_FPGA ++#endif // FPGA_H +--- /dev/null ++++ b/drivers/net/cns3xxx/Kconfig +@@ -0,0 +1,58 @@ ++menu "CNS3XXX Gigabit Switch Support" ++ depends on ARCH_CNS3XXX ++ ++config CNS3XXX_GSW ++ tristate "CNS3XXX Gigabit Switch Driver Support" ++ help ++ CNS3XXX Gigabit Switch. ++ ++config CNS3XXX_SPPE ++ bool "CNS3XXX Smart PPE(Packet Processing Engine) Support" ++ depends on CNS3XXX_GSW ++ help ++ PPE(Packet Processing Engine) is a hardware accelerator hook on a port of ++ CNS3XXX Gigabit Switch. ++ ++ This option is used for Smart PPE hook. 
++ ++ Say Y if you want to enable Smart PPE function. ++ ++config CNS3XXX_HCIE_TEST ++ bool "CNS3XXX HCIE(Hardware Content Inspection Engine) Support" ++# depends on CNS3XXX_GSW ++ help ++ HCIE is patent-protected layer-7 packet processing engine. ++ ++ This option is used for fundamental HCIE functional test . ++ Say Y if you want to do HCIE functional test. ++ ++ ++#config CNS3XXX_SHNAT_PCI_FASTPATH ++# bool "FastPath(From PCI to WAN) Support" ++# depends on CNS3XXX_SHNAT ++# help ++# Add FastPath Support for Smart HNAT. ++ ++comment "NOTE: 'Validation Board' depends on" ++comment "GPIO_CNS3XXX and SPI_CNS3XXX" ++choice ++ prompt "CNS3XXX Board" ++ depends on CNS3XXX_GSW ++ default FPGA ++ ++config FPGA ++ bool "Fpga" ++ ++config VB ++ bool "Validation Board" ++ help ++ MAC0 and MAC1 connect to BCM53115M. It need enable CNS3XXX SPI and CNS3XXX GPIO option. ++ MAC2 use ICPLUS IP1001 phy. ++ ++#config LEO ++# bool "Leo" ++ ++endchoice ++ ++endmenu ++ +--- /dev/null ++++ b/drivers/net/cns3xxx/Makefile +@@ -0,0 +1,41 @@ ++################################################################################ ++# ++# ++# Copyright (c) 2008 Cavium Networks ++# ++# This program is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License as published by the Free ++# Software Foundation; either version 2 of the License, or (at your option) ++# any later version. ++# ++# This program is distributed in the hope that it will be useful, but WITHOUT ++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++# more details. ++# ++# You should have received a copy of the GNU General Public License along with ++# this program; if not, write to the Free Software Foundation, Inc., 59 ++# Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
++# ++# The full GNU General Public License is included in this distribution in the ++# file called LICENSE. ++# ++# Contact Information: ++# Star semiconduction Linux Support ++# ++################################################################################ ++ ++# ++# Makefile for the Star GSW ethernet driver ++# ++ ++#obj-y := ++#obj-m := ++ ++obj-$(CONFIG_CNS3XXX_GSW) += cns3xxx.o ++cns3xxx-objs := cns3xxx_phy.o cns3xxx_main.o cns3xxx_ethtool.o ++obj-$(CONFIG_CNS3XXX_SPPE) += cns3xxx_sppe_hook.o ++#endif ++#vega_main.o ++ ++#include $(TOPDIR)/Rules.make +--- /dev/null ++++ b/drivers/net/cns3xxx/vb.h +@@ -0,0 +1,328 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2009 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. ++ * ++ ********************************************************************************/ ++ ++// This macro or function divide two part, ++// one is initial state, another is in netdev open (ifconfig up) function. 
++ ++#ifndef VB_H ++#define VB_H ++ ++#include ++ ++#include "cns3xxx_config.h" ++#include "cns3xxx_phy.h" ++ ++#ifdef CONFIG_VB ++// init phy or switch chip ++#define INIT_PORT0_PHY cns3xxx_config_VSC8601(0, 0); ++#define INIT_PORT1_PHY cns3xxx_config_VSC8601(1, 1); ++#define INIT_PORT2_PHY ++//#define INIT_PORT1_PHY ++ ++// configure mac0/mac1 register ++#define INIT_PORT0_MAC ++#define INIT_PORT1_MAC ++#define INIT_PORT2_MAC ++//#define INIT_PORT1_MAC ++ ++#define PORT0_LINK_DOWN cns3xxx_std_phy_power_down(0, 1); ++#define PORT0_LINK_UP cns3xxx_std_phy_power_down(0, 0); ++ ++#define PORT1_LINK_DOWN cns3xxx_std_phy_power_down(1, 1); ++#define PORT1_LINK_UP cns3xxx_std_phy_power_down(1, 0); ++ ++#define PORT2_LINK_DOWN ++#define PORT2_LINK_UP ++ ++#define MODEL "CNS3XXX validation board" ++ ++static int rc_port0 = 0; // rc means reference counting, determine port open/close. ++ ++#define PRINT_INFO printk ++ ++// enable port ++// link down ++static inline void open_port0(void) ++{ ++ if (rc_port0 == 0) { ++ enable_port(0, 1); ++ // link up ++ PORT0_LINK_UP ++ } ++ ++rc_port0; ++} ++ ++static inline void close_port0(void) ++{ ++ --rc_port0; ++ if (rc_port0 == 0) { ++ // link down ++ PORT0_LINK_DOWN ++ enable_port(0, 0); ++ } ++} ++ ++static inline void open_port1(void) ++{ ++ ++ enable_port(1, 1); ++ // link up ++ PORT1_LINK_UP ++} ++ ++static inline void close_port1(void) ++{ ++ enable_port(1, 0); ++ // link down ++ PORT1_LINK_DOWN ++} ++ ++static inline void open_port2(void) ++{ ++ ++ enable_port(2, 1); ++ // link up ++ PORT2_LINK_UP ++} ++ ++static inline void close_port2(void) ++{ ++ enable_port(2, 0); ++ // link down ++ PORT2_LINK_DOWN ++} ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++/* only for PPE PCI-to-WAN fast path */ ++static int fp_ref_cnt = 0; ++static inline void open_fp(void) ++{ ++ if (!fp_ref_cnt) { ++ fp_ref_cnt++; ++ } ++} ++ ++static inline void close_fp(void) ++{ ++ if (fp_ref_cnt) { ++ fp_ref_cnt--; ++ } ++} ++#endif ++ ++static u8 my_vlan0_mac[] 
= {0x00, 0x11, 0x22, 0x33, 0x55, 0x00}; ++static u8 my_vlan1_mac[] = {0x00, 0x11, 0x22, 0x33, 0x55, 0x11}; ++static u8 my_vlan2_mac[] = {0x00, 0x11, 0xbb, 0xcc, 0xdd, 0x70}; ++static u8 my_vlan3_mac[] = {0x00, 0x11, 0xbb, 0xcc, 0xdd, 0x80}; ++ ++ ++ ++ ++// CNS3XXX_NIC_MODE_8021Q, CNS3XXX_NON_NIC_MODE_8021Q, CNS3XXX_VLAN_BASE_MODE and ++// CNS3XXX_PORT_BASE_MODE, only one macro can be defined ++ ++#ifdef CNS3XXX_VLAN_8021Q ++ #define CNS3XXX_NIC_MODE_8021Q ++ #ifndef CNS3XXX_NIC_MODE_8021Q ++ #define CNS3XXX_NON_NIC_MODE_8021Q ++ #endif ++#else ++ //#define CNS3XXX_VLAN_BASE_MODE ++ #define CNS3XXX_PORT_BASE_MODE ++#endif ++ ++//#define CNS3XXX_PORT_BASE_MODE ++// ++#ifdef CNS3XXX_NON_NIC_MODE_8021Q ++ ++#define PORT0_PVID 50 ++#define PORT1_PVID 60 ++#define PORT2_PVID 70 ++#define CPU_PVID 80 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac; ++ #if 0 ++ {1, 1, PORT0_PVID, 0, 0, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, 0, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, 0, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++ #endif ++ ++ {1, 1, PORT0_PVID, 0, CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 0, CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++ //{2, 1, 4, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}, // for cpu ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ //{CPU_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, 
my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan8_mac, 7, 0, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan9_mac, 7, 0, 0}, ++ //{CPU_PVID, 0x4, my_vlan2_mac, 7, 1, 0}, ++ //{CPU_PVID, MAC_PORT2_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++static NetDevicePriv net_device_prive[]= { ++ /* pmap, is_wan, s-tag, vlan_tag or pvid, rx_func_ptr, tx_func_ptr, open_ptr, close_ptr, which port, mac, VLANTableEntry, ARLTableEntry, NICSetting, netdev s-tag, name */ ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 2, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++#if defined (CONFIG_CNS3XXX_SPPE) ++ ,{CPU_PORT_PMAP, 0, 1, FP_NETDEV_INDEX, NULL, fp_port_base, ++ open_fp, close_fp, CPU_PORT, my_vlan3_mac, &cpu_vlan_table_entry, ++ 0, 0, "fp"} ++#endif ++ }; ++ ++#endif // CNS3XXX_PORT_BASE_MODE ++ ++#ifdef CNS3XXX_PORT_BASE_MODE ++ ++#define PORT0_PVID 0x1 ++#define PORT1_PVID 0x2 ++#define PORT2_PVID 3 ++#define CPU_PVID 5 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac; ++ //{0, 1, 1, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {1, 1, PORT0_PVID, 0, 0, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, 0, MAC_PORT1_PMAP | CPU_PORT_PMAP, 
my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, 0, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++ //{2, 1, 4, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}, // for cpu ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ //{CPU_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan8_mac, 7, 0, 0}, ++ //{PORT0_PVID, MAC_PORT0_PMAP, my_vlan9_mac, 7, 0, 0}, ++ //{CPU_PVID, 0x4, my_vlan2_mac, 7, 1, 0}, ++ //{CPU_PVID, MAC_PORT2_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++static NetDevicePriv net_device_prive[]= { ++ /* pmap, is_wan, s-tag, vlan_tag or pvid, rx_func_ptr, tx_func_ptr, open_ptr, close_ptr, which port, mac, VLANTableEntry, ARLTableEntry, NICSetting, netdev s-tag, name */ ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 2, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++ ++#endif // CNS3XXX_PORT_BASE_MODE ++ ++#ifdef CNS3XXX_NIC_MODE_8021Q ++//#error "8021Q" ++#define PORT0_PVID 1 ++#define PORT1_PVID 2 ++#define PORT2_PVID 9 ++#define CPU_PVID 5 ++ ++#define CONFIG_CNS3XXX_PORT_BASE ++//#define CONFIG_CNS3XXX_VLAN_BASE ++//#define CONFIG_HAVE_VLAN_TAG ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; // for cpu ++ ++static 
VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac;C_PORT2_PMAP ++ {1, 1, PORT0_PVID, 1, MAC_PORT0_PMAP|CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, MAC_PORT1_PMAP|CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, MAC_PORT2_PMAP|CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++ ++// if used 8021Q, use PORT0_NETDEV_INDEX, don't use VID ++static NetDevicePriv net_device_prive[]= { ++ {MAC_PORT0_PMAP, 0, 1, PORT0_NETDEV_INDEX, rx_port_base, tx_port_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 0, PORT1_NETDEV_INDEX, rx_port_base, tx_port_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_NETDEV_INDEX, rx_port_base, tx_port_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++#endif // CNS3XXX_NIC_MODE_8021Q ++ ++#ifdef CNS3XXX_VLAN_BASE_MODE ++//#error "vlan_base" ++// vlan configuration ++ ++#define PORT0_PVID 1 ++#define PORT1_PVID 2 ++#define PORT2_PVID 3 ++#define CPU_PVID 5 ++#define CONFIG_CNS3XXX_VLAN_BASE ++#define CONFIG_HAVE_VLAN_TAG ++ ++static VLANTableEntry cpu_vlan_table_entry = {0, 1, CPU_PVID, 0, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, MAC_PORT0_PMAP | MAC_PORT1_PMAP | MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan3_mac}; // for cpu ++ ++static VLANTableEntry vlan_table_entry[] = ++{ ++ // vlan_index; valid; vid; wan_side; etag_pmap; mb_pmap; *my_mac; ++ {1, 1, 
PORT0_PVID, 0, MAC_PORT0_PMAP | CPU_PORT_PMAP, MAC_PORT0_PMAP | CPU_PORT_PMAP, my_vlan0_mac}, ++ {2, 1, PORT1_PVID, 0, MAC_PORT1_PMAP | CPU_PORT_PMAP, MAC_PORT1_PMAP | CPU_PORT_PMAP, my_vlan1_mac}, ++ {3, 1, PORT2_PVID, 1, MAC_PORT2_PMAP | CPU_PORT_PMAP, MAC_PORT2_PMAP | CPU_PORT_PMAP, my_vlan2_mac}, ++}; ++ ++static ARLTableEntry arl_table_entry[] = ++{ ++ // vid; pmap; *mac; age_field; vlan_mac ; filter ++ {PORT0_PVID, CPU_PORT_PMAP, my_vlan0_mac, 7, 1, 0}, ++ {PORT1_PVID, CPU_PORT_PMAP, my_vlan1_mac, 7, 1, 0}, ++ {PORT2_PVID, CPU_PORT_PMAP, my_vlan2_mac, 7, 1, 0}, ++}; ++ ++static NetDevicePriv net_device_prive[]= { ++ /* pmap, is_wan, gid, vlan_tag or pvid, rx_func_ptr, tx_func_ptr, open_ptr, close_ptr, which port, mac, VLANTableEntry, ARLTableEntry, NICSetting, netdev name */ ++ {MAC_PORT0_PMAP, 0, 1, PORT0_PVID, rx_port_base, tx_vlan_base, open_port0, close_port0, MAC_PORT0, my_vlan0_mac, &vlan_table_entry[0], &arl_table_entry[0], 0, 0}, // eth0 ++ {MAC_PORT1_PMAP, 0, 0, PORT1_PVID, rx_port_base, tx_vlan_base, open_port1, close_port1, MAC_PORT1, my_vlan1_mac, &vlan_table_entry[1], &arl_table_entry[1], 0, 0}, // eth1 ++ {MAC_PORT2_PMAP, 1, 3, PORT2_PVID, rx_port_base, tx_vlan_base, open_port2, close_port2, MAC_PORT2, my_vlan2_mac, &vlan_table_entry[2], &arl_table_entry[2], 0, 0} // eth2 ++ }; ++#endif // CNS3XXX_VLAN_BASE_MODE ++ ++#endif // CONFIG_VB ++#endif // VB_H +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -2076,6 +2076,8 @@ menuconfig NETDEV_1000 + + if NETDEV_1000 + ++source "drivers/net/cns3xxx/Kconfig" ++ + config ACENIC + tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" + depends on PCI +--- a/drivers/net/Makefile ++++ b/drivers/net/Makefile +@@ -6,6 +6,11 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci + + obj-$(CONFIG_E1000) += e1000/ + obj-$(CONFIG_E1000E) += e1000e/ ++obj-$(CONFIG_CNS3XXX_GSW) += cns3xxx/ ++ifeq ($(CONFIG_CNS3XXX_GSW),m) ++ obj-y += cns3xxx/cns3xxx_sppe_hook.o ++endif ++ + obj-$(CONFIG_IBM_NEW_EMAC) += 
ibm_newemac/ + obj-$(CONFIG_IGB) += igb/ + obj-$(CONFIG_IGBVF) += igbvf/ +--- /dev/null ++++ b/include/linux/cns3xxx/sppe.h +@@ -0,0 +1,1579 @@ ++/* ++ * PROJECT CODE: CNS3XXX Smart Packet Processing Engine ++ * MODULE NAME: sppe.h ++ * DESCRIPTION: ++ * ++ * Change Log ++ * ++ * 1.0.0 25-Dec-2008 ++ * o ++ * ++ */ ++ ++#ifndef _SPPE_H_ ++#define _SPPE_H_ ++ ++#if defined(CONFIG_CNS3XXX_SPPE) ++ ++ ++/* PPE Table Size Def. */ ++#define PPE_TABLE_SIZE_2K (0x0) ++#define PPE_TABLE_SIZE_4K (0x1) ++#define PPE_TABLE_SIZE_8K (0x2) ++#define PPE_TABLE_SIZE_16K (0x3) ++#define PPE_TABLE_SIZE_32K (0x4) ++#define PPE_TABLE_SIZE_64K (0x5) ++#define PPE_TABLE_SIZE_128K (0x6) ++#define PPE_TABLE_SIZE_256K (0x7) ++ ++typedef enum _sppe_cmd { ++ SPPE_CMD_INIT = 0, ++ SPPE_CMD_VERSION, ++ ++ SPPE_CMD_ENABLE, ++ SPPE_CMD_FIREWALL, ++ SPPE_CMD_RULE_CHECK, ++ SPPE_CMD_GRL_CHECK, ++ SPPE_CMD_FLOW_CHECK, ++ SPPE_CMD_RATE_LIMIT_EN, ++ SPPE_CMD_POLICE_EN, ++ SPPE_CMD_RLCFG, ++ SPPE_CMD_FC, /* flow control */ ++ SPPE_CMD_MIRROR_TO_CPU, ++ ++ SPPE_CMD_TCP_SNA_TH, ++ SPPE_CMD_PRDA, ++ SPPE_CMD_AGING, ++ SPPE_CMD_MAX_LENGTH, ++ ++ SPPE_CMD_LANIPV4, ++ SPPE_CMD_WANIPV4, ++ ++ SPPE_CMD_RULE_PPPOE_RELAY, ++ SPPE_CMD_RULE_BRIDGE, ++ SPPE_CMD_RULE_ACL, ++ SPPE_CMD_RULE_ROUTE, ++#if 0 ++ SPPE_CMD_RULE_VSERVER, ++#else ++ SPPE_CMD_RULE_SNAT, ++ SPPE_CMD_RULE_DNAT, ++#endif ++ SPPE_CMD_RULE_GRL, ++ ++ SPPE_CMD_ARP, ++ SPPE_CMD_ARL, ++ SPPE_CMD_PPPOE_SID, ++ ++ SPPE_CMD_FLOW_BRIDGE_IPV4, ++ SPPE_CMD_FLOW_BRIDGE_IPV6, ++ SPPE_CMD_FLOW_ROUTE_IPV4, ++ SPPE_CMD_FLOW_ROUTE_IPV6, ++ SPPE_CMD_FLOW_NAT_IPV4, ++ SPPE_CMD_FLOW_NAT_IPV6, ++ //SPPE_CMD_FLOW_TWICE_NAT, ++ SPPE_CMD_FLOW_MCAST_IPV4, ++ SPPE_CMD_FLOW_MCAST_IPV6, ++ SPPE_CMD_FLOW_BRIDGE_L2, ++ ++ SPPE_CMD_CHGDSCP, ++ SPPE_CMD_CHGPRI, ++ SPPE_CMD_RL_FLOW, ++ SPPE_CMD_RL_RULE, ++ ++ SPPE_CMD_DEBUG, ++ SPPE_CMD_REG, ++ SPPE_CMD_SRAM, ++ SPPE_CMD_DUMP, ++ ++ /* accounting group and drop packet count */ ++ SPPE_CMD_ACCOUNTING_GROUP, ++ 
SPPE_CMD_DROP_IPCS_ERR, ++ SPPE_CMD_DROP_RATE_LIMIT, ++ SPPE_CMD_DROP_OTHERS, ++ ++ SPPE_CMD_PCI_FP_DEV, ++ ++} SPPE_CMD; ++ ++typedef enum _sppe_op { ++ SPPE_OP_GET = 0, ++ SPPE_OP_SET, ++ SPPE_OP_DELETE, ++ SPPE_OP_DELETE_OUTDATED, /* flow only */ ++ SPPE_OP_UPDATE_COUNTER, /* ACL rule only */ ++ SPPE_OP_CLEAN, ++ SPPE_OP_UNKNOWN ++} SPPE_OP; ++ ++typedef enum _sppe_boolean { ++ SPPE_BOOL_FALSE = 0, ++ SPPE_BOOL_TRUE = 1 ++} SPPE_BOOL; ++ ++ ++typedef enum _sppe_result { ++ SPPE_RESULT_SUCCESS = 0, ++ SPPE_RESULT_FAIL, ++ SPPE_RESULT_UNSUPPORT_CMD, ++ SPPE_RESULT_UNSUPPORT_OP, ++ SPPE_RESULT_INVALID_INDEX, ++ SPPE_RESULT_INVALID_TYPE, ++ SPPE_RESULT_FLOW_NOT_FOUND, ++} SPPE_RESULT; ++ ++typedef enum _sppe_prot { ++ SPPE_PROT_UDP = 0, ++ SPPE_PROT_TCP = 1, ++ SPPE_PROT_PPTP_GRE = 2, ++ SPPE_PROT_OTHERS = 3, ++} SPPE_PROT; ++ ++ ++typedef enum _sppe_l2_select { ++ SPPE_L2S_ARP_TABLE = 0, ++ SPPE_L2S_POLICY_ROUTE = 1, ++ SPPE_L2S_IN_FLOW = 2, ++ SPPE_L2S_RESERVED = 3, ++} SPPE_L2_SELECT; ++ ++typedef enum _sppe_dump_type { ++ SPPE_DUMP_TYPE_FLOW = 0, ++ SPPE_DUMP_TYPE_ARP, ++ SPPE_DUMP_TYPE_RULE ++} SPPE_DUMP_TYPE; ++ ++/* Data Structure */ ++typedef struct _sppe_pppoe_relay { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int unused:31; ++#else ++ unsigned int unused:31; ++ unsigned int valid:1; ++#endif ++ unsigned short lsid; /* PPPoE session ID in LAN side */ ++ unsigned short wsid; /* PPPoE session ID in WAN side */ ++ unsigned char lmac[6]; /* MAC address of PPPoE client */ ++ unsigned char wmac[6]; /* MAC address of PPPoE server */ ++} SPPE_PPPOE_RELAY; ++ ++typedef struct _sppe_bridge { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int wan:1; ++ unsigned int ppp:1; /* enable PPPoE sessoion ID comparison*/ ++ unsigned int psidx:4; /* PPPoE session ID index */ ++ unsigned int kv:1; ++ unsigned int sws:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int fp:1; /* force VLAN priority */ ++ unsigned int pri:3; ++ unsigned int ag:2; ++ unsigned int unused:15; ++#else ++ unsigned int unused:15; ++ unsigned int ag:2; ++ unsigned int pri:3; ++ unsigned int fp:1; /* force VLAN priority */ ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int sws:1; ++ unsigned int kv:1; ++ unsigned int psidx:4; /* PPPoE session ID index */ ++ unsigned int ppp:1; /* enable PPPoE sessoion ID comparison*/ ++ unsigned int wan:1; ++ unsigned int valid:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int svid:12; ++ unsigned int cvid:12; ++ unsigned int loc:8; ++#else ++ unsigned int loc:8; ++ unsigned int cvid:12; ++ unsigned int svid:12; ++#endif ++ ++ unsigned char smac[6]; /* source MAC address */ ++ unsigned char dmac[6]; /* destination MAC address */ ++ unsigned int pkt_cnt; ++} SPPE_BRIDGE; ++ ++typedef struct _sppe_acl { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int ipv6:1; ++ unsigned int wan:1; ++ unsigned int tcp:1; ++ unsigned int udp:1; ++ unsigned int to:4; ++ unsigned int from:4; ++ unsigned int rr:4; ++ unsigned int kv:1; ++ unsigned int sws:1; ++ unsigned int loc:8; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int unused:3; ++#else ++ unsigned int unused:3; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int loc:8; ++ unsigned int sws:1; ++ unsigned int kv:1; ++ unsigned int rr:4; ++ unsigned int from:4; ++ unsigned int to:4; ++ unsigned int udp:1; ++ unsigned int tcp:1; ++ unsigned int wan:1; ++ unsigned int ipv6:1; ++ unsigned int valid:1; ++#endif ++ ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ unsigned short sip_mask; ++ unsigned short dip_mask; ++ ++ unsigned short sport_start; ++ unsigned short sport_end; ++ unsigned short dport_start; ++ unsigned short dport_end; ++ unsigned int pkt_cnt; ++} SPPE_ACL; ++ ++typedef struct _sppe_route { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int ipv6:1; ++ unsigned int wan:1; ++ unsigned int rd:1; /* replace dscp */ ++ unsigned int dscp:6; ++ unsigned int pr:1; /* policy route */ ++ unsigned int prs:2; /* policy route select */ ++ unsigned int kv:1; ++ unsigned int sws:1; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int fp:1; /* force VLAN priority */ ++ unsigned int pri:3; ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++ unsigned int ag:2; ++ unsigned int unused:3; ++#else ++ unsigned int unused:3; ++ unsigned int ag:2; ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int pri:3; ++ unsigned int fp:1; /* force VLAN priority */ ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int sws:1; ++ unsigned int kv:1; ++ unsigned int prs:2; /* policy route select */ ++ unsigned int pr:1; /* policy route */ ++ unsigned int dscp:6; ++ unsigned int rd:1; /* replace dscp */ ++ unsigned int wan:1; ++ unsigned int ipv6:1; ++ unsigned int valid:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused_1:24; ++ unsigned int loc:8; ++#else ++ unsigned int loc:8; ++ unsigned int unused_1:24; ++#endif ++ ++ unsigned int dip[4]; ++ unsigned int sip[4]; ++ unsigned short dip_mask; ++ unsigned short sip_mask; ++ unsigned int pkt_cnt; ++} SPPE_ROUTE; ++ ++#if 0 ++typedef struct _sppe_vserver { ++ unsigned int valid:1; ++ unsigned int tcp:1; ++ unsigned int udp:1; ++ unsigned int dscp_lan:6; ++ unsigned int dscp_wan:6; ++ unsigned int pri_lan:3; ++ unsigned int pri_wan:3; ++ unsigned int unused:11; ++ ++ unsigned int wanip; ++ unsigned int lanip; ++ unsigned short port_start; ++ unsigned short port_end; ++ unsigned int pkt_cnt; ++} SPPE_VSERVER; ++#else ++typedef struct _sppe_snat { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int tcp:1; ++ unsigned int udp:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int kv:1; ++ unsigned int sws:1; ++ unsigned int max_len:2; ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++ unsigned int pr:1; /* policy route */ ++ unsigned int prs:2; /* policy route select */ ++ unsigned int ag:2; ++ unsigned int unused:3; ++#else ++ unsigned int unused:3; ++ unsigned int ag:2; ++ unsigned int prs:2; /* policy route select */ ++ unsigned int pr:1; /* policy route */ ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; ++ unsigned int sws:1; ++ unsigned int kv:1; ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int udp:1; ++ unsigned int tcp:1; ++ unsigned int valid:1; ++#endif ++ ++#ifndef 
CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused_1:24; ++ unsigned int loc:8; ++#else ++ unsigned int loc:8; ++ unsigned int unused_1:24; ++#endif ++ ++ unsigned int wanip; ++ unsigned int lanip; ++ unsigned short port_start; ++ unsigned short port_end; ++ unsigned int pkt_cnt; ++} SPPE_SNAT; ++ ++typedef struct _sppe_dnat { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int tcp:1; ++ unsigned int udp:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int kv:1; ++ unsigned int sws:1; ++ unsigned int max_len:2; ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++ unsigned int pr:1; /* policy route */ ++ unsigned int prs:2; /* policy route select */ ++ unsigned int ag:2; ++ unsigned int unused:3; ++#else ++ unsigned int unused:3; ++ unsigned int ag:2; ++ unsigned int prs:2; /* policy route select */ ++ unsigned int pr:1; /* policy route */ ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; ++ unsigned int sws:1; ++ unsigned int kv:1; ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int udp:1; ++ unsigned int tcp:1; ++ unsigned int valid:1; ++#endif ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused_1:24; ++ unsigned int loc:8; ++#else ++ unsigned int loc:8; ++ unsigned int unused_1:24; ++#endif ++ ++ unsigned int wanip; ++ unsigned int lanip; ++ unsigned short port_start; ++ unsigned short port_end; ++ unsigned int pkt_cnt; ++} SPPE_DNAT; ++#endif ++typedef struct _sppe_limit { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int drop_red:1; ++ unsigned int pass_green:1; ++ unsigned int force_color:1; ++ unsigned int color_select:2; ++ unsigned int time_stamp:21; ++ unsigned int reserved:6; ++#else ++ unsigned int reserved:6; ++ unsigned int time_stamp:21; ++ unsigned int color_select:2; ++ unsigned int force_color:1; ++ unsigned int pass_green:1; ++ unsigned int 
drop_red:1; ++#endif ++ unsigned short min_rate; ++ unsigned short max_rate; ++} SPPE_LIMIT; ++ ++typedef struct _sppe_global_rate_limit { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int valid:1; ++ unsigned int wan:1; ++ unsigned int ipv6:1; ++ unsigned int tcp:1; ++ unsigned int udp:1; ++ unsigned int unused:17; ++#else ++ unsigned int unused:17; ++ unsigned int udp:1; ++ unsigned int tcp:1; ++ unsigned int ipv6:1; ++ unsigned int wan:1; ++ unsigned int valid:1; ++#endif ++ ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ unsigned short sip_mask; ++ unsigned short dip_mask; ++ unsigned short sport_start; ++ unsigned short sport_end; ++ unsigned short dport_start; ++ unsigned short dport_end; ++ SPPE_LIMIT limit; ++} SPPE_GLOBAL_RATE_LIMIT; ++ ++/* ++ * SPPE_CMD_FLOW_BRIDGE_IPV4 ++ * type = 1 , as = 3 ++ */ ++typedef struct _sppe_flow_bridge_ipv4 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int reserved:13; ++#else ++ unsigned int reserved:13; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ ++ unsigned int mac3100; ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ unsigned int sip; ++ unsigned int dip; ++ ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_BRIDGE_IPV4; ++ ++/* ++ * SPPE_CMD_FLOW_BRIDGE_IPV6 ++ * type = 2 , as = 3 ++ */ ++typedef struct _sppe_flow_bridge_ipv6 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int reserved:13; ++#else ++ unsigned int reserved:13; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ ++ unsigned int mac3100; ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_BRIDGE_IPV6; ++ ++/* ++ * SPPE_CMD_FLOW_ROUTE_IPV4 ++ * type = 1, as = 0 ++ */ ++typedef struct _sppe_flow_route_ipv4 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++#else ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ ++ unsigned int sip; ++ unsigned int dip; ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_ROUTE_IPV4; ++ ++/* ++ * SPPE_CMD_FLOW_ROUTE_IPV6 ++ * type = 2, as = 0 ++ */ ++typedef struct _sppe_flow_route_ipv6 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++#else ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_ROUTE_IPV6; ++ ++/* ++ * SPPE_CMD_FLOW_NAT_IPV4 ++ * type = 0, as = 1 ++ */ ++typedef struct _sppe_flow_nat_ipv4 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++#else ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ ++ unsigned int sip; ++ unsigned int dip; ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ unsigned short nat_call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ unsigned int nat_ip; ++ unsigned short nat_port; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_NAT_IPV4; ++ ++/* ++ * SPPE_CMD_FLOW_NAT_IPV6 ++ * type = 1, as = 1 ++ */ ++typedef struct _sppe_flow_nat_ipv6 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int pd:1; ++ unsigned int pi:1; ++ unsigned int psidx:4; ++#else ++ unsigned int psidx:4; ++ unsigned int pi:1; ++ unsigned int pd:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ union { ++ struct { ++ unsigned short src; ++ unsigned short dst; ++ } port; ++ struct { ++ unsigned short call_id; ++ unsigned short nat_call_id; ++ } gre; ++ struct { ++ unsigned char protocol; ++ } others; ++ } l4; ++ unsigned int nat_ip[4]; ++ unsigned short nat_port; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_NAT_IPV6; ++ ++/* ++ * SPPE_CMD_FLOW_TWICE_NAT ++ * type = 0, as = 2 ++ */ ++typedef struct _sppe_flow_twice_nat { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l4_prot:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int psidx:4; ++ unsigned int reserved:2; ++#else ++ unsigned int reserved:2; ++ unsigned int psidx:4; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int l4_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ unsigned int sip; ++ unsigned int dip; ++ unsigned short sport; ++ unsigned short dport; ++ unsigned int natsip; ++ unsigned int natdip; ++ unsigned short natsport; ++ unsigned short natdport; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_TWICE_NAT; ++ ++/* ++ * SPPE_CMD_FLOW_MULTICAST_IPV4 ++ * type = 0, as = 0 or 3 ++ */ ++typedef struct _sppe_flow_multicast_ipv4 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int bridge:1; ++ unsigned int reserved:7; ++#else ++ unsigned int reserved:7; ++ unsigned int bridge:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ ++ unsigned int sip; ++ unsigned int dip; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_MCAST_IPV4; ++ ++/* ++ * SPPE_CMD_FLOW_MULTICAST_IPV6 ++ * type = 1, as = 0 or 3 ++ */ ++typedef struct _sppe_flow_multicast_ipv6 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int prs:2; ++ unsigned int kv:1; ++ unsigned int rd:1; ++ unsigned int dscp:6; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int max_len:2; /* Max. length select */ ++ unsigned int bridge:1; ++ unsigned int reserved:7; ++#else ++ unsigned int reserved:7; ++ unsigned int bridge:1; ++ unsigned int max_len:2; /* Max. 
length select */ ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int dscp:6; ++ unsigned int rd:1; ++ unsigned int kv:1; ++ unsigned int prs:2; ++ unsigned int l2s:2; /* L2 select */ ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int unused:16; ++ unsigned int mac4732:16; ++#else ++ unsigned int mac4732:16; ++ unsigned int unused:16; ++#endif ++ unsigned int mac3100; ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned int fr:1; ++ unsigned int lp:1; ++#endif ++ ++ unsigned int sip[4]; ++ unsigned int dip[4]; ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_MCAST_IPV6; ++ ++/* ++ * SPPE_CMD_FLOW_LAYER_TWO ++ * type = 2 ++ */ ++typedef struct _sppe_flow_bridge_l2 { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int fw:1; ++ unsigned int s:1; ++ unsigned int sws:1; ++ unsigned int ag:2; ++ unsigned int rl:1; ++ unsigned int l2_prot:2; ++ unsigned int kv:1; ++ unsigned int fp:1; ++ unsigned int pri:3; ++ unsigned int psidx:4; ++ unsigned int reserved:15; ++#else ++ unsigned int reserved:15; ++ unsigned int psidx:4; ++ unsigned int pri:3; ++ unsigned int fp:1; ++ unsigned int kv:1; ++ unsigned int l2_prot:2; ++ unsigned int rl:1; ++ unsigned int ag:2; ++ unsigned int sws:1; ++ unsigned int s:1; ++ unsigned int fw:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int lp:1; ++ unsigned int fr:1; ++ unsigned int pm:4; ++ unsigned int sv:1; ++ unsigned int svid:12; ++ unsigned int cv:1; ++ unsigned int cvid:12; ++#else ++ unsigned int cvid:12; ++ unsigned int cv:1; ++ unsigned int svid:12; ++ unsigned int sv:1; ++ unsigned int pm:4; ++ unsigned 
int fr:1; ++ unsigned int lp:1; ++#endif ++ ++ unsigned short smac[3]; ++ unsigned short dmac[3]; ++ ++ SPPE_LIMIT limit; ++ unsigned int pkt_cnt; ++} SPPE_FLOW_BRIDGE_L2; ++ ++typedef struct _sppe_arl { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int vid:12; ++ unsigned int pmap:5; ++ unsigned int age:3; ++ unsigned int mymac:1; ++ unsigned int filter:1; ++ unsigned int reserved:10; ++#else ++ unsigned int reserved:10; ++ unsigned int filter:1; ++ unsigned int mymac:1; ++ unsigned int age:3; ++ unsigned int pmap:5; ++ unsigned int vid:12; ++#endif ++ unsigned char mac[6]; ++} SPPE_ARL; ++ ++typedef struct _sppe_init { ++ unsigned int flow_pre_match_paddr; ++ unsigned int flow_pre_match_vaddr; ++ unsigned int flow_body_paddr; ++ unsigned int flow_body_vaddr; ++ unsigned int flow_ext_paddr; ++ unsigned int flow_ext_vaddr; ++ unsigned int flow_size; ++ unsigned int arp_pre_match_paddr; ++ unsigned int arp_pre_match_vaddr; ++ unsigned int arp_body_paddr; ++ unsigned int arp_body_vaddr; ++ unsigned int arp_size; ++ unsigned int ipv6_napt; ++} SPPE_INIT; ++ ++typedef struct _sppe_param_t { ++ SPPE_CMD cmd; ++ SPPE_OP op; ++ ++ union { ++ struct { ++ unsigned char major; ++ unsigned char minor; ++ unsigned char very_minor; ++ unsigned char pre; ++ } sppe_version; ++ ++ SPPE_BOOL sppe_enable; ++ unsigned int sppe_lanip; ++ ++ struct { ++ unsigned int index; ++ unsigned int ip; ++ unsigned int session_id; ++ } sppe_wanip; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int index:2; ++ unsigned int to:4; ++ unsigned int sv:1; ++ unsigned int stag_vid:12; ++ unsigned int cv:1; ++ unsigned int ctag_vid:12; ++#else ++ unsigned int ctag_vid:12; ++ unsigned int cv:1; ++ unsigned int stag_vid:12; ++ unsigned int sv:1; ++ unsigned int to:4; ++ unsigned int index:2; ++#endif ++ unsigned char mac[6]; /* MAC address */ ++ } sppe_prda; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int interval:2; ++ unsigned int mfactor:1; ++ unsigned int ununsed:29; 
++#else ++ unsigned int ununsed:29; ++ unsigned int mfactor:1; ++ unsigned int interval:2; ++#endif ++ } sppe_rlcfg; ++ ++ struct { ++ unsigned int index; ++ SPPE_PPPOE_RELAY rule; ++ } sppe_pppoe_relay; ++ ++ struct { ++ unsigned int index; ++ SPPE_BRIDGE rule; ++ } sppe_bridge; ++ ++ struct { ++ unsigned int index; ++ SPPE_ACL rule; ++ } sppe_acl; ++ ++ struct { ++ unsigned int index; ++ SPPE_ROUTE rule; ++ } sppe_route; ++#if 0 ++ struct { ++ unsigned int index; ++ SPPE_VSERVER rule; ++ } sppe_vserver; ++#else ++ struct { ++ unsigned int index; ++ SPPE_SNAT rule; ++ } sppe_snat; ++ ++ struct { ++ unsigned int index; ++ SPPE_DNAT rule; ++ } sppe_dnat; ++#endif ++ struct { ++ unsigned int index; ++ SPPE_GLOBAL_RATE_LIMIT rule; ++ } sppe_grl; ++ ++ struct { ++ unsigned char unit; ++ unsigned char arp; ++ unsigned char bridge; ++ unsigned char tcp; ++ unsigned char udp; ++ unsigned char pptp; ++ unsigned char other; ++ } sppe_agingout; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int index:2; ++ unsigned int reserved:20; ++ unsigned int max:10; ++#else ++ unsigned int max:10; ++ unsigned int reserved:20; ++ unsigned int index:2; ++#endif ++ } sppe_max_length; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int v6:1; ++ unsigned int s:1; ++ unsigned int r:1; ++ unsigned int fr:1; ++ unsigned int to:4; ++ unsigned int unused:24; ++#else ++ unsigned int unused:24; ++ unsigned int to:4; ++ unsigned int fr:1; ++ unsigned int r:1; ++ unsigned int s:1; ++ unsigned int v6:1; ++#endif ++ ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int sv:1; ++ unsigned int stag_vid:12; ++ unsigned int cv:1; ++ unsigned int ctag_vid:12; ++ unsigned int unused_1:6; ++#else ++ unsigned int unused_1:6; ++ unsigned int ctag_vid:12; ++ unsigned int cv:1; ++ unsigned int stag_vid:12; ++ unsigned int sv:1; ++#endif ++ unsigned int ip[4]; ++ unsigned char mac[6]; ++ } sppe_arp; ++ ++ SPPE_ARL sppe_arl; ++ ++ struct { ++ unsigned int sid; ++ unsigned int index; ++ } 
sppe_pppoe_sid; ++ ++ SPPE_FLOW_BRIDGE_IPV4 flow_bridge_ipv4; ++ SPPE_FLOW_BRIDGE_IPV6 flow_bridge_ipv6; ++ SPPE_FLOW_ROUTE_IPV4 flow_route_ipv4; ++ SPPE_FLOW_ROUTE_IPV6 flow_route_ipv6; ++ SPPE_FLOW_NAT_IPV4 flow_nat_ipv4; ++ SPPE_FLOW_NAT_IPV6 flow_nat_ipv6; ++ SPPE_FLOW_TWICE_NAT flow_twice_nat; ++ SPPE_FLOW_MCAST_IPV4 flow_mcast_ipv4; ++ SPPE_FLOW_MCAST_IPV6 flow_mcast_ipv6; ++ SPPE_FLOW_BRIDGE_L2 flow_bridge_l2; ++ ++ struct { ++ SPPE_DUMP_TYPE type; ++ unsigned short key; ++ unsigned short way; ++ unsigned int raw[23]; ++ } sppe_dump; ++ ++ unsigned int sppe_sna_th; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int enable:1; ++ unsigned int lan:6; ++ unsigned int wan:6; ++ unsigned int reserved:19; ++#else ++ unsigned int reserved:19; ++ unsigned int wan:6; ++ unsigned int lan:6; ++ unsigned int enable:1; ++#endif ++ } sppe_chgdscp; ++ ++ struct { ++#ifndef CONFIG_SWITCH_BIG_ENDIAN ++ unsigned int enable:1; ++ unsigned int lan:3; ++ unsigned int wan:3; ++ unsigned int reserved:25; ++#else ++ unsigned int reserved:25; ++ unsigned int wan:3; ++ unsigned int lan:3; ++ unsigned int enable:1; ++#endif ++ } sppe_chgpri; ++ ++ struct { ++ int enable; ++ int module; ++ int level; ++ } sppe_debug; ++ ++ struct { ++ unsigned int offset; ++ unsigned int data; ++ } sppe_reg; ++ ++ struct { ++ unsigned int offset; ++ unsigned int data; ++ } sppe_sram; ++ ++ struct { ++ char enable; ++ unsigned int max; ++ unsigned int min; ++ char drop_red; ++ char pass_green; ++ } sppe_rl_flow; ++ ++ struct { ++ char enable; ++ unsigned int max; ++ unsigned int min; ++ char drop_red; ++ char pass_green; ++ } sppe_rl_rule; ++ ++ struct { ++ unsigned int index; ++ unsigned short start; ++ unsigned short end; ++ SPPE_LIMIT limit; ++ } sppe_bm_flow; ++ ++ struct { ++ unsigned int index; ++ unsigned int pkt_cnt; ++ unsigned int byte_cnt; ++ } sppe_accounting_group; ++ ++ struct { ++ unsigned int pkt_cnt; ++ } sppe_drop_ipcs_err; /* IP checksum error */ ++ ++ struct { ++ 
unsigned int pkt_cnt; ++ } sppe_drop_rate_limit; ++ ++ struct { ++ unsigned int pkt_cnt; ++ } sppe_drop_others; ++ ++ struct { ++ unsigned int index; ++ unsigned char name[16]; ++ struct net_device *dev; ++ unsigned int vid; ++ } sppe_pci_fp_dev; ++ ++ SPPE_INIT sppe_init; ++ ++ } data; ++} SPPE_PARAM; ++ ++extern int sppe_hook_ready; ++extern int (*sppe_func_hook)(SPPE_PARAM *param); ++ ++extern int sppe_pci_fp_ready; ++extern int (*sppe_pci_fp_hook)(SPPE_PARAM *param); ++ ++#endif /* CONFIG_CNS3XXX_SPPE */ ++ ++#endif /* _SPPE_H_ */ +--- /dev/null ++++ b/include/linux/cns3xxx/switch_api.h +@@ -0,0 +1,366 @@ ++/******************************************************************************* ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; either version 2 of the License, or (at your option) ++ * any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License along with ++ * this program; if not, write to the Free Software Foundation, Inc., 59 ++ * Temple Place - Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * The full GNU General Public License is included in this distribution in the ++ * file called LICENSE. 
++ * ++ * Contact Information: ++ * Technology Support ++ * Star Semiconductor 4F, No.1, Chin-Shan 8th St, Hsin-Chu,300 Taiwan, R.O.C ++ * ++ ********************************************************************************/ ++ ++#ifndef SWITCH_API_H_K ++#define SWITCH_API_H_K ++ ++ ++#ifndef __KERNEL__ ++typedef unsigned int u32; ++typedef unsigned short int u16; ++typedef unsigned char u8; ++typedef int s32; ++#else ++ ++#include ++ ++#endif ++ ++ ++#define CAVM_OK 0 ++#define CAVM_ERR 1 ++#define CAVM_NOT_FOUND 2 ++#define CAVM_FOUND 3 ++#define CAVM_FAIL -1 // use minus ++ ++#define MAC_PORT0 0 ++#define MAC_PORT1 1 ++#define MAC_PORT2 2 ++#define CPU_PORT 3 ++ ++typedef enum ++{ ++ ++ ++ CNS3XXX_ARL_TABLE_LOOKUP, ++ CNS3XXX_ARL_TABLE_ADD, ++ CNS3XXX_ARL_TABLE_DEL, ++ CNS3XXX_ARL_TABLE_SEARCH, ++ CNS3XXX_ARL_TABLE_SEARCH_AGAIN, ++ CNS3XXX_ARL_IS_TABLE_END, ++ CNS3XXX_ARL_TABLE_FLUSH, ++ ++ CNS3XXX_VLAN_TABLE_LOOKUP, ++ CNS3XXX_VLAN_TABLE_ADD, ++ CNS3XXX_VLAN_TABLE_DEL, ++ CNS3XXX_VLAN_TABLE_READ, ++ ++ CNS3XXX_SKEW_SET, ++ CNS3XXX_SKEW_GET, ++ ++ CNS3XXX_BRIDGE_SET, ++ CNS3XXX_BRIDGE_GET, ++ ++ CNS3XXX_PORT_NEIGHBOR_SET, ++ CNS3XXX_PORT_NEIGHBOR_GET, ++ ++ CNS3XXX_HOL_PREVENT_SET, ++ CNS3XXX_HOL_PREVENT_GET, ++ ++ CNS3XXX_TC_SET, // traffic class, for 1, 2, 4, traffic class ++ CNS3XXX_TC_GET, ++ ++ CNS3XXX_PRI_CTRL_SET, ++ CNS3XXX_PRI_CTRL_GET, ++ ++ CNS3XXX_DMA_RING_CTRL_SET, ++ CNS3XXX_DMA_RING_CTRL_GET, ++ ++ CNS3XXX_PRI_IP_DSCP_SET, ++ CNS3XXX_PRI_IP_DSCP_GET, ++ ++ CNS3XXX_ETYPE_SET, ++ CNS3XXX_ETYPE_GET, ++ ++ CNS3XXX_UDP_RANGE_SET, ++ CNS3XXX_UDP_RANGE_GET, ++ ++ CNS3XXX_ARP_REQUEST_SET, ++ CNS3XXX_ARP_REQUEST_GET, ++ ++ CNS3XXX_RATE_LIMIT_SET, ++ CNS3XXX_RATE_LIMIT_GET, ++ ++ CNS3XXX_QUEUE_WEIGHT_SET, ++ CNS3XXX_QUEUE_WEIGHT_GET, ++ ++ CNS3XXX_FC_RLS_SET, ++ CNS3XXX_FC_RLS_GET, ++ ++ CNS3XXX_FC_SET_SET, ++ CNS3XXX_FC_SET_GET, ++ ++ CNS3XXX_SARL_RLS_SET, ++ CNS3XXX_SARL_RLS_GET, ++ ++ CNS3XXX_SARL_SET_SET, ++ CNS3XXX_SARL_SET_GET, ++ ++ 
CNS3XXX_SARL_OQ_SET, ++ CNS3XXX_SARL_OQ_GET, ++ ++ CNS3XXX_SARL_ENABLE_SET, ++ CNS3XXX_SARL_ENABLE_GET, ++ ++ CNS3XXX_FC_SET, ++ CNS3XXX_FC_GET, ++ ++ CNS3XXX_IVL_SET, ++ CNS3XXX_IVL_GET, ++ ++ CNS3XXX_WAN_PORT_SET, ++ CNS3XXX_WAN_PORT_GET, ++ ++ CNS3XXX_PVID_GET, ++ CNS3XXX_PVID_SET, ++ ++ CNS3XXX_QA_GET, // queue allocate ++ CNS3XXX_QA_SET, ++ ++ CNS3XXX_PACKET_MAX_LEN_GET, // set maximun frame length. ++ CNS3XXX_PACKET_MAX_LEN_SET, ++ ++ CNS3XXX_BCM53115M_REG_READ, ++ CNS3XXX_BCM53115M_REG_WRITE, ++ ++ CNS3XXX_RXRING_STATUS, ++ CNS3XXX_TXRING_STATUS, ++ ++ CNS3XXX_DUMP_MIB_COUNTER, ++ ++ CNS3XXX_REG_READ, ++ CNS3XXX_REG_WRITE, ++ ++}CNS3XXXIoctlCmd; ++ ++typedef struct ++{ ++ u8 vlan_index; ++ u8 valid; ++ u16 vid; ++ u8 wan_side; ++ u8 etag_pmap; ++ u8 mb_pmap; ++ //u8 my_mac[6]; ++ u8 *my_mac; ++}VLANTableEntry; // for vlan table function ++ ++typedef struct ++{ ++ u16 vid; ++ u8 pmap; ++ //u8 mac[6]; ++ u8 *mac; ++ u8 age_field; ++ u8 vlan_mac; ++ u8 filter; ++}ARLTableEntry; // for arl table function ++ ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ ARLTableEntry entry; ++}CNS3XXXARLTableEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ VLANTableEntry entry; ++}CNS3XXXVLANTableEntry; // for ioctl VLAN table ... 
++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ u8 enable; ++}CNS3XXXHOLPreventControl; ++ ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char which_port; // 0, 1, 2, 3 (cpu port) ++ unsigned char type; // 0: C-Neighbor, 1: S-Neighbor ++}CNS3XXXPortNeighborControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char type; // 0: C-Component, 1: S-Component ++}CNS3XXXBridgeControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char tc; // traffic class, for 1, 2, 4, traffic class ++}CNS3XXXTrafficClassControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char which_port; // 0, 1, 2, 3 (cpu port) ++ unsigned int val; ++ unsigned char port_pri; ++ unsigned char udp_pri_en; ++ unsigned char dscp_pri_en; ++ unsigned char vlan_pri_en; ++ unsigned char ether_pri_en; ++}CNS3XXXPriCtrlControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char ts_double_ring_en; ++ unsigned char fs_double_ring_en; ++ unsigned char fs_pkt_allocate; ++}CNS3XXXDmaRingCtrlControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned int ip_dscp_num; // 0 ~ 63 ++ unsigned char pri; // 3 bits ++}CNS3XXXPriIpDscpControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned int etype_num; ++ unsigned int val; ++ unsigned int pri; ++}CNS3XXXEtypeControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned int udp_range_num; ++ unsigned short int port_start; ++ unsigned short int port_end; ++}CNS3XXXUdpRangeEtypeControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char val; // 0: boradcast forward, 1: redirect to the CPU ++}CNS3XXXArpRequestControl; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char which_port; // 0, 1, 2, 3 (port 0 extra dma) ++ unsigned char band_width; ++ unsigned char base_rate; ++ ++}CNS3XXXRateLimitEntry; // for ioctl arl ... 
++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char which_port; // 0, 1, 2, 3 (port 0 extra dma) ++ unsigned char sch_mode; ++ unsigned char q0_w; ++ unsigned char q1_w; ++ unsigned char q2_w; ++ unsigned char q3_w; ++}CNS3XXXQueueWeightEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned int val; ++ unsigned char tc; // 0-3 ++ unsigned char gyr; // 0 (green), 1(yellow), 2(red) ++}CNS3XXXSARLEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char port; // 0, 1, 2, 3 (cpu port) ++ unsigned char fc_en; // 0(rx/tx disable), 1(rx enable), 2(tx enable), 3(rx/tx enable) ++}CNS3XXXFCEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char enable; // enable: 1 -> IVL, enable: 0 -> SVL ++}CNS3XXXIVLEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char wan_port; ++}CNS3XXXWANPortEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char which_port; ++ unsigned int pvid; ++}CNS3XXXPVIDEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char qa; // queue allocate ++}CNS3XXXQAEntry; // for ioctl arl ... ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ unsigned char max_len; // maximum frame length ++}CNS3XXXMaxLenEntry; // for ioctl arl ... 
++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ u8 page; ++ u8 offset; ++ u32 u32_val; ++ u16 u16_val; ++ u8 u8_val; ++ u8 data_len; ++ ++}CNS3XXXBCM53115M; ++ ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ u32 mib[52]; ++ u16 mib_len; ++}CNS3XXXMIBCounter; ++ ++#if 0 ++typedef struct ++{ ++ CNS3XXXIoctlCmd cmd; ++ TXRing *tx_ring; ++ RXRing *rx_ring; ++}CNS3XXXRingStatus; ++#endif ++ ++ ++#endif +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -133,6 +133,10 @@ + + #include "net-sysfs.h" + ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#endif ++ + /* Instead of increasing this, you should create a hash table. */ + #define MAX_GRO_SKBS 8 + +@@ -1944,6 +1948,197 @@ int weight_p __read_mostly = 64; + + DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; + ++#if defined (CONFIG_CNS3XXX_SPPE) ++static struct net_device *tun_netdev = NULL; ++ ++int sppe_pci_fp(struct sk_buff *skb) ++{ ++ SPPE_PARAM param; ++ struct iphdr *iph; ++#if defined (CONFIG_IPV6) ++ struct ipv6hdr *ipv6h; ++#endif ++ struct tcphdr *th; ++ struct udphdr *uh; ++ int pci_dev_index; ++ ++ if (!sppe_hook_ready) { ++ goto NOT_IN_FP; ++ } ++ ++ if (!sppe_pci_fp_ready) { ++ goto NOT_IN_FP; ++ } ++ ++ /* check device packet comes from, is a registed device? 
*/ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ param.cmd = SPPE_CMD_PCI_FP_DEV; ++ param.op = SPPE_OP_GET; ++ param.data.sppe_pci_fp_dev.dev = skb->dev; ++ sppe_pci_fp_hook(¶m); ++ ++ pci_dev_index = param.data.sppe_pci_fp_dev.index; ++ ++ if ((-1) == pci_dev_index) { ++ goto NOT_IN_FP; ++ } ++ ++ if (!tun_netdev) { ++ tun_netdev = dev_get_by_name(&init_net, "fp"); ++ ++ if (!tun_netdev) { ++ goto NOT_IN_FP; ++ } ++ } ++ ++ /* check PPE status */ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ param.cmd = SPPE_CMD_ENABLE; ++ param.op = SPPE_OP_GET; ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to get PPE status!!\n", __FUNCTION__); ++ goto NOT_IN_FP; ++ } ++ ++ if (!param.data.sppe_enable) { ++ goto NOT_IN_FP; ++ } ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ switch (htons(skb->protocol)) { ++ case ETH_P_IP: ++ iph = (struct iphdr *)skb->data; ++ ++ if (5 != iph->ihl) { goto NOT_IN_FP; } ++ ++ if (iph->frag_off & 0x20) { goto NOT_IN_FP; } ++ ++ param.cmd = SPPE_CMD_FLOW_NAT_IPV4; ++ param.op = SPPE_OP_GET; ++ ++ param.data.flow_nat_ipv4.sip = ntohl(iph->saddr); ++ param.data.flow_nat_ipv4.dip = ntohl(iph->daddr); ++ ++ switch (iph->protocol) { ++ case IPPROTO_TCP: ++ th = (struct tcphdr *) ((int *)iph + 5); /* IP header length is 20 */ ++ ++ if ((th->syn) || (th->fin) || (th->rst)) { goto NOT_IN_FP; } ++ ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_TCP; ++ param.data.flow_nat_ipv4.l4.port.src = ntohs(th->source); ++ param.data.flow_nat_ipv4.l4.port.dst = ntohs(th->dest); ++ break; ++ case IPPROTO_UDP: ++ uh = (struct udphdr *) ((int *)iph + 5); /* IP header length is 20 */ ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_UDP; ++ param.data.flow_nat_ipv4.l4.port.src = ntohs(uh->source); ++ param.data.flow_nat_ipv4.l4.port.dst = ntohs(uh->dest); ++ break; ++ default: ++ goto NOT_IN_FP; ++ } ++ ++ if (SPPE_RESULT_SUCCESS != sppe_func_hook(¶m)) { ++ goto NOT_IN_FP; ++ } else { ++ struct ethhdr *eth; ++ ++ eth = (struct ethhdr *)skb->mac_header; ++ ++ memset(¶m, 0, 
sizeof(SPPE_PARAM)); ++ param.cmd = SPPE_CMD_ARP; ++ param.op = SPPE_OP_SET; ++ param.data.sppe_arp.s = 1; ++ param.data.sppe_arp.ip[0] = iph->saddr; ++ param.data.sppe_arp.mac[0] = eth->h_source[0]; ++ param.data.sppe_arp.mac[1] = eth->h_source[1]; ++ param.data.sppe_arp.mac[2] = eth->h_source[2]; ++ param.data.sppe_arp.mac[3] = eth->h_source[3]; ++ param.data.sppe_arp.mac[4] = eth->h_source[4]; ++ param.data.sppe_arp.mac[5] = eth->h_source[5]; ++ param.data.sppe_arp.unused_1 = pci_dev_index; ++ ++ if (SPPE_RESULT_SUCCESS != sppe_func_hook(¶m)) { ++ printk("add ARP fail\n"); ++ #if 0 ++ } else { ++ param.data.sppe_arp.unused_1 = 0xf; ++ param.op = SPPE_OP_GET; ++ if (SPPE_RESULT_SUCCESS != sppe_func_hook(¶m)) { ++ printk("read ARP fail\n"); ++ } else { ++ printk("param.data.sppe_arp.unused_1 %d\n", param.data.sppe_arp.unused_1); ++ } ++ #endif ++ } ++ } ++ break; /* case ETH_P_IP: */ ++#if defined (CONFIG_IPV6) ++ case ETH_P_IPV6: ++ ipv6h = (struct ipv6hdr *)skb->data; ++ switch (ipv6h->nexthdr) { ++ case IPPROTO_TCP: ++ th = (struct tcphdr *) ((int *)ipv6h + 10); /* IPv6 header length is 40 bytes */ ++ ++ if ((th->syn) || (th->fin) || (th->rst)) { goto NOT_IN_FP; } ++ ++ param.data.flow_route_ipv6.l4_prot = SPPE_PROT_TCP; ++ param.data.flow_route_ipv6.l4.port.src = ntohs(th->source); ++ param.data.flow_route_ipv6.l4.port.dst = ntohs(th->dest); ++ param.data.flow_route_ipv6.l4_prot = SPPE_PROT_TCP; ++ break; ++ case IPPROTO_UDP: ++ uh = (struct udphdr *) ((int *)ipv6h + 10); /* IPv6 header length is 40 byte */ ++ param.data.flow_route_ipv6.l4_prot = SPPE_PROT_UDP; ++ param.data.flow_route_ipv6.l4.port.src = ntohs(uh->source); ++ param.data.flow_route_ipv6.l4.port.dst = ntohs(uh->dest); ++ break; ++ default: ++ goto NOT_IN_FP; ++ } ++ ++ param.data.flow_route_ipv6.sip[0] = ntohl(ipv6h->saddr.s6_addr32[0]); ++ param.data.flow_route_ipv6.sip[1] = ntohl(ipv6h->saddr.s6_addr32[1]); ++ param.data.flow_route_ipv6.sip[2] = ntohl(ipv6h->saddr.s6_addr32[2]); ++ 
param.data.flow_route_ipv6.sip[3] = ntohl(ipv6h->saddr.s6_addr32[3]); ++ param.data.flow_route_ipv6.dip[0] = ntohl(ipv6h->daddr.s6_addr32[0]); ++ param.data.flow_route_ipv6.dip[1] = ntohl(ipv6h->daddr.s6_addr32[1]); ++ param.data.flow_route_ipv6.dip[2] = ntohl(ipv6h->daddr.s6_addr32[2]); ++ param.data.flow_route_ipv6.dip[3] = ntohl(ipv6h->daddr.s6_addr32[3]); ++ ++ param.cmd = SPPE_CMD_FLOW_ROUTE_IPV6; ++ param.op = SPPE_OP_GET; ++ ++ if (SPPE_RESULT_SUCCESS != sppe_func_hook(¶m)) { ++ goto NOT_IN_FP; ++ } ++ ++ break; /* case ETH_P_IPV6: */ ++#endif ++ case ETH_P_PPP_SES: ++ break; ++ default: /* unsupport protocol */ ++ goto NOT_IN_FP; ++ } ++ /* Update counter */ ++ skb->dev = tun_netdev; ++ skb->ip_summed = CHECKSUM_NONE; ++ skb_push(skb, ETH_HLEN); ++ ++ dev_queue_xmit(skb); ++ ++return 0; ++ ++NOT_IN_FP: ++ return (-1); ++} ++#endif ++ ++ ++ + + /** + * netif_rx - post buffer to the network code +@@ -1965,6 +2160,12 @@ int netif_rx(struct sk_buff *skb) + struct softnet_data *queue; + unsigned long flags; + ++#if defined (CONFIG_CNS3XXX_SPPE) ++ if (0 == sppe_pci_fp(skb)) { ++ return NET_RX_SUCCESS; ++ } ++#endif ++ + /* if netpoll wants it, pretend we never saw it */ + if (netpoll_rx(skb)) + return NET_RX_DROP; +@@ -2259,6 +2460,12 @@ int netif_receive_skb(struct sk_buff *sk + if (!skb->tstamp.tv64) + net_timestamp(skb); + ++#if defined (CONFIG_CNS3XXX_SPPE) ++ if (0 == sppe_pci_fp(skb)) { ++ return NET_RX_SUCCESS; ++ } ++#endif ++ + if (skb->vlan_tci && vlan_hwaccel_do_receive(skb)) + return NET_RX_SUCCESS; + +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -42,6 +42,9 @@ + #include + #include + #include ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#endif + + #define NF_CONNTRACK_VERSION "0.5.0" + +@@ -275,6 +278,92 @@ void nf_ct_insert_dying_list(struct nf_c + } + EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); + ++#if defined (CONFIG_CNS3XXX_SPPE) ++static int sppe_flow_del(struct nf_conn *ct) ++{ ++ if 
(sppe_hook_ready) { ++ SPPE_PARAM param; ++ ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ if (PF_INET == orig->src.l3num) { ++ param.cmd = SPPE_CMD_FLOW_NAT_IPV4; ++ } else if (PF_INET6 == orig->src.l3num) { ++ param.cmd = SPPE_CMD_FLOW_ROUTE_IPV6; ++ } else { ++ goto SPPE_FLOW_DEL_FINI; ++ } ++ ++ if (IPPROTO_TCP == orig->dst.protonum) { ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_TCP; ++ } else if (IPPROTO_UDP == orig->dst.protonum) { ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_UDP; ++ } else if (IPPROTO_GRE == orig->dst.protonum) { ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_PPTP_GRE; ++ } else { ++ goto SPPE_FLOW_DEL_FINI; ++ } ++ ++ param.op = SPPE_OP_DELETE_OUTDATED; ++ ++ param.data.flow_nat_ipv4.fw = 0; ++ if (SPPE_CMD_FLOW_ROUTE_IPV6 == param.cmd) { ++ param.data.flow_route_ipv6.sip[0] = htonl(orig->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(orig->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(orig->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(orig->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(orig->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(orig->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(orig->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(orig->dst.u3.ip6[3]); ++ param.data.flow_route_ipv6.l4.port.src = htons(orig->src.u.tcp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(orig->dst.u.tcp.port); ++ } else { ++ param.data.flow_nat_ipv4.sip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(orig->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4.port.src = htons(orig->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = htons(orig->dst.u.tcp.port); ++ } ++ ++ if (SPPE_RESULT_FAIL == sppe_func_hook(¶m)) { ++ return (-1); ++ } ++ ++ param.data.flow_nat_ipv4.fw = 1; ++ ++ if (SPPE_CMD_FLOW_ROUTE_IPV6 == 
param.cmd) { ++ param.data.flow_route_ipv6.sip[0] = htonl(reply->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(reply->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(reply->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(reply->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(reply->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(reply->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(reply->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(reply->dst.u3.ip6[3]); ++ ++ param.data.flow_route_ipv6.l4.port.src = htons(reply->src.u.tcp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(reply->dst.u.tcp.port); ++ } else { ++ param.data.flow_nat_ipv4.sip = htonl(reply->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4.port.src = htons(reply->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = htons(reply->dst.u.tcp.port); ++ } ++ ++ if (SPPE_RESULT_FAIL == sppe_func_hook(¶m)) { ++ return (-1); ++ } ++ } ++ ++SPPE_FLOW_DEL_FINI: ++ return 0; ++} ++#endif ++ ++ + static void death_by_timeout(unsigned long ul_conntrack) + { + struct nf_conn *ct = (void *)ul_conntrack; +@@ -289,6 +378,16 @@ static void death_by_timeout(unsigned lo + set_bit(IPS_DYING_BIT, &ct->status); + nf_ct_delete_from_lists(ct); + nf_ct_put(ct); ++ ++#if defined (CONFIG_CNS3XXX_SPPE) ++ if (sppe_flow_del(ct)) { ++ #if 0 ++ ct->timeout.expires = jiffies + (120*HZ); ++ add_timer(&ct->timeout); ++ #endif ++ } ++#endif ++ + } + + /* +--- a/net/netfilter/nf_conntrack_proto_gre.c ++++ b/net/netfilter/nf_conntrack_proto_gre.c +@@ -40,6 +40,10 @@ + #include + #include + ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#endif ++ + #define GRE_TIMEOUT (30 * HZ) + #define GRE_STREAM_TIMEOUT (180 * HZ) + +@@ -226,6 +230,57 @@ static int gre_print_conntrack(struct se + (ct->proto.gre.stream_timeout / HZ)); + } + ++#if defined (CONFIG_CNS3XXX_SPPE) ++static int 
sppe_gre_flow_add(struct nf_conn *ct) ++{ ++ SPPE_PARAM param; ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ if (0 == sppe_hook_ready) { ++ return 0; ++ } ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ param.cmd = SPPE_CMD_FLOW_NAT_IPV4; ++ param.op = SPPE_OP_SET; ++ ++ param.data.flow_nat_ipv4.fw = 0; ++ param.data.flow_nat_ipv4.sip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(orig->dst.u3.ip); ++ ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_PPTP_GRE; ++ param.data.flow_nat_ipv4.l4.gre.call_id = htons(orig->dst.u.gre.key); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4.gre.nat_call_id = htons(reply->src.u.gre.key); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv4 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ param.data.flow_nat_ipv4.fw = 1; ++ ++ param.data.flow_nat_ipv4.sip = htonl(reply->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4.gre.call_id = htons(reply->dst.u.gre.key); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.l4.gre.nat_call_id = htons(orig->src.u.gre.key); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv4 from-WAN flow!!\n", __FUNCTION__); ++ } ++ ++ return 0; ++} ++#endif ++ ++ ++ + /* Returns verdict for packet, and may modify conntrack */ + static int gre_packet(struct nf_conn *ct, + const struct sk_buff *skb, +@@ -242,6 +297,10 @@ static int gre_packet(struct nf_conn *ct + /* Also, more likely to be important, and not a probe. 
*/ + set_bit(IPS_ASSURED_BIT, &ct->status); + nf_conntrack_event_cache(IPCT_STATUS, ct); ++#if defined (CONFIG_CNS3XXX_SPPE) ++ sppe_gre_flow_add(ct); ++#endif ++ + } else + nf_ct_refresh_acct(ct, ctinfo, skb, + ct->proto.gre.timeout); +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -29,6 +29,10 @@ + #include + #include + ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#endif ++ + /* "Be conservative in what you do, + be liberal in what you accept from others." + If it's non-zero, we mark only out of window RST segments as INVALID. */ +@@ -814,6 +818,141 @@ static int tcp_error(struct net *net, + return NF_ACCEPT; + } + ++#if defined (CONFIG_CNS3XXX_SPPE) ++static int sppe_tcp_flow_add_ipv4(struct nf_conn *ct) ++{ ++ SPPE_PARAM param; ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++#if defined (CONFIG_NF_CONNTRACK_PPTP) ++ if (1723 == htons(orig->dst.u.tcp.port)) { ++ /* PPTP Control Protocol, PPTP GRE tunneling need this kind of packet */ ++ return 0; ++ } ++#endif ++#if defined (CONFIG_NF_CONNTRACK_FTP) ++ if (21 == htons(orig->dst.u.tcp.port)) { ++ /* PPTP Control Protocol, PPTP GRE tunneling need this kind of packet */ ++ return 0; ++ } ++#endif ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ param.cmd = SPPE_CMD_FLOW_NAT_IPV4; ++ param.op = SPPE_OP_SET; ++ ++ param.data.flow_nat_ipv4.fw = 0; ++ param.data.flow_nat_ipv4.sip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(orig->dst.u3.ip); ++ ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_TCP; ++ param.data.flow_nat_ipv4.l4.port.src = htons(orig->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = htons(orig->dst.u.tcp.port); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.nat_port = htons(reply->dst.u.tcp.port); ++ param.data.flow_nat_ipv4.max_len = 0x3; ++ ++ if (sppe_func_hook(¶m)) { ++ 
printk("<0><%s> fail to add IPv4 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ param.data.flow_nat_ipv4.fw = 1; ++ param.data.flow_nat_ipv4.sip = htonl(reply->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4.port.src = htons(reply->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = htons(reply->dst.u.tcp.port); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.nat_port = htons(orig->src.u.tcp.port); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv4 from-WAN flow!!\n", __FUNCTION__); ++ } ++ ++ return 0; ++} ++ ++static int sppe_tcp_flow_add_ipv6(struct nf_conn *ct) ++{ ++ SPPE_PARAM param; ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ if (1723 == htons(orig->dst.u.tcp.port)) { ++ /* PPTP Control Protocol, PPTP GRE tunneling need this kind of packet */ ++ return 0; ++ } ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ param.cmd = SPPE_CMD_FLOW_ROUTE_IPV6; ++ param.op = SPPE_OP_SET; ++ ++ /* from-LAN flow */ ++ param.data.flow_route_ipv6.fw = 0; ++ param.data.flow_route_ipv6.sip[0] = htonl(orig->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(orig->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(orig->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(orig->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(orig->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(orig->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(orig->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(orig->dst.u3.ip6[3]); ++ param.data.flow_route_ipv6.l4_prot = SPPE_PROT_TCP; ++ param.data.flow_route_ipv6.l4.port.src = htons(orig->src.u.tcp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(orig->dst.u.tcp.port); ++ param.data.flow_route_ipv6.max_len = 0x3; ++ ++ if (sppe_func_hook(¶m)) { 
++ printk("<0><%s> fail to add IPv6 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ /* from-WAN flow */ ++ param.data.flow_route_ipv6.fw = 1; ++ param.data.flow_route_ipv6.sip[0] = htonl(reply->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(reply->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(reply->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(reply->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(reply->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(reply->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(reply->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(reply->dst.u3.ip6[3]); ++ param.data.flow_route_ipv6.l4.port.src = htons(reply->src.u.tcp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(reply->dst.u.tcp.port); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv6 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ return 0; ++} ++ ++static int sppe_tcp_flow_add(struct nf_conn *ct) ++{ ++ if (0 == sppe_hook_ready) { ++ return 0; ++ } ++ ++ if (AF_INET == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { ++ sppe_tcp_flow_add_ipv4(ct); ++ return 0; ++ } else if (AF_INET6 == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { ++ sppe_tcp_flow_add_ipv6(ct); ++ return 0; ++ } ++ ++ /* return fail */ ++ return (-1); ++} ++#endif ++ + /* Returns verdict for packet, or -1 for invalid. 
*/ + static int tcp_packet(struct nf_conn *ct, + const struct sk_buff *skb, +@@ -961,11 +1100,18 @@ static int tcp_packet(struct nf_conn *ct + break; + } + ++#if defined (CONFIG_CNS3XXX_SPPE) ++ if(!(th->rst == 1 || th->fin == 1)) { ++#endif + if (!tcp_in_window(ct, &ct->proto.tcp, dir, index, + skb, dataoff, th, pf)) { + spin_unlock_bh(&ct->lock); + return -NF_ACCEPT; + } ++#if defined (CONFIG_CNS3XXX_SPPE) ++ } ++#endif ++ + in_window: + /* From now on we have got in-window packets */ + ct->proto.tcp.last_index = index; +@@ -1015,6 +1161,10 @@ static int tcp_packet(struct nf_conn *ct + connection. */ + set_bit(IPS_ASSURED_BIT, &ct->status); + nf_conntrack_event_cache(IPCT_STATUS, ct); ++#if defined (CONFIG_CNS3XXX_SPPE) ++ /* Add SPPE hardware flow */ ++ sppe_tcp_flow_add(ct); ++#endif + } + nf_ct_refresh_acct(ct, ctinfo, skb, timeout); + +--- a/net/netfilter/nf_conntrack_proto_udp.c ++++ b/net/netfilter/nf_conntrack_proto_udp.c +@@ -24,6 +24,9 @@ + #include + #include + #include ++#if defined (CONFIG_CNS3XXX_SPPE) ++#include ++#endif + + static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ; + static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ; +@@ -63,6 +66,122 @@ static int udp_print_tuple(struct seq_fi + ntohs(tuple->dst.u.udp.port)); + } + ++#if defined (CONFIG_CNS3XXX_SPPE) ++static int sppe_udp_flow_add_ipv4(struct nf_conn *ct) ++{ ++ SPPE_PARAM param; ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ param.cmd = SPPE_CMD_FLOW_NAT_IPV4; ++ param.op = SPPE_OP_SET; ++ ++ param.data.flow_nat_ipv4.fw = 0; ++ param.data.flow_nat_ipv4.sip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(orig->dst.u3.ip); ++ param.data.flow_nat_ipv4.l4_prot = SPPE_PROT_UDP; ++ ++ param.data.flow_nat_ipv4.l4.port.src = htons(orig->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = 
htons(orig->dst.u.tcp.port); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(reply->dst.u3.ip); ++ param.data.flow_nat_ipv4.nat_port = htons(reply->dst.u.tcp.port); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv4 UDP from-LAN flow!!\n", __FUNCTION__); ++ } ++ param.data.flow_nat_ipv4.fw = 1; ++ param.data.flow_nat_ipv4.sip = htonl(reply->src.u3.ip); ++ param.data.flow_nat_ipv4.dip = htonl(reply->dst.u3.ip); ++ ++ param.data.flow_nat_ipv4.l4.port.src = htons(reply->src.u.tcp.port); ++ param.data.flow_nat_ipv4.l4.port.dst = htons(reply->dst.u.tcp.port); ++ ++ param.data.flow_nat_ipv4.nat_ip = htonl(orig->src.u3.ip); ++ param.data.flow_nat_ipv4.nat_port = htons(orig->src.u.tcp.port); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv4 from-WAN flow!!\n", __FUNCTION__); ++ } ++ ++ return 0; ++} ++ ++static int sppe_udp_flow_add_ipv6(struct nf_conn *ct) ++{ ++ SPPE_PARAM param; ++ struct nf_conntrack_tuple *orig, *reply; ++ ++ orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; ++ reply = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; ++ ++ memset(¶m, 0, sizeof(SPPE_PARAM)); ++ ++ param.cmd = SPPE_CMD_FLOW_ROUTE_IPV6; ++ param.op = SPPE_OP_SET; ++ ++ /* from-LAN flow */ ++ param.data.flow_route_ipv6.fw = 0; ++ param.data.flow_route_ipv6.sip[0] = htonl(orig->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(orig->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(orig->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(orig->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(orig->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(orig->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(orig->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(orig->dst.u3.ip6[3]); ++ param.data.flow_route_ipv6.l4_prot = SPPE_PROT_UDP; ++ param.data.flow_route_ipv6.l4.port.src = htons(orig->src.u.udp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(orig->dst.u.udp.port); ++ ++ if 
(sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv6 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ /* from-WAN flow */ ++ param.data.flow_route_ipv6.fw = 1; ++ param.data.flow_route_ipv6.sip[0] = htonl(reply->src.u3.ip6[0]); ++ param.data.flow_route_ipv6.sip[1] = htonl(reply->src.u3.ip6[1]); ++ param.data.flow_route_ipv6.sip[2] = htonl(reply->src.u3.ip6[2]); ++ param.data.flow_route_ipv6.sip[3] = htonl(reply->src.u3.ip6[3]); ++ param.data.flow_route_ipv6.dip[0] = htonl(reply->dst.u3.ip6[0]); ++ param.data.flow_route_ipv6.dip[1] = htonl(reply->dst.u3.ip6[1]); ++ param.data.flow_route_ipv6.dip[2] = htonl(reply->dst.u3.ip6[2]); ++ param.data.flow_route_ipv6.dip[3] = htonl(reply->dst.u3.ip6[3]); ++ param.data.flow_route_ipv6.l4.port.src = htons(reply->src.u.udp.port); ++ param.data.flow_route_ipv6.l4.port.dst = htons(reply->dst.u.udp.port); ++ ++ if (sppe_func_hook(¶m)) { ++ printk("<0><%s> fail to add IPv6 from-LAN flow!!\n", __FUNCTION__); ++ } ++ ++ return 0; ++} ++ ++static int sppe_udp_flow_add(struct nf_conn *ct) ++{ ++ if (0 == sppe_hook_ready) { ++ return 0; ++ } ++ ++ if (AF_INET == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { ++ sppe_udp_flow_add_ipv4(ct); ++ return 0; ++ } else if (AF_INET6 == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { ++ sppe_udp_flow_add_ipv6(ct); ++ return 0; ++ } ++ ++ /* return fail */ ++ return (-1); ++} ++#endif ++ ++ + /* Returns verdict for packet, and may modify conntracktype */ + static int udp_packet(struct nf_conn *ct, + const struct sk_buff *skb, +@@ -77,7 +196,15 @@ static int udp_packet(struct nf_conn *ct + nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream); + /* Also, more likely to be important, and not a probe */ + if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) ++#if defined (CONFIG_CNS3XXX_SPPE) ++ { ++#endif + nf_conntrack_event_cache(IPCT_STATUS, ct); ++#if defined (CONFIG_CNS3XXX_SPPE) ++ /* Add SPPE hardware flow */ ++ sppe_udp_flow_add(ct); ++ } ++#endif + } else + 
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout); + diff --git a/target/linux/cns3xxx/patches-2.6.31/206-cns3xxx_raid_support.patch b/target/linux/cns3xxx/patches-2.6.31/206-cns3xxx_raid_support.patch new file mode 100644 index 0000000000..d6a8bef580 --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/206-cns3xxx_raid_support.patch @@ -0,0 +1,438 @@ +--- a/crypto/xor.c ++++ b/crypto/xor.c +@@ -25,6 +25,26 @@ + /* The xor routines to use. */ + static struct xor_block_template *active_template; + ++#ifdef CONFIG_CNS3XXX_RAID ++extern void do_cns_rdma_xorgen(unsigned int src_no, unsigned int bytes, ++ void **bh_ptr, void *dst_ptr); ++/** ++ * xor_blocks - one pass xor ++ * @src_count: source count ++ * @bytes: length in bytes ++ * @dest: dest ++ * @srcs: srcs ++ * ++ * Desc: ++ * 1. dest = xor(srcs[0...src_count-1]) within one calc ++ * 2. don't care if dest also be placed in srcs list or not. ++ */ ++void xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, ++ void **srcs) ++{ ++ do_cns_rdma_xorgen(src_count, bytes, srcs, dest); ++} ++#else + void + xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs) + { +@@ -51,6 +71,7 @@ xor_blocks(unsigned int src_count, unsig + p4 = (unsigned long *) srcs[3]; + active_template->do_5(bytes, dest, p1, p2, p3, p4); + } ++#endif /* CONFIG_CNS3XXX_RAID */ + EXPORT_SYMBOL(xor_blocks); + + /* Set of all registered templates. 
*/ +@@ -95,7 +116,11 @@ do_xor_speed(struct xor_block_template * + speed / 1000, speed % 1000); + } + ++#ifdef CONFIG_CNS3XXX_RAID ++int ++#else + static int __init ++#endif /* CONFIG_CNS3XXX_RAID */ + calibrate_xor_blocks(void) + { + void *b1, *b2; +@@ -139,7 +164,10 @@ calibrate_xor_blocks(void) + if (f->speed > fastest->speed) + fastest = f; + } +- ++#ifdef CONFIG_CNS3XXX_RAID ++ /* preferred */ ++ fastest = template_list; ++#endif /* CONFIG_CNS3XXX_RAID */ + printk(KERN_INFO "xor: using function: %s (%d.%03d MB/sec)\n", + fastest->name, fastest->speed / 1000, fastest->speed % 1000); + +@@ -151,10 +179,20 @@ calibrate_xor_blocks(void) + return 0; + } + +-static __exit void xor_exit(void) { } ++#ifndef CONFIG_CNS3XXX_RAID ++static __exit void xor_exit(void) ++{ ++} ++#endif /* ! CONFIG_CNS3XXX_RAID */ + + MODULE_LICENSE("GPL"); + ++#ifdef CONFIG_CNS3XXX_RAID ++/* ++ * Calibrate in R5 init. ++ */ ++#else + /* when built-in xor.o must initialize before drivers/md/md.o */ + core_initcall(calibrate_xor_blocks); + module_exit(xor_exit); ++#endif /* ! CONFIG_CNS3XXX_RAID */ +--- a/drivers/md/Makefile ++++ b/drivers/md/Makefile +@@ -17,7 +17,7 @@ raid6_pq-y += raid6algos.o raid6recov.o + raid6int8.o raid6int16.o raid6int32.o \ + raid6altivec1.o raid6altivec2.o raid6altivec4.o \ + raid6altivec8.o \ +- raid6mmx.o raid6sse1.o raid6sse2.o ++ raid6mmx.o raid6sse1.o raid6sse2.o raid6cns.o + hostprogs-y += mktables + + # Note: link order is important. 
All raid personalities +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -1817,11 +1817,30 @@ static void compute_block_2(struct strip + compute_parity6(sh, UPDATE_PARITY); + return; + } else { ++#ifdef CONFIG_CNS3XXX_RAID ++ void *ptrs[disks]; ++ ++ count = 0; ++ i = d0_idx; ++ do { ++ ptrs[count++] = page_address(sh->dev[i].page); ++ i = raid6_next_disk(i, disks); ++ if (i != dd_idx1 && i != dd_idx2 && ++ !test_bit(R5_UPTODATE, &sh->dev[i].flags)) ++ printk ++ ("compute_2 with missing block %d/%d\n", ++ count, i); ++ } while (i != d0_idx); ++ ++ raid6_dataq_recov(disks, STRIPE_SIZE, faila, ptrs); ++#else ++ + /* We're missing D+Q; recompute D from P */ + compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? + dd_idx2 : dd_idx1), + 0); + compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ ++#endif /* CONFIG_CNS3XXX_RAID */ + return; + } + } +@@ -5412,8 +5431,21 @@ static struct mdk_personality raid4_pers + .quiesce = raid5_quiesce, + }; + ++#ifdef CONFIG_CNS3XXX_RAID ++extern int calibrate_xor_blocks(void); ++#endif /* CONFIG_CNS3XXX_RAID */ ++ + static int __init raid5_init(void) + { ++ ++#ifdef CONFIG_CNS3XXX_RAID ++ /* Just execute calibrate xor blocks */ ++ int e; ++ e = calibrate_xor_blocks(); ++ if (e) ++ return e; ++#endif /* CONFIG_CNS3XXX_RAID */ ++ + register_md_personality(&raid6_personality); + register_md_personality(&raid5_personality); + register_md_personality(&raid4_personality); +--- a/drivers/md/raid6algos.c ++++ b/drivers/md/raid6algos.c +@@ -49,6 +49,9 @@ extern const struct raid6_calls raid6_al + extern const struct raid6_calls raid6_altivec2; + extern const struct raid6_calls raid6_altivec4; + extern const struct raid6_calls raid6_altivec8; ++#ifdef CONFIG_CNS3XXX_RAID ++extern const struct raid6_calls raid6_cns_raid; ++#endif /* CONFIG_CNS3XXX_RAID */ + + const struct raid6_calls * const raid6_algos[] = { + &raid6_intx1, +@@ -78,6 +81,11 @@ const struct raid6_calls * const raid6_a + &raid6_altivec4, + &raid6_altivec8, + #endif 
++#ifdef CONFIG_CNS3XXX_RAID ++ /* CNS3000 HW RAID acceleration */ ++ &raid6_cns_raid, ++#endif /* CONFIG_CNS3XXX_RAID */ ++ + NULL + }; + +@@ -125,7 +133,9 @@ int __init raid6_select_algo(void) + if ( !(*algo)->valid || (*algo)->valid() ) { + perf = 0; + ++#ifndef CONFIG_CNS3XXX_RAID + preempt_disable(); ++#endif + j0 = jiffies; + while ( (j1 = jiffies) == j0 ) + cpu_relax(); +@@ -134,7 +144,9 @@ int __init raid6_select_algo(void) + (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs); + perf++; + } ++#ifndef CONFIG_CNS3XXX_RAID + preempt_enable(); ++#endif + + if ( (*algo)->prefer > bestprefer || + ((*algo)->prefer == bestprefer && +--- /dev/null ++++ b/drivers/md/raid6cns.c +@@ -0,0 +1,38 @@ ++/* ++ * raid6cns.c ++ * ++ * CNS3xxx xor & gen_syndrome functions ++ * ++ */ ++ ++#ifdef CONFIG_CNS3XXX_RAID ++ ++#include ++ ++extern void do_cns_rdma_gfgen(unsigned int src_no, unsigned int bytes, void **bh_ptr, ++ void *p_dst, void *q_dst); ++ ++/** ++ * raid6_cnsraid_gen_syndrome - CNSRAID Syndrome Generate ++ * ++ * @disks: raid disks ++ * @bytes: length ++ * @ptrs: already arranged stripe ptrs, ++ * disk0=[0], diskNNN=[disks-3], ++ * P/Q=[z0+1] & [z0+2], or, [disks-2], [disks-1] ++ */ ++static void raid6_cnsraid_gen_syndrome(int disks, size_t bytes, void **ptrs) ++{ ++ do_cns_rdma_gfgen(disks - 2, bytes, ptrs, ptrs[disks-2], ptrs[disks-1]); ++} ++ ++const struct raid6_calls raid6_cns_raid = { ++ raid6_cnsraid_gen_syndrome, /* callback */ ++ NULL, /* always valid */ ++ "CNS-RAID", /* name */ ++ 1 /* preferred: revise it to "0" to compare/compete with others algos */ ++}; ++ ++EXPORT_SYMBOL(raid6_cns_raid); ++ ++#endif /* CONFIG_CNS3XXX_RAID */ +--- a/drivers/md/raid6recov.c ++++ b/drivers/md/raid6recov.c +@@ -20,6 +20,136 @@ + + #include + ++#ifdef CONFIG_CNS3XXX_RAID ++#define R6_RECOV_PD 1 ++#define R6_RECOV_DD 2 ++#define R6_RECOV_DQ 3 ++extern void do_cns_rdma_gfgen_pd_dd_dq(unsigned int src_no, unsigned int bytes, ++ void **bh_ptr, void *w1_dst, ++ void *w2_dst, int 
pd_dd_qd, ++ unsigned int w1_idx, unsigned int w2_idx, ++ unsigned int *src_idx); ++ ++/** ++ * @disks: nr_disks ++ * @bytes: len ++ * @faila: 1st failed DD ++ * @ptrs: ptrs by order {d0, d1, ..., da, ..., dn, P, Q} ++ * ++ * Desc: ++ * new_read_ptrs = {d0, d1, ... dn, Q} ++ * dd1 = faila ++ * p_dst = P ++ */ ++void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs) ++{ ++ int cnt = 0; ++ int count = 0; ++ void *p_dst, *q; ++ void *dd1_dst; ++ void *new_read_ptrs[disks - 2]; ++ unsigned int read_idx[disks - 2]; ++ ++ q = ptrs[disks - 1]; ++ p_dst = ptrs[disks - 2]; ++ dd1_dst = ptrs[faila]; ++ ++ while (cnt < disks) { ++ if (cnt != faila && cnt != disks - 2) { ++ new_read_ptrs[count] = ptrs[cnt]; ++ read_idx[count] = cnt; ++ count++; ++ } ++ cnt++; ++ } ++ ++ do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes, ++ new_read_ptrs, p_dst, dd1_dst, ++ R6_RECOV_PD, disks - 1, faila + 1, read_idx); ++} ++ ++/** ++ * @disks: nr_disks ++ * @bytes: len ++ * @faila: 1st failed DD ++ * @failb: 2nd failed DD ++ * @ptrs: ptrs by order {d0, d1, ..., da, ..., db, ..., dn, P, Q} ++ * ++ * Desc: ++ * new_read_ptrs = {d0, d1, ... 
dn, P, Q} ++ * dd1_dst = faila ++ * dd2_dst = failb ++ */ ++void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, ++ void **ptrs) ++{ ++ ++ int cnt = 0; ++ int count = 0; ++ void *p, *q; ++ void *dd1_dst, *dd2_dst; ++ void *new_read_ptrs[disks - 2]; ++ unsigned int read_idx[disks - 2]; ++ ++ q = ptrs[disks - 1]; ++ p = ptrs[disks - 2]; ++ dd1_dst = ptrs[faila]; ++ dd2_dst = ptrs[failb]; ++ ++ while (cnt < disks) { ++ if (cnt != faila && cnt != failb) { ++ new_read_ptrs[count] = ptrs[cnt]; ++ read_idx[count] = cnt; ++ count++; ++ } ++ cnt++; ++ } ++ ++ do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes, ++ new_read_ptrs, dd1_dst, dd2_dst, ++ R6_RECOV_DD, faila + 1, failb + 1, read_idx); ++} ++ ++/** ++ * @disks: nr_disks ++ * @bytes: len ++ * @faila: 1st failed DD ++ * @ptrs: ptrs by order {d0, d1, ..., da, ..., dn, P, Q} ++ * ++ * Desc: ++ * new_read_ptrs = {d0, d1, ... dn, P} ++ * dd1 = faila ++ * q_dst = Q ++ */ ++void raid6_dataq_recov(int disks, size_t bytes, int faila, void **ptrs) ++{ ++ int cnt = 0; ++ int count = 0; ++ void *q_dst, *p; ++ void *dd1_dst; ++ void *new_read_ptrs[disks - 2]; ++ unsigned int read_idx[disks - 2]; ++ ++ p = ptrs[disks - 2]; ++ q_dst = ptrs[disks - 1]; ++ dd1_dst = ptrs[faila]; ++ ++ while (cnt < disks) { ++ if (cnt != faila && cnt != disks - 1) { ++ new_read_ptrs[count] = ptrs[cnt]; ++ read_idx[count] = cnt; ++ count++; ++ } ++ cnt++; ++ } ++ ++ do_cns_rdma_gfgen_pd_dd_dq(disks - 2, bytes, ++ new_read_ptrs, dd1_dst, q_dst, ++ R6_RECOV_DQ, faila + 1, disks, read_idx); ++} ++ ++#else /* CONFIG_CNS3XXX_RAID */ ++ + /* Recover two failed data blocks.
*/ + void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs) +@@ -96,6 +226,7 @@ void raid6_datap_recov(int disks, size_t + } + } + EXPORT_SYMBOL_GPL(raid6_datap_recov); ++#endif /* CONFIG_CNS3XXX_RAID */ + + #ifndef __KERNEL__ + /* Testing only */ +--- a/include/linux/raid/pq.h ++++ b/include/linux/raid/pq.h +@@ -100,6 +100,9 @@ void raid6_2data_recov(int disks, size_t + void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); + void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); ++#ifdef CONFIG_CNS3XXX_RAID ++void raid6_dataq_recov(int disks, size_t bytes, int faila, void **ptrs); ++#endif /* CONFIG_CNS3XXX_RAID */ + + /* Some definitions to allow code to be compiled for testing in userspace */ + #ifndef __KERNEL__ +--- a/include/linux/raid/xor.h ++++ b/include/linux/raid/xor.h +@@ -1,7 +1,11 @@ + #ifndef _XOR_H + #define _XOR_H + ++#ifdef CONFIG_CNS3XXX_RAID ++#define MAX_XOR_BLOCKS 32 ++#else + #define MAX_XOR_BLOCKS 4 ++#endif /* CONFIG_CNS3XXX_RAID */ + + extern void xor_blocks(unsigned int count, unsigned int bytes, + void *dest, void **srcs); +--- a/mm/mempool.c ++++ b/mm/mempool.c +@@ -250,6 +250,28 @@ repeat_alloc: + } + EXPORT_SYMBOL(mempool_alloc); + ++#ifdef CONFIG_CNS3XXX_RAID ++/** ++ * acs_mempool_alloc - allocate an element from a specific memory pool ++ * @pool: pointer to the memory pool which was allocated via ++ * mempool_create(). ++ * ++ * this function differs from mempool_alloc by directly allocating an element ++ * from @pool without calling @pool->alloc(). ++ */ ++void *acs_mempool_alloc(mempool_t * pool) ++{ ++ unsigned long flags; ++ void *element = NULL; ++ ++ spin_lock_irqsave(&pool->lock, flags); ++ if (likely(pool->curr_nr)) ++ element = remove_element(pool); ++ spin_unlock_irqrestore(&pool->lock, flags); ++ return element; ++} ++#endif /* CONFIG_CNS3XXX_RAID */ ++ + /** + * mempool_free - return an element to the pool. + * @element: pool element pointer. 
diff --git a/target/linux/cns3xxx/patches-2.6.31/207-cns3xxx_spi_support.patch b/target/linux/cns3xxx/patches-2.6.31/207-cns3xxx_spi_support.patch new file mode 100644 index 0000000000..5556010b06 --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/207-cns3xxx_spi_support.patch @@ -0,0 +1,1071 @@ +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -236,6 +236,39 @@ config SPI_XILINX + See the "OPB Serial Peripheral Interface (SPI) (v1.00e)" + Product Specification document (DS464) for hardware details. + ++config SPI_CNS3XXX ++ tristate "CNS3XXX SPI controller" ++ depends on ARCH_CNS3XXX && SPI_MASTER && EXPERIMENTAL ++ select SPI_BITBANG ++ help ++ This enables using the CNS3XXX SPI controller in master ++ mode. ++ ++config SPI_CNS3XXX_DEBUG ++ boolean "Debug support for CNS3XXX SPI drivers" ++ depends on SPI_CNS3XXX ++ help ++ Say "yes" to enable debug messaging ++ ++config SPI_CNS3XXX_2IOREAD ++ bool "CNS3XXX SPI 2 IO Read Mode" ++ depends on SPI_CNS3XXX ++ help ++ This enables 2 IO Read Mode ++ ++config SPI_CNS3XXX_USEDMA ++ bool "CNS3XXX SPI DMA Mode" ++ depends on SPI_CNS3XXX ++ select CNS3XXX_DMAC ++ help ++ This enables DMA Mode ++ ++config SPI_CNS3XXX_USEDMA_DEBUG ++ boolean "Debug support for CNS3XXX SPI DMA drivers" ++ depends on SPI_CNS3XXX_USEDMA ++ help ++ Say "yes" to enable debug messaging ++ + # + # Add new SPI master controllers in alphabetical order above this line + # +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -32,6 +32,7 @@ obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24x + obj-$(CONFIG_SPI_TXX9) += spi_txx9.o + obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o + obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o ++obj-$(CONFIG_SPI_CNS3XXX) += spi_cns3xxx.o + # ... add above this line ... 
+ + # SPI protocol drivers (device/link on bus) +--- a/drivers/spi/spi_bitbang.c ++++ b/drivers/spi/spi_bitbang.c +@@ -334,6 +334,14 @@ static void bitbang_work(struct work_str + */ + if (!m->is_dma_mapped) + t->rx_dma = t->tx_dma = 0; ++ ++#ifdef CONFIG_ARCH_CNS3XXX ++ if (t->transfer_list.next == &m->transfers) { ++ t->last_in_message_list = 1; ++ } else { ++ t->last_in_message_list = 0; ++ } ++#endif + status = bitbang->txrx_bufs(spi, t); + } + if (status > 0) +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -769,6 +769,89 @@ int spi_write_then_read(struct spi_devic + } + EXPORT_SYMBOL_GPL(spi_write_then_read); + ++#ifdef CONFIG_ARCH_CNS3XXX ++/** ++ * spi_write_read_sync - SPI synchronous write & read ++ * @spi: device with which data will be exchanged ++ * @txbuf: data to be written (need not be dma-safe) ++ * @n_tx: size of txbuf, in bytes ++ * @rxbuf: buffer into which data will be read ++ * @n_rx: size of rxbuf, in bytes (need not be dma-safe) ++ * ++ * This performs a half duplex MicroWire style transaction with the ++ * device, sending txbuf and then reading rxbuf. The return value ++ * is zero for success, else a negative errno status code. ++ * This call may only be used from a context that may sleep. ++ * ++ * Parameters to this routine are always copied using a small buffer; ++ * performance-sensitive or bulk transfer code should instead use ++ * spi_{async,sync}() calls with dma-safe buffers. ++ */ ++int spi_write_read_sync(struct spi_device *spi, ++ const u8 *txbuf, unsigned n_tx, ++ u8 *rxbuf, unsigned n_rx) ++{ ++ static DECLARE_MUTEX(lock); ++ ++ int status; ++ struct spi_message message; ++ struct spi_transfer x; ++ u8 *local_buf; ++ ++ /* Use preallocated DMA-safe buffer. We can't avoid copying here, ++ * (as a pure convenience thing), but we can keep heap costs ++ * out of the hot path ... 
++ */ ++#if 0 ++ while (!str8131_spi_bus_idle()){ ++ printk("spi bus is not idle \n"); // do nothing ++ } ++ while (!str8131_spi_tx_buffer_empty()){ ++ printk("spi tx buffer is not empty \n"); // do nothing ++ } ++#endif ++ if ((n_tx + n_rx) > SPI_BUFSIZ) ++ return -EINVAL; ++ spi_message_init(&message); ++ memset(&x, 0, sizeof x); ++ x.len = n_tx; ++ spi_message_add_tail(&x, &message); ++ ++ /* ... unless someone else is using the pre-allocated buffer */ ++ if (down_trylock(&lock)) { ++ local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); ++ if (!local_buf) ++ return -ENOMEM; ++ } else ++ local_buf = buf; ++ ++ memcpy(local_buf, txbuf, n_tx); ++ x.tx_buf = local_buf; ++ x.rx_buf = local_buf + n_tx; ++ ++ /* do the i/o */ ++ status = spi_sync(spi, &message); ++ if (status == 0) { ++ memcpy(rxbuf, x.rx_buf, n_rx); ++ status = message.status; ++ } ++ ++ if (x.tx_buf == buf) ++ up(&lock); ++ else ++ kfree(local_buf); ++ ++ return status; ++} ++ ++EXPORT_SYMBOL_GPL(spi_write_read_sync); ++#endif /* CONFIG_ARCH_CNS3XXX */ ++ ++ ++ ++ ++ ++ + /*-------------------------------------------------------------------------*/ + + static int __init spi_init(void) +--- /dev/null ++++ b/drivers/spi/spi_cns3xxx.c +@@ -0,0 +1,878 @@ ++/******************************************************************************* ++ * ++ * CNS3XXX SPI controller driver (master mode only) ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. ++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define LE8221_SPI_CS 1 ++#define SI3226_SPI_CS 1 ++ ++#define CNS3XXX_SPI_INTERRUPT ++#undef CNS3XXX_SPI_INTERRUPT /* Interrupt is not supported for D2 and SEN */ ++ ++/* ++ * define access macros ++ */ ++#define SPI_MEM_MAP_VALUE(reg_offset) (*((u32 volatile *)(CNS3XXX_SSP_BASE_VIRT + reg_offset))) ++ ++#define SPI_CONFIGURATION_REG SPI_MEM_MAP_VALUE(0x40) ++#define SPI_SERVICE_STATUS_REG SPI_MEM_MAP_VALUE(0x44) ++#define SPI_BIT_RATE_CONTROL_REG SPI_MEM_MAP_VALUE(0x48) ++#define SPI_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x4C) ++#define SPI_TRANSMIT_BUFFER_REG SPI_MEM_MAP_VALUE(0x50) ++#define SPI_RECEIVE_CONTROL_REG SPI_MEM_MAP_VALUE(0x54) ++#define SPI_RECEIVE_BUFFER_REG SPI_MEM_MAP_VALUE(0x58) ++#define SPI_FIFO_TRANSMIT_CONFIG_REG SPI_MEM_MAP_VALUE(0x5C) ++#define SPI_FIFO_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x60) ++#define SPI_FIFO_RECEIVE_CONFIG_REG SPI_MEM_MAP_VALUE(0x64) ++#define SPI_INTERRUPT_STATUS_REG SPI_MEM_MAP_VALUE(0x68) ++#define SPI_INTERRUPT_ENABLE_REG SPI_MEM_MAP_VALUE(0x6C) ++ ++#define SPI_TRANSMIT_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x50) ++#define SPI_RECEIVE_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x58) ++ ++/* Structure for SPI controller of CNS3XXX SOCs */ ++struct cns3xxx_spi { ++ /* bitbang has to be 
first */ ++ struct spi_bitbang bitbang; ++ struct completion done; ++ wait_queue_head_t wait; ++ ++ int len; ++ int count; ++ int last_in_message_list; ++ ++ /* data buffers */ ++ const unsigned char *tx; ++ unsigned char *rx; ++ ++ struct spi_master *master; ++ struct platform_device *pdev; ++ struct device *dev; ++}; ++ ++static inline u8 cns3xxx_spi_bus_idle(void) ++{ ++ return ((SPI_SERVICE_STATUS_REG & 0x1) ? 0 : 1); ++} ++ ++static inline u8 cns3xxx_spi_tx_buffer_empty(void) ++{ ++ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 3)) ? 1 : 0); ++} ++ ++static inline u8 cns3xxx_spi_rx_buffer_full(void) ++{ ++ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 2)) ? 1 : 0); ++} ++ ++u8 cns3xxx_spi_tx_rx(u8 tx_channel, u8 tx_eof, u32 tx_data, ++ u32 * rx_data) ++{ ++ u8 rx_channel; ++ u8 rx_eof; ++ ++ while (!cns3xxx_spi_bus_idle()) ; // do nothing ++ ++ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); ++ ++ SPI_TRANSMIT_BUFFER_REG = tx_data; ++ ++ while (!cns3xxx_spi_rx_buffer_full()) ; // do nothing ++ ++ rx_channel = SPI_RECEIVE_CONTROL_REG & 0x3; ++ rx_eof = (SPI_RECEIVE_CONTROL_REG & (0x1 << 2)) ? 
1 : 0; ++ ++ *rx_data = SPI_RECEIVE_BUFFER_REG; ++ ++ if ((tx_channel != rx_channel) || (tx_eof != rx_eof)) { ++ return 0; ++ } else { ++ return 1; ++ } ++} ++ ++u8 cns3xxx_spi_tx(u8 tx_channel, u8 tx_eof, u32 tx_data) ++{ ++ ++ while (!cns3xxx_spi_bus_idle()) ; // do nothing ++ ++ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); ++ ++ SPI_TRANSMIT_BUFFER_REG = tx_data; ++ ++ return 1; ++} ++ ++ ++ ++#ifdef CONFIG_SPI_CNS3XXX_DEBUG ++static void spi_slave_probe(void) ++{ ++ int i; ++ u32 rx_data1, rx_data2, rx_data3; ++ ++ cns3xxx_spi_tx_rx(0, 0, 0x9f, &rx_data1); ++ cns3xxx_spi_tx_rx(0, 0, 0xff, &rx_data1); ++ cns3xxx_spi_tx_rx(0, 0, 0xff, &rx_data2); ++ cns3xxx_spi_tx_rx(0, 1, 0xff, &rx_data3); ++ printk("[SPI_CNS3XXX_DEBUG] manufacturer: %x\n", rx_data1); ++ printk("[SPI_CNS3XXX_DEBUG] device: %x\n", ++ ((rx_data2 & 0xff) << 8) | (u16) (rx_data3 & 0xff)); ++ ++ cns3xxx_spi_tx_rx(0, 0, 0x03, &rx_data1); ++ cns3xxx_spi_tx_rx(0, 0, 0x00, &rx_data1); ++ cns3xxx_spi_tx_rx(0, 0, 0x00, &rx_data1); ++ cns3xxx_spi_tx_rx(0, 0, 0x00, &rx_data1); ++ for (i = 0; i < 15; i++) { ++ cns3xxx_spi_tx_rx(0, 0, 0xff, &rx_data1); ++ printk("[SPI_CNS3XXX_DEBUG] flash[%02d]:0x%02x\n", i, ++ rx_data1 & 0xff); ++ } ++ cns3xxx_spi_tx_rx(0, 1, 0xff, &rx_data1); ++ printk("[SPI_CNS3XXX_DEBUG] flash[%02d]:0x%02x\n", i, rx_data1 & 0xff); ++} ++#else ++#define spi_slave_probe() do{}while(0) ++#endif ++ ++static inline struct cns3xxx_spi *to_hw(struct spi_device *sdev) ++{ ++ return spi_master_get_devdata(sdev->master); ++} ++ ++static int cns3xxx_spi_setup_transfer(struct spi_device *spi, ++ struct spi_transfer *t) ++{ ++ return 0; ++} ++ ++static void cns3xxx_spi_chipselect(struct spi_device *spi, int value) ++{ ++ unsigned int spi_config; ++ ++ switch (value) { ++ case BITBANG_CS_INACTIVE: ++ break; ++ ++ case BITBANG_CS_ACTIVE: ++ spi_config = SPI_CONFIGURATION_REG; ++ 
++ if (spi->mode & SPI_CPHA) ++ spi_config |= (0x1 << 13); ++ else ++ spi_config &= ~(0x1 << 13); ++ ++ if (spi->mode & SPI_CPOL) ++ spi_config |= (0x1 << 14); ++ else ++ spi_config &= ~(0x1 << 14); ++ ++ /* write new configration */ ++ SPI_CONFIGURATION_REG = spi_config; ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (spi->chip_select & 0x3); ++ ++#if defined(CONFIG_LE8221_CONTROL) ++ if (spi->chip_select == LE8221_SPI_CS) { ++ SPI_CONFIGURATION_REG |= (0x1 << 9); ++ } ++#elif defined (CONFIG_SI3226_CONTROL_API) ++ if (spi->chip_select == SI3226_SPI_CS) { ++ SPI_CONFIGURATION_REG &= ~(0x1 << 9); ++ } ++#endif ++ break; ++ } ++} ++ ++static int cns3xxx_spi_setup(struct spi_device *spi) ++{ ++ if (!spi->bits_per_word) ++ spi->bits_per_word = 8; ++ ++ return 0; ++} ++ ++#ifdef CONFIG_SPI_CNS3XXX_USEDMA ++ ++int cns3xxx_spi_dma_irq_handler(void *pdata) ++{ ++ ++ struct cns3xxx_spi *hw = pdata; ++ complete(&hw->done); ++ ++ return 0; ++} ++ ++static int cns3xxx_spi_dma_initialize(int *rxchan, int *txchan, int *rxevtno, ++ int *txevtno, void *handlerargs) ++{ ++ *rxchan = dmac_get_channel(cns3xxx_spi_dma_irq_handler, handlerargs); ++ if ((*rxchan) == -1) ++ goto fail1; ++ *txchan = dmac_get_channel(NULL, NULL); ++ if ((*txchan) == -1) ++ goto fail2; ++ *rxevtno = 9; ++ if (dmac_get_event(*rxchan, *rxevtno) == -1) ++ goto fail3; ++ *txevtno = 10; ++ if (dmac_get_event(*txchan, *txevtno) == -1) ++ goto fail4; ++ return 0; ++ ++fail4: ++ dmac_release_event(*rxchan, *rxevtno); ++fail3: ++ dmac_release_channel(*txchan); ++fail2: ++ dmac_release_channel(*rxchan); ++fail1: ++ return -1; ++} ++ ++static int cns3xxx_spi_start_dma(int rch, int tch, int rev, int tev, ++ struct spi_transfer *t, struct cns3xxx_spi *hw) ++{ ++ static void *dummy; ++ static dma_addr_t dummy_dma; ++ dma_addr_t rdma, tdma; ++ int rx_inc, tx_inc; ++ int lc0, totlps, lc1, rump; ++ u32 rx_data; ++ ++ if (!dummy) { ++ dummy = dma_alloc_coherent(NULL, 16, &dummy_dma, GFP_KERNEL); 
++#ifdef CONFIG_SPI_CNS3XXX_DEBUG_DMA ++ printk("Allocated Memory for dummy buffer va:%p,pa:%x\n", dummy, ++ dummy_dma); ++#endif ++ } ++ if (!dummy) { ++ return -1; ++ } ++ *((uint32_t *) dummy) = 0xffffffff; ++ ++ (t->tx_buf) ? (tdma = t->tx_dma, tx_inc = 1) : ++ (tdma = dummy_dma, tx_inc = 0); ++ (t->rx_buf) ? (rdma = t->rx_dma, rx_inc = 1) : ++ (rdma = dummy_dma, rx_inc = 0); ++ ++#ifdef CONFIG_SPI_CNS3XXX_DEBUG_DMA ++ printk("Here with tdma %x, rdma %x\n", tdma, rdma); ++#endif ++ ++ if(t->len < 3) { ++ if(t->len == 2){ ++ cns3xxx_spi_tx_rx(0,0,(t->tx_buf) ? hw->tx[0] : 0xff ,&rx_data); ++ if(!(t->tx_buf)) ++ hw->rx[0] = rx_data & 0xff; ++ } ++ cns3xxx_spi_dma_irq_handler(hw); ++ return 0; ++ } ++ ++ ++ totlps = t->len - 1 -1; ++ if (totlps > 0x100) { ++ lc0 = 0x100; ++ lc1 = totlps / lc0; ++ rump = totlps % lc0; ++ } else { ++ lc0 = totlps; ++ lc1 = 0; ++ rump = 0; ++ } ++ ++ if(t->tx_buf) { ++ cns3xxx_spi_tx(0,0,*((uint32_t *) t->tx_buf)); ++ tdma+=1; ++ } ++ else { ++ cns3xxx_spi_tx(0,0,0xff); ++ } ++ ++ //SPI_RECEIVE_BUFFER_REG; ++ { ++ DMAC_DMAMOV(tch, SAR, tdma); ++ DMAC_DMAMOV(tch, DAR, SPI_TRANSMIT_BUFFER_REG_ADDR); ++ DMAC_DMAMOV(tch, CCR, ++ dmac_create_ctrlval(tx_inc, 1, 1, 0, 1, 1, 0)); ++ //DMAC_WFE(tch, rev); ++ if (lc1) ++ DMAC_DMALP(tch, 1, lc1); ++ DMAC_DMALP(tch, 0, lc0); ++ DMAC_WFE(tch, rev); ++ DMAC_DMALDS(tch); ++ DMAC_DMASTS(tch); ++ DMAC_DMAWMB(tch); ++ DMAC_DMASEV(tch, tev); ++ DMAC_DMALPEND(tch, 0, ++ DMAWFE_INSTR_SIZE + DMASEV_INSTR_SIZE + ++ DMAWMB_INSTR_SIZE + DMAST_INSTR_SIZE + ++ DMALD_INSTR_SIZE, 1); ++ if (lc1) ++ DMAC_DMALPEND(tch, 1, ++ DMALP_INSTR_SIZE + DMALPEND_INSTR_SIZE + ++ DMAWFE_INSTR_SIZE + DMASEV_INSTR_SIZE + ++ DMAWMB_INSTR_SIZE + DMAST_INSTR_SIZE + ++ DMALD_INSTR_SIZE, 1); ++ ++ if (rump) { ++ DMAC_DMALP(tch, 0, rump); ++ DMAC_WFE(tch, rev); ++ DMAC_DMALDS(tch); ++ DMAC_DMASTS(tch); ++ DMAC_DMAWMB(tch); ++ DMAC_DMASEV(tch, tev); ++ DMAC_DMALPEND(tch, 0, ++ DMAWFE_INSTR_SIZE + DMASEV_INSTR_SIZE + ++ 
DMAWMB_INSTR_SIZE + DMAST_INSTR_SIZE + ++ DMALD_INSTR_SIZE, 1); ++ } ++ ++ ++ DMAC_DMAEND(tch); ++ DMAC_DMAGO(tch); ++ } ++ { ++ DMAC_DMAMOV(rch, SAR, SPI_RECEIVE_BUFFER_REG_ADDR); ++ DMAC_DMAMOV(rch, DAR, rdma); ++ DMAC_DMAMOV(rch, CCR, ++ dmac_create_ctrlval(0, 1, 1, rx_inc, 1, 1, 0)); ++ ++ if (lc1) ++ DMAC_DMALP(rch, 1, lc1); ++ DMAC_DMALP(rch, 0, lc0); ++ DMAC_DMAWFP(rch, DMAC_SPI_PERIPH_ID, PERIPHERAL); ++ DMAC_DMALDP(rch, DMAC_SPI_PERIPH_ID, 0); ++ DMAC_DMASTS(rch); ++ DMAC_DMAWMB(rch); ++ DMAC_DMASEV(rch, rev); ++ DMAC_WFE(rch, tev); ++ DMAC_DMALPEND(rch, 0, ++ DMAWFE_INSTR_SIZE + DMASEV_INSTR_SIZE + ++ DMAWMB_INSTR_SIZE + DMAST_INSTR_SIZE + ++ DMALDP_INSTR_SIZE + DMAWFP_INSTR_SIZE, 1); ++ if (lc1) ++ DMAC_DMALPEND(rch, 1, ++ DMAWFE_INSTR_SIZE + ++ DMASEV_INSTR_SIZE + DMAWMB_INSTR_SIZE + ++ DMAST_INSTR_SIZE + DMALDP_INSTR_SIZE + ++ DMAWFP_INSTR_SIZE + DMALP_INSTR_SIZE + ++ DMALPEND_INSTR_SIZE, 1); ++ ++ ++ if (rump) { ++ DMAC_DMALP(rch, 0, rump); ++ DMAC_DMAWFP(rch, DMAC_SPI_PERIPH_ID, PERIPHERAL); ++ DMAC_DMALDP(rch, DMAC_SPI_PERIPH_ID, 0); ++ DMAC_DMASTS(rch); ++ DMAC_DMAWMB(rch); ++ DMAC_DMASEV(rch, rev); ++ DMAC_WFE(rch, tev); ++ DMAC_DMALPEND(rch, 0, ++ DMAWFE_INSTR_SIZE + ++ DMASEV_INSTR_SIZE + DMAWMB_INSTR_SIZE + ++ DMAST_INSTR_SIZE + DMALDP_INSTR_SIZE + ++ DMAWFP_INSTR_SIZE, 1); ++ } ++ // extra RX ++ DMAC_DMAWFP(rch, DMAC_SPI_PERIPH_ID, PERIPHERAL); ++ DMAC_DMALDP(rch, DMAC_SPI_PERIPH_ID, 0); ++ DMAC_DMASTS(rch); ++ DMAC_DMAWMB(rch); ++ ++ DMAC_DMAFLUSHP(rch, DMAC_SPI_PERIPH_ID); ++ DMAC_DMASEV(rch, rch); // This will generate an interrupt ++ DMAC_DMAEND(rch); ++ DMAC_DMAGO(rch); ++ } ++ return 0; ++} ++ ++static void cns3xxx_spi_dma_uninitialize(int rch, int tch, int revt, int tevt) ++{ ++ dmac_release_event(rch, revt); ++ dmac_release_event(tch, tevt); ++ dmac_release_channel(rch); ++ dmac_release_channel(tch); ++ return; ++} ++ ++#endif /* CONFIG_SPI_CNS3XXX_USEDMA */ ++ ++static int cns3xxx_spi_txrx(struct spi_device *spi, struct spi_transfer 
*t) ++{ ++ struct cns3xxx_spi *hw = to_hw(spi); ++#ifdef CONFIG_SPI_CNS3XXX_USEDMA ++ int spi_rxchan, spi_txchan, spi_rxevt, spi_txevt; ++ int rx_data; ++#endif ++ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", t->tx_buf, t->rx_buf, ++ t->len); ++ ++ hw->tx = t->tx_buf; ++ hw->rx = t->rx_buf; ++ hw->len = t->len; ++ hw->count = 0; ++ hw->last_in_message_list = t->last_in_message_list; ++ ++#ifdef CONFIG_SPI_CNS3XXX_USEDMA ++ init_completion(&hw->done); ++ ++ if (cns3xxx_spi_dma_initialize ++ (&spi_rxchan, &spi_txchan, &spi_rxevt, &spi_txevt, hw)) { ++ dev_dbg(&spi->dev, "%s:%d Could not initialize DMA. \n", ++ __FUNCTION__, __LINE__); ++ return 0; ++ } ++ ++ if (t->tx_buf) ++ t->tx_dma = ++ dma_map_single(NULL, t->tx_buf, t->len, DMA_TO_DEVICE); ++ if (t->rx_buf) ++ t->rx_dma = ++ dma_map_single(NULL, t->rx_buf, t->len, DMA_FROM_DEVICE); ++ ++ if (cns3xxx_spi_start_dma ++ (spi_rxchan, spi_txchan, spi_rxevt, spi_txevt, t, hw)) { ++ dev_dbg(&spi->dev, "Could not start DMA. \n"); ++ if (t->tx_buf) ++ dma_unmap_single(NULL, t->tx_dma, t->len, ++ DMA_TO_DEVICE); ++ t->tx_dma = 0; ++ if (t->rx_buf) ++ dma_unmap_single(NULL, t->rx_dma, t->len, ++ DMA_FROM_DEVICE); ++ t->rx_dma = 0; ++ cns3xxx_spi_dma_uninitialize(spi_rxchan, spi_txchan, spi_rxevt, ++ spi_txevt); ++ return 0; ++ } ++ ++ wait_for_completion(&hw->done); ++ ++ dev_dbg(&spi->dev, "DMA reported completion of transfer of %d bytes\n", ++ t->len - 1); ++ ++ if (t->tx_buf) ++ dma_unmap_single(NULL, t->tx_dma, t->len, DMA_TO_DEVICE); ++ t->tx_dma = 0; ++ if (t->rx_buf) ++ dma_unmap_single(NULL, t->rx_dma, t->len, DMA_FROM_DEVICE); ++ t->rx_dma = 0; ++ cns3xxx_spi_dma_uninitialize(spi_rxchan, spi_txchan, spi_rxevt, ++ spi_txevt); ++ ++ if (t->last_in_message_list) ++ cns3xxx_spi_tx_rx(spi->chip_select, 1, ++ (hw->tx) ? hw->tx[hw->len - 1] : 0xff, ++ &rx_data); ++ else ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, ++ (hw->tx) ? 
hw->tx[hw->len - 1] : 0xff, ++ &rx_data); ++ ++ if (hw->rx) ++ hw->rx[hw->len - 1] = rx_data & 0xff; ++ ++ return hw->len; ++ ++#else /* !CONFIG_SPI_CNS3XXX_USEDMA */ ++ ++#ifdef CNS3XXX_SPI_INTERRUPT ++ ++ init_completion(&hw->done); ++ ++ /* Effectively, we are enabling only the Receive Buffer Interrupt Enable */ ++ /* TX Buf Underrun and RX Buf Overrun are not to happen */ ++ SPI_INTERRUPT_ENABLE_REG = (0x1 << 2); ++// (0x0) | (0x1 << 2) | (0x0 << 3) | (0x1 << 6) | (0x1 << 7); ++ ++ /* Write data and wait for completion */ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (spi->chip_select & 0x3) | ++ ((((hw->last_in_message_list) && (hw->len == 1)) ? 0x1 : 0x0) << 2); ++ ++ SPI_TRANSMIT_BUFFER_REG = (hw->tx) ? hw->tx[hw->count] : 0xff; ++ ++ wait_for_completion(&hw->done); ++ ++ SPI_INTERRUPT_ENABLE_REG = 0; ++ ++ return hw->count; ++ ++#else /* !CNS3XXX_SPI_INTERRUPT */ ++ ++ init_completion(&hw->done); ++ ++ if (hw->tx) { ++ int i; ++ u32 rx_data; ++ for (i = 0; i < (hw->len - 1); i++) { ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->tx[%02d]: 0x%02x\n", i, ++ hw->tx[i]); ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], ++ &rx_data); ++ if (hw->rx) { ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++ } ++ ++ if (t->last_in_message_list) { ++ cns3xxx_spi_tx_rx(spi->chip_select, 1, hw->tx[i], ++ &rx_data); ++ if (hw->rx) { ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++ } else { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], ++ &rx_data); ++ } ++ goto done; ++ } ++ ++ if (hw->rx) { ++ int i; ++ u32 rx_data; ++ for (i = 0; i < (hw->len - 1); i++) { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", i, ++ hw->rx[i]); ++ } ++ ++ if (t->last_in_message_list) { ++ 
cns3xxx_spi_tx_rx(spi->chip_select, 1, 0xff, &rx_data); ++ } else { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); ++ } ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++done: ++ return hw->len; ++ ++#endif /* CNS3XXX_SPI_INTERRUPT */ ++ ++#endif /* CONFIG_SPI_CNS3XXX_USEDMA */ ++} ++ ++#ifdef CNS3XXX_SPI_INTERRUPT ++/* Driver supports single master only. ++ * We have disabled fifo, so we wait for the receive buff full interrupt. ++ * Receive Buff overrun, transmit buff underrun are not to happen ++ */ ++static irqreturn_t cns3xxx_spi_irq(int irq, void *dev) ++{ ++ struct cns3xxx_spi *hw = dev; ++ uint32_t int_status; ++ uint8_t data; ++ unsigned int count = hw->count; ++ ++ /* Read the interrupt status and clear interrupt */ ++ int_status = SPI_INTERRUPT_STATUS_REG; ++ ++ if (!(int_status & (0x1 << 2))) { ++ printk("DEBUG THIS ! Unexpected interrupt (status = 0x%x)", int_status); ++ /* Clearing spurious interrupts */ ++ SPI_INTERRUPT_STATUS_REG = (0xF << 4); ++ goto irq_done; ++ } ++ ++ /* Read to clear */ ++ data = SPI_RECEIVE_BUFFER_REG & 0xff; ++ ++ if (hw->rx) ++ hw->rx[hw->count] = data; ++ ++ hw->count++; ++ hw->len--; ++ ++ if (hw->len) { ++ SPI_TRANSMIT_CONTROL_REG |= ++ ((((hw->last_in_message_list) && (hw->len == 1)) ? 0x1 : 0x0) << 2); ++ SPI_TRANSMIT_BUFFER_REG = (hw->tx) ? hw->tx[hw->count] : 0xff; ++ } else { ++ complete(&hw->done); ++ } ++ ++irq_done: ++ return IRQ_HANDLED; ++} ++#endif ++ ++static void __init cns3xxx_spi_initial(void) ++{ ++ ++ /* share pin config. 
*/ ++#if 1 ++#if 0 ++ /* GPIOB18 is set to PCM by default */ ++ MISC_GPIOB_PIN_ENABLE_REG &= ~(MISC_GSW_P0_CRS_PIN); ++ gpio_direction_output(50, 1); ++#endif ++ PM_PLL_HM_PD_CTRL_REG &= ~(0x1 << 5); ++ HAL_MISC_ENABLE_SPI_PINS(); ++ HAL_MISC_ENABLE_PCM_PINS(); /* this just for PCM test */ ++ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SPI_PCM_I2C)); ++ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SPI_PCM_I2C)); ++#endif ++ ++ SPI_CONFIGURATION_REG = (((0x0 & 0x3) << 0) | /* 8bits shift length */ ++ (0x0 << 9) | /* SPI mode */ ++ (0x0 << 10) | /* disable FIFO */ ++ (0x1 << 11) | /* SPI master mode */ ++ (0x0 << 12) | /* disable SPI loopback mode */ ++ (0x1 << 13) | /* clock phase */ ++ (0x1 << 14) | /* clock polarity */ ++ (0x0 << 24) | /* disable - SPI data swap */ ++#ifdef CONFIG_SPI_CNS3XXX_2IOREAD ++ (0x1 << 29) | /* enable - 2IO Read mode */ ++#else ++ (0x0 << 29) | /* disablea - 2IO Read mode */ ++#endif ++ (0x0 << 30) | /* disable - SPI high speed read for system boot up */ ++ (0x0 << 31)); /* disable - SPI */ ++ ++ /* Set SPI bit rate PCLK/2 */ ++ SPI_BIT_RATE_CONTROL_REG = 0x1; ++ ++ /* Set SPI Tx channel 0 */ ++ SPI_TRANSMIT_CONTROL_REG = 0x0; ++ ++ /* Set Tx FIFO Threshold, Tx FIFO has 2 words */ ++ SPI_FIFO_TRANSMIT_CONFIG_REG &= ~(0x03 << 4); ++ SPI_FIFO_TRANSMIT_CONFIG_REG |= ((0x0 & 0x03) << 4); ++ ++ /* Set Rx FIFO Threshold, Rx FIFO has 2 words */ ++ SPI_FIFO_RECEIVE_CONFIG_REG &= ~(0x03 << 4); ++ SPI_FIFO_RECEIVE_CONFIG_REG |= ((0x0 & 0x03) << 4); ++ ++ /* Disable all interrupt */ ++ SPI_INTERRUPT_ENABLE_REG = 0x0; ++ ++ /* Clear spurious interrupt sources */ ++ SPI_INTERRUPT_STATUS_REG = (0x0F << 4); ++ ++ /* Enable SPI */ ++ SPI_CONFIGURATION_REG |= (0x1 << 31); ++ ++ return; ++} ++ ++static int __init cns3xxx_spi_probe(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct cns3xxx_spi *hw; ++ int err = 0; ++ ++ printk("%s: setup CNS3XXX SPI Controller", __FUNCTION__); ++#ifdef CONFIG_SPI_CNS3XXX_USEDMA ++ printk(" w/ DMA \n"); 
++#else ++#ifdef CNS3XXX_SPI_INTERRUPT ++ printk(" in Interrupt mode, w/o DMA \n"); ++#else ++ printk(" in polling mode, w/o DMA \n"); ++#endif ++#endif ++ ++ /* share pin config. */ ++// HAL_MISC_ENABLE_SPI_PINS(); ++ ++ /* Allocate master with space for cns3xxx_spi */ ++ master = spi_alloc_master(&pdev->dev, sizeof(struct cns3xxx_spi)); ++ if (master == NULL) { ++ dev_err(&pdev->dev, "No memory for spi_master\n"); ++ err = -ENOMEM; ++ goto err_nomem; ++ } ++ ++ hw = spi_master_get_devdata(master); ++ memset(hw, 0, sizeof(struct cns3xxx_spi)); ++ ++ hw->master = spi_master_get(master); ++ hw->dev = &pdev->dev; ++ ++ platform_set_drvdata(pdev, hw); ++ init_completion(&hw->done); ++ ++ /* setup the master state. */ ++ ++ master->num_chipselect = 4; ++ master->bus_num = 1; ++ ++ /* setup the state for the bitbang driver */ ++ ++ hw->bitbang.master = hw->master; ++ hw->bitbang.setup_transfer = cns3xxx_spi_setup_transfer; ++ hw->bitbang.chipselect = cns3xxx_spi_chipselect; ++ hw->bitbang.txrx_bufs = cns3xxx_spi_txrx; ++ hw->bitbang.master->setup = cns3xxx_spi_setup; ++ ++ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); ++ ++#ifdef CNS3XXX_SPI_INTERRUPT ++ err = request_irq(IRQ_CNS3XXX_SPI, cns3xxx_spi_irq, IRQF_SHARED, "cns3xxx_spi", hw); ++ if (err) { ++ dev_err(&pdev->dev, "Cannot claim IRQ\n"); ++ goto err_no_irq; ++ } ++#endif ++ ++ /* SPI controller initializations */ ++ cns3xxx_spi_initial(); ++ ++ /* register SPI controller */ ++ ++ err = spi_bitbang_start(&hw->bitbang); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to register SPI master\n"); ++ goto err_register; ++ } ++ ++ spi_slave_probe(); ++ ++ return 0; ++ ++err_register: ++#ifdef CNS3XXX_SPI_INTERRUPT ++err_no_irq: ++#endif ++ spi_master_put(hw->master);; ++ ++err_nomem: ++ return err; ++} ++ ++static int __devexit cns3xxx_spi_remove(struct platform_device *dev) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(dev); ++ ++ platform_set_drvdata(dev, NULL); ++ ++ spi_unregister_master(hw->master); ++ 
++ //cns3xxx_spi_clk_disable(); ++ ++ spi_master_put(hw->master); ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++ ++static int cns3xxx_spi_suspend(struct platform_device *pdev, pm_message_t msg) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); ++ ++ //cns3xxx_spi_clk_disable(); ++ return 0; ++} ++ ++static int cns3xxx_spi_resume(struct platform_device *pdev) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); ++ ++ //cns3xxx_spi_clk_enable() ++ return 0; ++} ++ ++#else ++#define cns3xxx_spi_suspend NULL ++#define cns3xxx_spi_resume NULL ++#endif ++ ++static struct platform_driver cns3xxx_spi_driver = { ++ .probe = cns3xxx_spi_probe, ++ .remove = __devexit_p(cns3xxx_spi_remove), ++ .suspend = cns3xxx_spi_suspend, ++ .resume = cns3xxx_spi_resume, ++ .driver = { ++ .name = "cns3xxx_spi", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init cns3xxx_spi_init(void) ++{ ++ return platform_driver_register(&cns3xxx_spi_driver); ++} ++ ++static void __exit cns3xxx_spi_exit(void) ++{ ++ platform_driver_unregister(&cns3xxx_spi_driver); ++} ++ ++module_init(cns3xxx_spi_init); ++module_exit(cns3xxx_spi_exit); ++ ++MODULE_AUTHOR("Cavium Networks"); ++MODULE_DESCRIPTION("CNS3XXX SPI Controller Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:cns3xxx_spi"); ++ ++EXPORT_SYMBOL_GPL(cns3xxx_spi_tx_rx); +--- a/include/linux/spi/spi.h ++++ b/include/linux/spi/spi.h +@@ -424,6 +424,12 @@ struct spi_transfer { + u16 delay_usecs; + u32 speed_hz; + ++#ifdef CONFIG_ARCH_CNS3XXX ++ unsigned last_in_message_list; ++#ifdef CONFIG_SPI_CNS3XXX_2IOREAD ++ u8 dio_read; ++#endif ++#endif + struct list_head transfer_list; + }; + +@@ -627,6 +633,13 @@ spi_read(struct spi_device *spi, u8 *buf + return spi_sync(spi, &m); + } + ++#ifdef CONFIG_ARCH_CNS3XXX ++extern int spi_write_read_sync(struct spi_device *spi, ++ const u8 *txbuf, unsigned n_tx, ++ u8 *rxbuf, unsigned n_rx); ++ ++#endif ++ + /* this copies txbuf and rxbuf data; for small transfers only! 
*/ + extern int spi_write_then_read(struct spi_device *spi, + const u8 *txbuf, unsigned n_tx, diff --git a/target/linux/cns3xxx/patches-2.6.31/208-cns3xxx_usb_support.patch b/target/linux/cns3xxx/patches-2.6.31/208-cns3xxx_usb_support.patch new file mode 100644 index 0000000000..15efaca199 --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/208-cns3xxx_usb_support.patch @@ -0,0 +1,25625 @@ +--- a/drivers/usb/core/Kconfig ++++ b/drivers/usb/core/Kconfig +@@ -106,11 +106,11 @@ config USB_SUSPEND + + If you are unsure about this, say N here. + +-config USB_OTG +- bool +- depends on USB && EXPERIMENTAL +- select USB_SUSPEND +- default n ++#config USB_OTG ++# bool ++# depends on USB && EXPERIMENTAL ++# select USB_SUSPEND ++# default n + + + config USB_OTG_WHITELIST +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -17,7 +17,11 @@ static void urb_destroy(struct kref *kre + + if (urb->transfer_flags & URB_FREE_BUFFER) + kfree(urb->transfer_buffer); +- ++ if(urb->aligned_transfer_buffer){ ++ kfree(urb->aligned_transfer_buffer); ++ urb->aligned_transfer_buffer=0; ++ urb->aligned_transfer_dma=0; ++ } + kfree(urb); + } + +@@ -91,6 +95,7 @@ void usb_free_urb(struct urb *urb) + { + if (urb) + kref_put(&urb->kref, urb_destroy); ++ + } + EXPORT_SYMBOL_GPL(usb_free_urb); + +--- a/drivers/usb/gadget/file_storage.c ++++ b/drivers/usb/gadget/file_storage.c +@@ -225,9 +225,9 @@ + * of the Gadget, USB Mass Storage, and SCSI protocols. 
+ */ + +- +-/* #define VERBOSE_DEBUG */ +-/* #define DUMP_MSGS */ ++#define DEBUG ++#define VERBOSE_DEBUG ++#define DUMP_MSGS + + + #include +@@ -3086,7 +3086,9 @@ static int received_cbw(struct fsg_dev * + if (req->actual != USB_BULK_CB_WRAP_LEN || + cbw->Signature != cpu_to_le32( + USB_BULK_CB_SIG)) { +- DBG(fsg, "invalid CBW: len %u sig 0x%x\n", ++ DBG(fsg, "invalid CBW: bh %.8x buf %.8x len %u sig 0x%x\n", ++ (u32)bh, ++ (u32)bh->buf, + req->actual, + le32_to_cpu(cbw->Signature)); + +@@ -4097,6 +4099,7 @@ static int __init fsg_bind(struct usb_ga + * the buffer will also work with the bulk-out (and + * interrupt-in) endpoint. */ + bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL); ++ VDBG(fsg,"%s: %d, bh=%.8x, buf=%.8x\n",__func__,i,bh,bh->buf); + if (!bh->buf) + goto out; + bh->next = bh + 1; +--- a/drivers/usb/gadget/Kconfig ++++ b/drivers/usb/gadget/Kconfig +@@ -495,6 +495,16 @@ config USB_LANGWELL + default USB_GADGET + select USB_GADGET_SELECTED + ++config USB_GADGET_CNS3XXX_OTG ++ boolean "CNS3XXX peripheral controller" ++ depends on USB_CNS3XXX_OTG_BOTH || USB_CNS3XXX_OTG_PCD_ONLY ++# select USB_OTG ++ select USB_GADGET_DUALSPEED ++ select USB_GADGET_SELECTED ++ select USB_GADGET_SNPS_DWC_OTG ++ help ++ Selects the CNS3XXX Perpheral Controller driver ++ + + # + # LAST -- dummy/emulated controller +--- /dev/null ++++ b/drivers/usb/host/ehci-cns3xxx.c +@@ -0,0 +1,171 @@ ++ ++#include ++#include ++#include ++ ++#define cns3xxx_ioremap ioremap ++#define cns3xxx_iounmap(addr) iounmap ++ ++static int cns3xxx_ehci_init(struct usb_hcd *hcd) ++{ ++ struct ehci_hcd *ehci = hcd_to_ehci(hcd); ++ int retval = 0; ++ ++ printk("%s: !!WARNING!! 
to verify the following ehci->caps ehci->regs \n", ++ __FUNCTION__); ++#ifdef CONFIG_SILICON ++ //OTG PHY ++ //cns3xxx_pwr_power_up(1<caps = hcd->regs; ++ ehci->regs = hcd->regs ++ + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); ++ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); ++ ++ hcd->has_tt = 0; ++ ehci_reset(ehci); ++ ++ retval = ehci_init(hcd); ++ if (retval) ++ return retval; ++ ++ /* XXX: Only for FPGA, remove it later */ ++ ehci_writel(ehci, 0x00800080, hcd->regs + 0x94); ++ ++ ehci_port_power(ehci, 0); ++ ++ return retval; ++} ++ ++static const struct hc_driver cns3xxx_ehci_hc_driver = { ++ .description = hcd_name, ++ .product_desc = "CNS3XXX EHCI Host Controller", ++ .hcd_priv_size = sizeof(struct ehci_hcd), ++ .irq = ehci_irq, ++ .flags = HCD_MEMORY | HCD_USB2, ++ .reset = cns3xxx_ehci_init, ++ .start = ehci_run, ++ .stop = ehci_stop, ++ .shutdown = ehci_shutdown, ++ .urb_enqueue = ehci_urb_enqueue, ++ .urb_dequeue = ehci_urb_dequeue, ++ .endpoint_disable = ehci_endpoint_disable, ++ .get_frame_number = ehci_get_frame, ++ .hub_status_data = ehci_hub_status_data, ++ .hub_control = ehci_hub_control, ++#if defined(CONFIG_PM) ++ .bus_suspend = ehci_bus_suspend, ++ .bus_resume = ehci_bus_resume, ++#endif ++ .relinquish_port = ehci_relinquish_port, ++ .port_handed_over = ehci_port_handed_over, ++}; ++ ++static int cns3xxx_ehci_probe(struct platform_device *pdev) ++{ ++ struct usb_hcd *hcd; ++ const struct hc_driver *driver = &cns3xxx_ehci_hc_driver; ++ struct resource *res; ++ int irq; ++ int retval; ++ ++ if (usb_disabled()) ++ return -ENODEV; ++ ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res) { ++ dev_err(&pdev->dev, ++ "Found HC with no IRQ. 
Check %s setup!\n", ++ dev_name(&pdev->dev)); ++ return -ENODEV; ++ } ++ irq = res->start; ++ ++ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); ++ if (!hcd) { ++ retval = -ENOMEM; ++ goto fail_create_hcd; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, ++ "Found HC with no register addr. Check %s setup!\n", ++ dev_name(&pdev->dev)); ++ retval = -ENODEV; ++ goto fail_request_resource; ++ } ++ hcd->rsrc_start = res->start; ++ hcd->rsrc_len = res->end - res->start + 1; ++ ++#ifdef CNS3XXX_USB_BASE_VIRT ++ hcd->regs = (void __iomem *) CNS3XXX_USB_BASE_VIRT; ++#else ++ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, ++ driver->description)) { ++ dev_dbg(&pdev->dev, "controller already in use\n"); ++ retval = -EBUSY; ++ goto fail_request_resource; ++ } ++ ++ hcd->regs = cns3xxx_ioremap(hcd->rsrc_start, hcd->rsrc_len); ++ ++ if (hcd->regs == NULL) { ++ dev_dbg(&pdev->dev, "error mapping memory\n"); ++ retval = -EFAULT; ++ goto fail_ioremap; ++ } ++#endif ++ ++ retval = usb_add_hcd(hcd, irq, IRQF_SHARED); /* TODO: IRQF_DISABLED if any interrupt issues */ ++ if (retval) ++ goto fail_add_hcd; ++ ++ return retval; ++ ++#ifndef CNS3XXX_USB_BASE_VIRT ++fail_add_hcd: ++ cns3xxx_iounmap(hcd->regs); ++fail_ioremap: ++ release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ++#else ++fail_request_resource: ++fail_add_hcd: ++#endif ++ usb_put_hcd(hcd); ++fail_create_hcd: ++ dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval); ++ return retval; ++} ++ ++static int cns3xxx_ehci_remove(struct platform_device *pdev) ++{ ++ struct usb_hcd *hcd = platform_get_drvdata(pdev); ++ ++ usb_remove_hcd(hcd); ++#ifndef CNS3XXX_USB_BASE_VIRT ++ cns3xxx_iounmap(hcd->regs); ++ release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ++#endif ++ usb_put_hcd(hcd); ++ ++ return 0; ++} ++ ++MODULE_ALIAS("platform:cns3xxx-ehci"); ++ ++static struct platform_driver cns3xxx_ehci_driver = { ++ .probe = 
cns3xxx_ehci_probe, ++ .remove = cns3xxx_ehci_remove, ++ .driver = { ++ .name = "cns3xxx-ehci", ++ }, ++}; +--- a/drivers/usb/host/ehci.h ++++ b/drivers/usb/host/ehci.h +@@ -602,6 +602,13 @@ ehci_port_speed(struct ehci_hcd *ehci, u + #define writel_be(val, addr) __raw_writel(val, (__force unsigned *)addr) + #endif + ++#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_CNS3XXX) ++#undef readl ++#undef writel ++#define readl(addr) __raw_readl((__force unsigned *)addr) ++#define writel(val, addr) __raw_writel(val, (__force unsigned *)addr) ++#endif ++ + static inline unsigned int ehci_readl(const struct ehci_hcd *ehci, + __u32 __iomem * regs) + { +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -1120,6 +1120,11 @@ MODULE_LICENSE ("GPL"); + #define PLATFORM_DRIVER ixp4xx_ehci_driver + #endif + ++#ifdef CONFIG_USB_CNS3XXX_EHCI ++#include "ehci-cns3xxx.c" ++#define PLATFORM_DRIVER cns3xxx_ehci_driver ++#endif ++ + #if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ + !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) + #error "missing bus glue for ehci-hcd" +--- a/drivers/usb/host/Kconfig ++++ b/drivers/usb/host/Kconfig +@@ -153,6 +153,45 @@ config USB_ISP1760_HCD + To compile this driver as a module, choose M here: the + module will be called isp1760. 
+ ++config USB_CNS3XXX_EHCI ++ bool "Cavium CNS3XXX EHCI Module" ++ depends on USB && USB_EHCI_HCD ++ ---help--- ++ Cavium CNS3XXX USB EHCI Chipset support ++ ++config USB_CNS3XXX_OTG ++ tristate "Cavium CNS3XXX OTG Module" ++ depends on USB ++ ---help--- ++ Cavium CNS3XXX USB OTG Chipset support ++ ++choice ++ prompt "OTG function includes" ++ depends on USB_CNS3XXX_OTG ++ default USB_CNS3XXX_OTG_BOTH ++ ++config USB_CNS3XXX_OTG_BOTH ++ bool "both HCD and PCD" ++ ++config USB_CNS3XXX_OTG_HCD_ONLY ++ bool "HCD only" ++ ++config USB_CNS3XXX_OTG_PCD_ONLY ++ bool "PCD only" ++ ++endchoice ++config USB_CNS3XXX_OTG_ENABLE_OTG_DRVVBUS ++ bool "Enable OTG_DRVVBUS" ++ depends on USB_CNS3XXX_OTG ++ default y ++ ---help--- ++ The Power control IC (FB6862B), which is located around the OTG mini ++ USB type A/B receptacle, in some early EVB board v1.0/v1.1(#1~#22) is ++ incorrect(FB6862A), and need to be patched so that VBUS can be applied ++ properly. In that case, we don't use the OTG_DRVVBUS to control the VBUS. ++ ++ Check the board that you are using, if the IC is FB6862B, say Y. Otherwise, say N. ++ + config USB_OHCI_HCD + tristate "OHCI HCD support" + depends on USB && USB_ARCH_HAS_OHCI +@@ -225,6 +264,12 @@ config USB_OHCI_HCD_SSB + + If unsure, say N. 
+ ++config USB_CNS3XXX_OHCI ++ bool "Cavium CNS3XXX OHCI Module" ++ depends on USB_OHCI_HCD ++ ---help--- ++ Cavium CNS3XXX USB OHCI Chipset support ++ + config USB_OHCI_BIG_ENDIAN_DESC + bool + depends on USB_OHCI_HCD +--- a/drivers/usb/host/Makefile ++++ b/drivers/usb/host/Makefile +@@ -31,3 +31,6 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o + obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o + obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o + obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o ++obj-$(CONFIG_USB_CNS3XXX_OTG) += otg/ ++obj-$(CONFIG_USB_GADGET_CNS3XXX_OTG) += otg/ ++ +--- /dev/null ++++ b/drivers/usb/host/ohci-cns3xxx.c +@@ -0,0 +1,143 @@ ++ ++#include ++#include ++ ++#define cns3xxx_ioremap ioremap ++#define cns3xxx_iounmap(addr) iounmap ++ ++static int __devinit ++cns3xxx_ohci_start (struct usb_hcd *hcd) ++{ ++ struct ohci_hcd *ohci = hcd_to_ohci (hcd); ++ int ret; ++ ++ if ((ret = ohci_init(ohci)) < 0) ++ return ret; ++ ++ ohci->num_ports = 1; ++ ++ if ((ret = ohci_run(ohci)) < 0) { ++ err("can't start %s", hcd->self.bus_name); ++ ohci_stop(hcd); ++ return ret; ++ } ++ return 0; ++} ++ ++static const struct hc_driver cns3xxx_ohci_hc_driver = { ++ .description = hcd_name, ++ .product_desc = "CNS3XXX OHCI Host controller", ++ .hcd_priv_size = sizeof(struct ohci_hcd), ++ .irq = ohci_irq, ++ .flags = HCD_USB11 | HCD_MEMORY, ++ .start = cns3xxx_ohci_start, ++ .stop = ohci_stop, ++ .shutdown = ohci_shutdown, ++ .urb_enqueue = ohci_urb_enqueue, ++ .urb_dequeue = ohci_urb_dequeue, ++ .endpoint_disable = ohci_endpoint_disable, ++ .get_frame_number = ohci_get_frame, ++ .hub_status_data = ohci_hub_status_data, ++ .hub_control = ohci_hub_control, ++#ifdef CONFIG_PM ++ .bus_suspend = ohci_bus_suspend, ++ .bus_resume = ohci_bus_resume, ++#endif ++ .start_port_reset = ohci_start_port_reset, ++}; ++ ++static int cns3xxx_ohci_probe(struct platform_device *pdev) ++{ ++ struct usb_hcd *hcd = NULL; ++ const struct hc_driver *driver = &cns3xxx_ohci_hc_driver; ++ struct resource *res; 
++ int irq; ++ int retval; ++ ++ if (usb_disabled()) ++ return -ENODEV; ++ ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res) { ++ dev_err(&pdev->dev, ++ "Found HC with no IRQ. Check %s setup!\n", ++ dev_name(&pdev->dev)); ++ return -ENODEV; ++ } ++ irq = res->start; ++ ++ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); ++ if (!hcd) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, ++ "Found HC with no register addr. Check %s setup!\n", ++ dev_name(&pdev->dev)); ++ retval = -ENODEV; ++ goto err1; ++ } ++ hcd->rsrc_start = res->start; ++ hcd->rsrc_len = res->end - res->start + 1; ++ ++#ifdef CNS3XXX_USB_OHCI_BASE_VIRT ++ hcd->regs = (void __iomem *) CNS3XXX_USB_OHCI_BASE_VIRT; ++#else ++ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, ++ driver->description)) { ++ dev_dbg(&pdev->dev, "controller already in use\n"); ++ retval = -EBUSY; ++ goto err1; ++ } ++ ++ hcd->regs = cns3xxx_ioremap(hcd->rsrc_start, hcd->rsrc_len); ++ ++ if (hcd->regs == NULL) { ++ dev_dbg(&pdev->dev, "error mapping memory\n"); ++ retval = -EFAULT; ++ goto err2; ++ } ++#endif ++ ++ ohci_hcd_init(hcd_to_ohci(hcd)); ++ ++ retval = usb_add_hcd(hcd, irq, IRQF_SHARED); ++ if (retval == 0) ++ return retval; ++ ++#ifndef CNS3XXX_USB_OHCI_BASE_VIRT ++ cns3xxx_iounmap(hcd->regs); ++ ++err2: ++ release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ++#endif ++ ++err1: ++ usb_put_hcd(hcd); ++ return retval; ++} ++ ++static int cns3xxx_ohci_remove(struct platform_device *pdev) ++{ ++ struct usb_hcd *hcd = platform_get_drvdata(pdev); ++ ++ usb_remove_hcd(hcd); ++#ifndef CNS3XXX_USB_OHCI_BASE_VIRT ++ cns3xxx_iounmap(hcd->regs); ++ release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ++#endif ++ usb_put_hcd(hcd); ++ ++ return 0; ++} ++ ++MODULE_ALIAS("platform:cns3xxx-ohci"); ++ ++static struct platform_driver ohci_hcd_cns3xxx_driver = { ++ .probe = cns3xxx_ohci_probe, ++ .remove = cns3xxx_ohci_remove, ++ 
.driver = { ++ .name = "cns3xxx-ohci", ++ }, ++}; +--- a/drivers/usb/host/ohci.h ++++ b/drivers/usb/host/ohci.h +@@ -550,6 +550,14 @@ static inline struct usb_hcd *ohci_to_hc + * Other arches can be added if/when they're needed. + * + */ ++ ++#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_CNS3XXX) ++#undef readl ++#undef writel ++#define readl(addr) __raw_readl((__force unsigned *)addr) ++#define writel(val, addr) __raw_writel(val, (__force unsigned *)addr) ++#endif ++ + static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci, + __hc32 __iomem * regs) + { +--- a/drivers/usb/host/ohci-hcd.c ++++ b/drivers/usb/host/ohci-hcd.c +@@ -1047,6 +1047,11 @@ MODULE_LICENSE ("GPL"); + #define PLATFORM_DRIVER ohci_hcd_at91_driver + #endif + ++#ifdef CONFIG_USB_CNS3XXX_OHCI ++#include "ohci-cns3xxx.c" ++#define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver ++#endif ++ + #ifdef CONFIG_ARCH_PNX4008 + #include "ohci-pnx4008.c" + #define PLATFORM_DRIVER usb_hcd_pnx4008_driver +--- /dev/null ++++ b/drivers/usb/host/otg/dummy_audio.c +@@ -0,0 +1,1575 @@ ++/* ++ * zero.c -- Gadget Zero, for USB development ++ * ++ * Copyright (C) 2003-2004 David Brownell ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions, and the following disclaimer, ++ * without modification. ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * 3. The names of the above-listed copyright holders may not be used ++ * to endorse or promote products derived from this software without ++ * specific prior written permission. 
++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++ ++/* ++ * Gadget Zero only needs two bulk endpoints, and is an example of how you ++ * can write a hardware-agnostic gadget driver running inside a USB device. ++ * ++ * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't ++ * affect most of the driver. ++ * ++ * Use it with the Linux host/master side "usbtest" driver to get a basic ++ * functional test of your device-side usb stack, or with "usb-skeleton". ++ * ++ * It supports two similar configurations. One sinks whatever the usb host ++ * writes, and in return sources zeroes. The other loops whatever the host ++ * writes back, so the host can read it. 
Module options include: ++ * ++ * buflen=N default N=4096, buffer size used ++ * qlen=N default N=32, how many buffers in the loopback queue ++ * loopdefault default false, list loopback config first ++ * ++ * Many drivers will only have one configuration, letting them be much ++ * simpler if they also don't support high speed operation (like this ++ * driver does). ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++static int utf8_to_utf16le(const char *s, u16 *cp, unsigned len) ++{ ++ int count = 0; ++ u8 c; ++ u16 uchar; ++ ++ /* this insists on correct encodings, though not minimal ones. ++ * BUT it currently rejects legit 4-byte UTF-8 code points, ++ * which need surrogate pairs. (Unicode 3.1 can use them.) 
++ */ ++ while (len != 0 && (c = (u8) *s++) != 0) { ++ if (unlikely(c & 0x80)) { ++ // 2-byte sequence: ++ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx ++ if ((c & 0xe0) == 0xc0) { ++ uchar = (c & 0x1f) << 6; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c; ++ ++ // 3-byte sequence (most CJKV characters): ++ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx ++ } else if ((c & 0xf0) == 0xe0) { ++ uchar = (c & 0x0f) << 12; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c << 6; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c; ++ ++ /* no bogus surrogates */ ++ if (0xd800 <= uchar && uchar <= 0xdfff) ++ goto fail; ++ ++ // 4-byte sequence (surrogate pairs, currently rare): ++ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx ++ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx ++ // (uuuuu = wwww + 1) ++ // FIXME accept the surrogate code points (only) ++ ++ } else ++ goto fail; ++ } else ++ uchar = c; ++ put_unaligned (cpu_to_le16 (uchar), cp++); ++ count++; ++ len--; ++ } ++ return count; ++fail: ++ return -1; ++} ++ ++ ++/** ++ * usb_gadget_get_string - fill out a string descriptor ++ * @table: of c strings encoded using UTF-8 ++ * @id: string id, from low byte of wValue in get string descriptor ++ * @buf: at least 256 bytes ++ * ++ * Finds the UTF-8 string matching the ID, and converts it into a ++ * string descriptor in utf16-le. ++ * Returns length of descriptor (always even) or negative errno ++ * ++ * If your driver needs stings in multiple languages, you'll probably ++ * "switch (wIndex) { ... }" in your ep0 string descriptor logic, ++ * using this routine after choosing which set of UTF-8 strings to use. ++ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with ++ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1 ++ * characters (which are also widely used in C strings). 
++ */ ++int ++usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) ++{ ++ struct usb_string *s; ++ int len; ++ ++ /* descriptor 0 has the language id */ ++ if (id == 0) { ++ buf [0] = 4; ++ buf [1] = USB_DT_STRING; ++ buf [2] = (u8) table->language; ++ buf [3] = (u8) (table->language >> 8); ++ return 4; ++ } ++ for (s = table->strings; s && s->s; s++) ++ if (s->id == id) ++ break; ++ ++ /* unrecognized: stall. */ ++ if (!s || !s->s) ++ return -EINVAL; ++ ++ /* string descriptors have length, tag, then UTF16-LE text */ ++ len = min ((size_t) 126, strlen (s->s)); ++ memset (buf + 2, 0, 2 * len); /* zero all the bytes */ ++ len = utf8_to_utf16le(s->s, (u16 *)&buf[2], len); ++ if (len < 0) ++ return -EINVAL; ++ buf [0] = (len + 1) * 2; ++ buf [1] = USB_DT_STRING; ++ return buf [0]; ++} ++ ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++/** ++ * usb_descriptor_fillbuf - fill buffer with descriptors ++ * @buf: Buffer to be filled ++ * @buflen: Size of buf ++ * @src: Array of descriptor pointers, terminated by null pointer. ++ * ++ * Copies descriptors into the buffer, returning the length or a ++ * negative error code if they can't all be copied. Useful when ++ * assembling descriptors for an associated set of interfaces used ++ * as part of configuring a composite device; or in other cases where ++ * sets of descriptors need to be marshaled. 
++ */ ++int ++usb_descriptor_fillbuf(void *buf, unsigned buflen, ++ const struct usb_descriptor_header **src) ++{ ++ u8 *dest = buf; ++ ++ if (!src) ++ return -EINVAL; ++ ++ /* fill buffer from src[] until null descriptor ptr */ ++ for (; 0 != *src; src++) { ++ unsigned len = (*src)->bLength; ++ ++ if (len > buflen) ++ return -EINVAL; ++ memcpy(dest, *src, len); ++ buflen -= len; ++ dest += len; ++ } ++ return dest - (u8 *)buf; ++} ++ ++ ++/** ++ * usb_gadget_config_buf - builts a complete configuration descriptor ++ * @config: Header for the descriptor, including characteristics such ++ * as power requirements and number of interfaces. ++ * @desc: Null-terminated vector of pointers to the descriptors (interface, ++ * endpoint, etc) defining all functions in this device configuration. ++ * @buf: Buffer for the resulting configuration descriptor. ++ * @length: Length of buffer. If this is not big enough to hold the ++ * entire configuration descriptor, an error code will be returned. ++ * ++ * This copies descriptors into the response buffer, building a descriptor ++ * for that configuration. It returns the buffer length or a negative ++ * status code. The config.wTotalLength field is set to match the length ++ * of the result, but other descriptor fields (including power usage and ++ * interface count) must be set by the caller. ++ * ++ * Gadget drivers could use this when constructing a config descriptor ++ * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the ++ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. ++ */ ++int usb_gadget_config_buf( ++ const struct usb_config_descriptor *config, ++ void *buf, ++ unsigned length, ++ const struct usb_descriptor_header **desc ++) ++{ ++ struct usb_config_descriptor *cp = buf; ++ int len; ++ ++ /* config descriptor first */ ++ if (length < USB_DT_CONFIG_SIZE || !desc) ++ return -EINVAL; ++ *cp = *config; ++ ++ /* then interface/endpoint/class/vendor/... 
*/ ++ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, ++ length - USB_DT_CONFIG_SIZE, desc); ++ if (len < 0) ++ return len; ++ len += USB_DT_CONFIG_SIZE; ++ if (len > 0xffff) ++ return -EINVAL; ++ ++ /* patch up the config descriptor */ ++ cp->bLength = USB_DT_CONFIG_SIZE; ++ cp->bDescriptorType = USB_DT_CONFIG; ++ cp->wTotalLength = cpu_to_le16(len); ++ cp->bmAttributes |= USB_CONFIG_ATT_ONE; ++ return len; ++} ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++#define RBUF_LEN (1024*1024) ++static int rbuf_start; ++static int rbuf_len; ++static __u8 rbuf[RBUF_LEN]; ++ ++/*-------------------------------------------------------------------------*/ ++ ++#define DRIVER_VERSION "St Patrick's Day 2004" ++ ++static const char shortname [] = "zero"; ++static const char longname [] = "YAMAHA YST-MS35D USB Speaker "; ++ ++static const char source_sink [] = "source and sink data"; ++static const char loopback [] = "loop input to output"; ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* ++ * driver assumes self-powered hardware, and ++ * has no way for users to trigger remote wakeup. ++ * ++ * this version autoconfigures as much as possible, ++ * which is reasonable for most "bulk-only" drivers. 
++ */ ++static const char *EP_IN_NAME; /* source */ ++static const char *EP_OUT_NAME; /* sink */ ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* big enough to hold our biggest descriptor */ ++#define USB_BUFSIZ 512 ++ ++struct zero_dev { ++ spinlock_t lock; ++ struct usb_gadget *gadget; ++ struct usb_request *req; /* for control responses */ ++ ++ /* when configured, we have one of two configs: ++ * - source data (in to host) and sink it (out from host) ++ * - or loop it back (out from host back in to host) ++ */ ++ u8 config; ++ struct usb_ep *in_ep, *out_ep; ++ ++ /* autoresume timer */ ++ struct timer_list resume; ++}; ++ ++#define xprintk(d,level,fmt,args...) \ ++ dev_printk(level , &(d)->gadget->dev , fmt , ## args) ++ ++#ifdef DEBUG ++#define DBG(dev,fmt,args...) \ ++ xprintk(dev , KERN_DEBUG , fmt , ## args) ++#else ++#define DBG(dev,fmt,args...) \ ++ do { } while (0) ++#endif /* DEBUG */ ++ ++#ifdef VERBOSE ++#define VDBG DBG ++#else ++#define VDBG(dev,fmt,args...) \ ++ do { } while (0) ++#endif /* VERBOSE */ ++ ++#define ERROR(dev,fmt,args...) \ ++ xprintk(dev , KERN_ERR , fmt , ## args) ++#define WARN(dev,fmt,args...) \ ++ xprintk(dev , KERN_WARNING , fmt , ## args) ++#define INFO(dev,fmt,args...) \ ++ xprintk(dev , KERN_INFO , fmt , ## args) ++ ++/*-------------------------------------------------------------------------*/ ++ ++static unsigned buflen = 4096; ++static unsigned qlen = 32; ++static unsigned pattern = 0; ++ ++module_param (buflen, uint, S_IRUGO|S_IWUSR); ++module_param (qlen, uint, S_IRUGO|S_IWUSR); ++module_param (pattern, uint, S_IRUGO|S_IWUSR); ++ ++/* ++ * if it's nonzero, autoresume says how many seconds to wait ++ * before trying to wake up the host after suspend. ++ */ ++static unsigned autoresume = 0; ++module_param (autoresume, uint, 0); ++ ++/* ++ * Normally the "loopback" configuration is second (index 1) so ++ * it's not the default. 
Here's where to change that order, to ++ * work better with hosts where config changes are problematic. ++ * Or controllers (like superh) that only support one config. ++ */ ++static int loopdefault = 0; ++ ++module_param (loopdefault, bool, S_IRUGO|S_IWUSR); ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* Thanks to NetChip Technologies for donating this product ID. ++ * ++ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! ++ * Instead: allocate your own, using normal USB-IF procedures. ++ */ ++#ifndef CONFIG_USB_ZERO_HNPTEST ++#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */ ++#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */ ++#else ++#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */ ++#define DRIVER_PRODUCT_NUM 0xbadd ++#endif ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* ++ * DESCRIPTORS ... most are static, but strings and (full) ++ * configuration descriptors are built on demand. ++ */ ++ ++/* ++#define STRING_MANUFACTURER 25 ++#define STRING_PRODUCT 42 ++#define STRING_SERIAL 101 ++*/ ++#define STRING_MANUFACTURER 1 ++#define STRING_PRODUCT 2 ++#define STRING_SERIAL 3 ++ ++#define STRING_SOURCE_SINK 250 ++#define STRING_LOOPBACK 251 ++ ++/* ++ * This device advertises two configurations; these numbers work ++ * on a pxa250 as well as more flexible hardware. 
++ */ ++#define CONFIG_SOURCE_SINK 3 ++#define CONFIG_LOOPBACK 2 ++ ++/* ++static struct usb_device_descriptor ++device_desc = { ++ .bLength = sizeof device_desc, ++ .bDescriptorType = USB_DT_DEVICE, ++ ++ .bcdUSB = __constant_cpu_to_le16 (0x0200), ++ .bDeviceClass = USB_CLASS_VENDOR_SPEC, ++ ++ .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM), ++ .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM), ++ .iManufacturer = STRING_MANUFACTURER, ++ .iProduct = STRING_PRODUCT, ++ .iSerialNumber = STRING_SERIAL, ++ .bNumConfigurations = 2, ++}; ++*/ ++static struct usb_device_descriptor ++device_desc = { ++ .bLength = sizeof device_desc, ++ .bDescriptorType = USB_DT_DEVICE, ++ .bcdUSB = __constant_cpu_to_le16 (0x0100), ++ .bDeviceClass = USB_CLASS_PER_INTERFACE, ++ .bDeviceSubClass = 0, ++ .bDeviceProtocol = 0, ++ .bMaxPacketSize0 = 64, ++ .bcdDevice = __constant_cpu_to_le16 (0x0100), ++ .idVendor = __constant_cpu_to_le16 (0x0499), ++ .idProduct = __constant_cpu_to_le16 (0x3002), ++ .iManufacturer = STRING_MANUFACTURER, ++ .iProduct = STRING_PRODUCT, ++ .iSerialNumber = STRING_SERIAL, ++ .bNumConfigurations = 1, ++}; ++ ++static struct usb_config_descriptor ++z_config = { ++ .bLength = sizeof z_config, ++ .bDescriptorType = USB_DT_CONFIG, ++ ++ /* compute wTotalLength on the fly */ ++ .bNumInterfaces = 2, ++ .bConfigurationValue = 1, ++ .iConfiguration = 0, ++ .bmAttributes = 0x40, ++ .bMaxPower = 0, /* self-powered */ ++}; ++ ++ ++static struct usb_otg_descriptor ++otg_descriptor = { ++ .bLength = sizeof otg_descriptor, ++ .bDescriptorType = USB_DT_OTG, ++ ++ .bmAttributes = USB_OTG_SRP, ++}; ++ ++/* one interface in each configuration */ ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ ++/* ++ * usb 2.0 devices need to expose both high speed and full speed ++ * descriptors, unless they only run at full speed. ++ * ++ * that means alternate endpoint descriptors (bigger packets) ++ * and a "device qualifier" ... 
plus more construction options ++ * for the config descriptor. ++ */ ++ ++static struct usb_qualifier_descriptor ++dev_qualifier = { ++ .bLength = sizeof dev_qualifier, ++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER, ++ ++ .bcdUSB = __constant_cpu_to_le16 (0x0200), ++ .bDeviceClass = USB_CLASS_VENDOR_SPEC, ++ ++ .bNumConfigurations = 2, ++}; ++ ++ ++struct usb_cs_as_general_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ ++ __u8 bDescriptorSubType; ++ __u8 bTerminalLink; ++ __u8 bDelay; ++ __u16 wFormatTag; ++} __attribute__ ((packed)); ++ ++struct usb_cs_as_format_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ ++ __u8 bDescriptorSubType; ++ __u8 bFormatType; ++ __u8 bNrChannels; ++ __u8 bSubframeSize; ++ __u8 bBitResolution; ++ __u8 bSamfreqType; ++ __u8 tLowerSamFreq[3]; ++ __u8 tUpperSamFreq[3]; ++} __attribute__ ((packed)); ++ ++static const struct usb_interface_descriptor ++z_audio_control_if_desc = { ++ .bLength = sizeof z_audio_control_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 0, ++ .bAlternateSetting = 0, ++ .bNumEndpoints = 0, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x1, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_interface_descriptor ++z_audio_if_desc = { ++ .bLength = sizeof z_audio_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 1, ++ .bAlternateSetting = 0, ++ .bNumEndpoints = 0, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x2, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_interface_descriptor ++z_audio_if_desc2 = { ++ .bLength = sizeof z_audio_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 1, ++ .bAlternateSetting = 1, ++ .bNumEndpoints = 1, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x2, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_cs_as_general_descriptor ++z_audio_cs_as_if_desc 
= { ++ .bLength = 7, ++ .bDescriptorType = 0x24, ++ ++ .bDescriptorSubType = 0x01, ++ .bTerminalLink = 0x01, ++ .bDelay = 0x0, ++ .wFormatTag = __constant_cpu_to_le16 (0x0001) ++}; ++ ++ ++static const struct usb_cs_as_format_descriptor ++z_audio_cs_as_format_desc = { ++ .bLength = 0xe, ++ .bDescriptorType = 0x24, ++ ++ .bDescriptorSubType = 2, ++ .bFormatType = 1, ++ .bNrChannels = 1, ++ .bSubframeSize = 1, ++ .bBitResolution = 8, ++ .bSamfreqType = 0, ++ .tLowerSamFreq = {0x7e, 0x13, 0x00}, ++ .tUpperSamFreq = {0xe2, 0xd6, 0x00}, ++}; ++ ++static const struct usb_endpoint_descriptor ++z_iso_ep = { ++ .bLength = 0x09, ++ .bDescriptorType = 0x05, ++ .bEndpointAddress = 0x04, ++ .bmAttributes = 0x09, ++ .wMaxPacketSize = 0x0038, ++ .bInterval = 0x01, ++ .bRefresh = 0x00, ++ .bSynchAddress = 0x00, ++}; ++ ++static char z_iso_ep2[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++// 9 bytes ++static char z_ac_interface_header_desc[] = ++{ 0x09, 0x24, 0x01, 0x00, 0x01, 0x2b, 0x00, 0x01, 0x01 }; ++ ++// 12 bytes ++static char z_0[] = {0x0c, 0x24, 0x02, 0x01, 0x01, 0x01, 0x00, 0x02, ++ 0x03, 0x00, 0x00, 0x00}; ++// 13 bytes ++static char z_1[] = {0x0d, 0x24, 0x06, 0x02, 0x01, 0x02, 0x15, 0x00, ++ 0x02, 0x00, 0x02, 0x00, 0x00}; ++// 9 bytes ++static char z_2[] = {0x09, 0x24, 0x03, 0x03, 0x01, 0x03, 0x00, 0x02, ++ 0x00}; ++ ++static char za_0[] = {0x09, 0x04, 0x01, 0x02, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_1[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_2[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x01, 0x08, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_3[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_4[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_5[] = {0x09, 0x04, 0x01, 0x03, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_6[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_7[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x02, 0x10, 0x00, ++ 
0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_8[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_9[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_10[] = {0x09, 0x04, 0x01, 0x04, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_11[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_12[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x02, 0x10, 0x00, ++ 0x73, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_13[] = {0x09, 0x05, 0x04, 0x09, 0xe0, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_14[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_15[] = {0x09, 0x04, 0x01, 0x05, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_16[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_17[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x03, 0x14, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_18[] = {0x09, 0x05, 0x04, 0x09, 0xa8, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_19[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_20[] = {0x09, 0x04, 0x01, 0x06, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_21[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_22[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x03, 0x14, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_23[] = {0x09, 0x05, 0x04, 0x09, 0x50, 0x01, 0x01, 0x00, ++ 0x00}; ++ ++static char za_24[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++ ++ ++static const struct usb_descriptor_header *z_function [] = { ++ (struct usb_descriptor_header *) &z_audio_control_if_desc, ++ (struct usb_descriptor_header *) &z_ac_interface_header_desc, ++ (struct usb_descriptor_header *) &z_0, ++ (struct usb_descriptor_header *) &z_1, ++ (struct usb_descriptor_header *) &z_2, ++ (struct usb_descriptor_header *) &z_audio_if_desc, ++ (struct usb_descriptor_header *) &z_audio_if_desc2, ++ (struct usb_descriptor_header *) &z_audio_cs_as_if_desc, ++ (struct 
usb_descriptor_header *) &z_audio_cs_as_format_desc, ++ (struct usb_descriptor_header *) &z_iso_ep, ++ (struct usb_descriptor_header *) &z_iso_ep2, ++ (struct usb_descriptor_header *) &za_0, ++ (struct usb_descriptor_header *) &za_1, ++ (struct usb_descriptor_header *) &za_2, ++ (struct usb_descriptor_header *) &za_3, ++ (struct usb_descriptor_header *) &za_4, ++ (struct usb_descriptor_header *) &za_5, ++ (struct usb_descriptor_header *) &za_6, ++ (struct usb_descriptor_header *) &za_7, ++ (struct usb_descriptor_header *) &za_8, ++ (struct usb_descriptor_header *) &za_9, ++ (struct usb_descriptor_header *) &za_10, ++ (struct usb_descriptor_header *) &za_11, ++ (struct usb_descriptor_header *) &za_12, ++ (struct usb_descriptor_header *) &za_13, ++ (struct usb_descriptor_header *) &za_14, ++ (struct usb_descriptor_header *) &za_15, ++ (struct usb_descriptor_header *) &za_16, ++ (struct usb_descriptor_header *) &za_17, ++ (struct usb_descriptor_header *) &za_18, ++ (struct usb_descriptor_header *) &za_19, ++ (struct usb_descriptor_header *) &za_20, ++ (struct usb_descriptor_header *) &za_21, ++ (struct usb_descriptor_header *) &za_22, ++ (struct usb_descriptor_header *) &za_23, ++ (struct usb_descriptor_header *) &za_24, ++ NULL, ++}; ++ ++/* maxpacket and other transfer characteristics vary by speed. */ ++#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs)) ++ ++#else ++ ++/* if there's no high speed support, maxpacket doesn't change. 
*/ ++#define ep_desc(g,hs,fs) fs ++ ++#endif /* !CONFIG_USB_GADGET_DUALSPEED */ ++ ++static char manufacturer [40]; ++//static char serial [40]; ++static char serial [] = "Ser 00 em"; ++ ++/* static strings, in UTF-8 */ ++static struct usb_string strings [] = { ++ { STRING_MANUFACTURER, manufacturer, }, ++ { STRING_PRODUCT, longname, }, ++ { STRING_SERIAL, serial, }, ++ { STRING_LOOPBACK, loopback, }, ++ { STRING_SOURCE_SINK, source_sink, }, ++ { } /* end of list */ ++}; ++ ++static struct usb_gadget_strings stringtab = { ++ .language = 0x0409, /* en-us */ ++ .strings = strings, ++}; ++ ++/* ++ * config descriptors are also handcrafted. these must agree with code ++ * that sets configurations, and with code managing interfaces and their ++ * altsettings. other complexity may come from: ++ * ++ * - high speed support, including "other speed config" rules ++ * - multiple configurations ++ * - interfaces with alternate settings ++ * - embedded class or vendor-specific descriptors ++ * ++ * this handles high speed, and has a second config that could as easily ++ * have been an alternate interface setting (on most hardware). ++ * ++ * NOTE: to demonstrate (and test) more USB capabilities, this driver ++ * should include an altsetting to test interrupt transfers, including ++ * high bandwidth modes at high speed. (Maybe work like Intel's test ++ * device?) 
++ */ ++static int ++config_buf (struct usb_gadget *gadget, u8 *buf, u8 type, unsigned index) ++{ ++ int len; ++ const struct usb_descriptor_header **function; ++ ++ function = z_function; ++ len = usb_gadget_config_buf (&z_config, buf, USB_BUFSIZ, function); ++ if (len < 0) ++ return len; ++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type; ++ return len; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static struct usb_request * ++alloc_ep_req (struct usb_ep *ep, unsigned length) ++{ ++ struct usb_request *req; ++ ++ req = usb_ep_alloc_request (ep, GFP_ATOMIC); ++ if (req) { ++ req->length = length; ++ req->buf = usb_ep_alloc_buffer (ep, length, ++ &req->dma, GFP_ATOMIC); ++ if (!req->buf) { ++ usb_ep_free_request (ep, req); ++ req = NULL; ++ } ++ } ++ return req; ++} ++ ++static void free_ep_req (struct usb_ep *ep, struct usb_request *req) ++{ ++ if (req->buf) ++ usb_ep_free_buffer (ep, req->buf, req->dma, req->length); ++ usb_ep_free_request (ep, req); ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* optionally require specific source/sink data patterns */ ++ ++static int ++check_read_data ( ++ struct zero_dev *dev, ++ struct usb_ep *ep, ++ struct usb_request *req ++) ++{ ++ unsigned i; ++ u8 *buf = req->buf; ++ ++ for (i = 0; i < req->actual; i++, buf++) { ++ switch (pattern) { ++ /* all-zeroes has no synchronization issues */ ++ case 0: ++ if (*buf == 0) ++ continue; ++ break; ++ /* mod63 stays in sync with short-terminated transfers, ++ * or otherwise when host and gadget agree on how large ++ * each usb transfer request should be. resync is done ++ * with set_interface or set_config. 
++ */ ++ case 1: ++ if (*buf == (u8)(i % 63)) ++ continue; ++ break; ++ } ++ ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf); ++ usb_ep_set_halt (ep); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void zero_reset_config (struct zero_dev *dev) ++{ ++ if (dev->config == 0) ++ return; ++ ++ DBG (dev, "reset config\n"); ++ ++ /* just disable endpoints, forcing completion of pending i/o. ++ * all our completion handlers free their requests in this case. ++ */ ++ if (dev->in_ep) { ++ usb_ep_disable (dev->in_ep); ++ dev->in_ep = NULL; ++ } ++ if (dev->out_ep) { ++ usb_ep_disable (dev->out_ep); ++ dev->out_ep = NULL; ++ } ++ dev->config = 0; ++ del_timer (&dev->resume); ++} ++ ++#define _write(f, buf, sz) (f->f_op->write(f, buf, sz, &f->f_pos)) ++ ++static void ++zero_isoc_complete (struct usb_ep *ep, struct usb_request *req) ++{ ++ struct zero_dev *dev = ep->driver_data; ++ int status = req->status; ++ int i, j; ++ ++ switch (status) { ++ ++ case 0: /* normal completion? 
*/ ++ //printk ("\nzero ---------------> isoc normal completion %d bytes\n", req->actual); ++ for (i=0, j=rbuf_start; iactual; i++) { ++ //printk ("%02x ", ((__u8*)req->buf)[i]); ++ rbuf[j] = ((__u8*)req->buf)[i]; ++ j++; ++ if (j >= RBUF_LEN) j=0; ++ } ++ rbuf_start = j; ++ //printk ("\n\n"); ++ ++ if (rbuf_len < RBUF_LEN) { ++ rbuf_len += req->actual; ++ if (rbuf_len > RBUF_LEN) { ++ rbuf_len = RBUF_LEN; ++ } ++ } ++ ++ break; ++ ++ /* this endpoint is normally active while we're configured */ ++ case -ECONNABORTED: /* hardware forced ep reset */ ++ case -ECONNRESET: /* request dequeued */ ++ case -ESHUTDOWN: /* disconnect from host */ ++ VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status, ++ req->actual, req->length); ++ if (ep == dev->out_ep) ++ check_read_data (dev, ep, req); ++ free_ep_req (ep, req); ++ return; ++ ++ case -EOVERFLOW: /* buffer overrun on read means that ++ * we didn't provide a big enough ++ * buffer. ++ */ ++ default: ++#if 1 ++ DBG (dev, "%s complete --> %d, %d/%d\n", ep->name, ++ status, req->actual, req->length); ++#endif ++ case -EREMOTEIO: /* short read */ ++ break; ++ } ++ ++ status = usb_ep_queue (ep, req, GFP_ATOMIC); ++ if (status) { ++ ERROR (dev, "kill %s: resubmit %d bytes --> %d\n", ++ ep->name, req->length, status); ++ usb_ep_set_halt (ep); ++ /* FIXME recover later ... somehow */ ++ } ++} ++ ++static struct usb_request * ++zero_start_isoc_ep (struct usb_ep *ep, int gfp_flags) ++{ ++ struct usb_request *req; ++ int status; ++ ++ req = alloc_ep_req (ep, 512); ++ if (!req) ++ return NULL; ++ ++ req->complete = zero_isoc_complete; ++ ++ status = usb_ep_queue (ep, req, gfp_flags); ++ if (status) { ++ struct zero_dev *dev = ep->driver_data; ++ ++ ERROR (dev, "start %s --> %d\n", ep->name, status); ++ free_ep_req (ep, req); ++ req = NULL; ++ } ++ ++ return req; ++} ++ ++/* change our operational config. this code must agree with the code ++ * that returns config descriptors, and altsetting code. 
++ * ++ * it's also responsible for power management interactions. some ++ * configurations might not work with our current power sources. ++ * ++ * note that some device controller hardware will constrain what this ++ * code can do, perhaps by disallowing more than one configuration or ++ * by limiting configuration choices (like the pxa2xx). ++ */ ++static int ++zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags) ++{ ++ int result = 0; ++ struct usb_gadget *gadget = dev->gadget; ++ const struct usb_endpoint_descriptor *d; ++ struct usb_ep *ep; ++ ++ if (number == dev->config) ++ return 0; ++ ++ zero_reset_config (dev); ++ ++ gadget_for_each_ep (ep, gadget) { ++ ++ if (strcmp (ep->name, "ep4") == 0) { ++ ++ d = (struct usb_endpoint_descripter *)&za_23; // isoc ep desc for audio i/f alt setting 6 ++ result = usb_ep_enable (ep, d); ++ ++ if (result == 0) { ++ ep->driver_data = dev; ++ dev->in_ep = ep; ++ ++ if (zero_start_isoc_ep (ep, gfp_flags) != 0) { ++ ++ dev->in_ep = ep; ++ continue; ++ } ++ ++ usb_ep_disable (ep); ++ result = -EIO; ++ } ++ } ++ ++ } ++ ++ dev->config = number; ++ return result; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req) ++{ ++ if (req->status || req->actual != req->length) ++ DBG ((struct zero_dev *) ep->driver_data, ++ "setup complete --> %d, %d/%d\n", ++ req->status, req->actual, req->length); ++} ++ ++/* ++ * The setup() callback implements all the ep0 functionality that's ++ * not handled lower down, in hardware or the hardware driver (like ++ * device and endpoint feature flags, and their status). It's all ++ * housekeeping for the gadget function we're implementing. Most of ++ * the work is in config-specific setup. 
++ */ ++static int ++zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ struct usb_request *req = dev->req; ++ int value = -EOPNOTSUPP; ++ ++ /* usually this stores reply data in the pre-allocated ep0 buffer, ++ * but config change events will reconfigure hardware. ++ */ ++ req->zero = 0; ++ switch (ctrl->bRequest) { ++ ++ case USB_REQ_GET_DESCRIPTOR: ++ ++ switch (ctrl->wValue >> 8) { ++ ++ case USB_DT_DEVICE: ++ value = min (ctrl->wLength, (u16) sizeof device_desc); ++ memcpy (req->buf, &device_desc, value); ++ break; ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ case USB_DT_DEVICE_QUALIFIER: ++ if (!gadget->is_dualspeed) ++ break; ++ value = min (ctrl->wLength, (u16) sizeof dev_qualifier); ++ memcpy (req->buf, &dev_qualifier, value); ++ break; ++ ++ case USB_DT_OTHER_SPEED_CONFIG: ++ if (!gadget->is_dualspeed) ++ break; ++ // FALLTHROUGH ++#endif /* CONFIG_USB_GADGET_DUALSPEED */ ++ case USB_DT_CONFIG: ++ value = config_buf (gadget, req->buf, ++ ctrl->wValue >> 8, ++ ctrl->wValue & 0xff); ++ if (value >= 0) ++ value = min (ctrl->wLength, (u16) value); ++ break; ++ ++ case USB_DT_STRING: ++ /* wIndex == language code. 
++ * this driver only handles one language, you can ++ * add string tables for other languages, using ++ * any UTF-8 characters ++ */ ++ value = usb_gadget_get_string (&stringtab, ++ ctrl->wValue & 0xff, req->buf); ++ if (value >= 0) { ++ value = min (ctrl->wLength, (u16) value); ++ } ++ break; ++ } ++ break; ++ ++ /* currently two configs, two speeds */ ++ case USB_REQ_SET_CONFIGURATION: ++ if (ctrl->bRequestType != 0) ++ goto unknown; ++ ++ spin_lock (&dev->lock); ++ value = zero_set_config (dev, ctrl->wValue, GFP_ATOMIC); ++ spin_unlock (&dev->lock); ++ break; ++ case USB_REQ_GET_CONFIGURATION: ++ if (ctrl->bRequestType != USB_DIR_IN) ++ goto unknown; ++ *(u8 *)req->buf = dev->config; ++ value = min (ctrl->wLength, (u16) 1); ++ break; ++ ++ /* until we add altsetting support, or other interfaces, ++ * only 0/0 are possible. pxa2xx only supports 0/0 (poorly) ++ * and already killed pending endpoint I/O. ++ */ ++ case USB_REQ_SET_INTERFACE: ++ ++ if (ctrl->bRequestType != USB_RECIP_INTERFACE) ++ goto unknown; ++ spin_lock (&dev->lock); ++ if (dev->config) { ++ u8 config = dev->config; ++ ++ /* resets interface configuration, forgets about ++ * previous transaction state (queued bufs, etc) ++ * and re-inits endpoint state (toggle etc) ++ * no response queued, just zero status == success. ++ * if we had more than one interface we couldn't ++ * use this "reset the config" shortcut. 
++ */ ++ zero_reset_config (dev); ++ zero_set_config (dev, config, GFP_ATOMIC); ++ value = 0; ++ } ++ spin_unlock (&dev->lock); ++ break; ++ case USB_REQ_GET_INTERFACE: ++ if ((ctrl->bRequestType == 0x21) && (ctrl->wIndex == 0x02)) { ++ value = ctrl->wLength; ++ break; ++ } ++ else { ++ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) ++ goto unknown; ++ if (!dev->config) ++ break; ++ if (ctrl->wIndex != 0) { ++ value = -EDOM; ++ break; ++ } ++ *(u8 *)req->buf = 0; ++ value = min (ctrl->wLength, (u16) 1); ++ } ++ break; ++ ++ /* ++ * These are the same vendor-specific requests supported by ++ * Intel's USB 2.0 compliance test devices. We exceed that ++ * device spec by allowing multiple-packet requests. ++ */ ++ case 0x5b: /* control WRITE test -- fill the buffer */ ++ if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR)) ++ goto unknown; ++ if (ctrl->wValue || ctrl->wIndex) ++ break; ++ /* just read that many bytes into the buffer */ ++ if (ctrl->wLength > USB_BUFSIZ) ++ break; ++ value = ctrl->wLength; ++ break; ++ case 0x5c: /* control READ test -- return the buffer */ ++ if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR)) ++ goto unknown; ++ if (ctrl->wValue || ctrl->wIndex) ++ break; ++ /* expect those bytes are still in the buffer; send back */ ++ if (ctrl->wLength > USB_BUFSIZ ++ || ctrl->wLength != req->length) ++ break; ++ value = ctrl->wLength; ++ break; ++ ++ case 0x01: // SET_CUR ++ case 0x02: ++ case 0x03: ++ case 0x04: ++ case 0x05: ++ value = ctrl->wLength; ++ break; ++ case 0x81: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0xe3; ++ break; ++ case 0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x00; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x81; ++ //((u8*)req->buf)[1] = 0x81; ++ value = ctrl->wLength; ++ break; ++ case 0x82: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0xc3; ++ break; ++ case 
0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x00; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x82; ++ //((u8*)req->buf)[1] = 0x82; ++ value = ctrl->wLength; ++ break; ++ case 0x83: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0x00; ++ break; ++ case 0x0300: ++ ((u8*)req->buf)[0] = 0x60; ++ break; ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x18; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x83; ++ //((u8*)req->buf)[1] = 0x83; ++ value = ctrl->wLength; ++ break; ++ case 0x84: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0x01; ++ break; ++ case 0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x08; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x84; ++ //((u8*)req->buf)[1] = 0x84; ++ value = ctrl->wLength; ++ break; ++ case 0x85: ++ ((u8*)req->buf)[0] = 0x85; ++ ((u8*)req->buf)[1] = 0x85; ++ value = ctrl->wLength; ++ break; ++ ++ ++ default: ++unknown: ++ printk("unknown control req%02x.%02x v%04x i%04x l%d\n", ++ ctrl->bRequestType, ctrl->bRequest, ++ ctrl->wValue, ctrl->wIndex, ctrl->wLength); ++ } ++ ++ /* respond with data transfer before status phase? */ ++ if (value >= 0) { ++ req->length = value; ++ req->zero = value < ctrl->wLength ++ && (value % gadget->ep0->maxpacket) == 0; ++ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); ++ if (value < 0) { ++ DBG (dev, "ep_queue < 0 --> %d\n", value); ++ req->status = 0; ++ zero_setup_complete (gadget->ep0, req); ++ } ++ } ++ ++ /* device either stalls (value < 0) or reports success */ ++ return value; ++} ++ ++static void ++zero_disconnect (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ unsigned long flags; ++ ++ spin_lock_irqsave (&dev->lock, flags); ++ zero_reset_config (dev); ++ ++ /* a more significant application might have some non-usb ++ * activities to quiesce here, saving resources like power ++ * or pushing the notification up a network stack. 
++ */ ++ spin_unlock_irqrestore (&dev->lock, flags); ++ ++ /* next we may get setup() calls to enumerate new connections; ++ * or an unbind() during shutdown (including removing module). ++ */ ++} ++ ++static void ++zero_autoresume (unsigned long _dev) ++{ ++ struct zero_dev *dev = (struct zero_dev *) _dev; ++ int status; ++ ++ /* normally the host would be woken up for something ++ * more significant than just a timer firing... ++ */ ++ if (dev->gadget->speed != USB_SPEED_UNKNOWN) { ++ status = usb_gadget_wakeup (dev->gadget); ++ DBG (dev, "wakeup --> %d\n", status); ++ } ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void ++zero_unbind (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ DBG (dev, "unbind\n"); ++ ++ /* we've already been disconnected ... no i/o is active */ ++ if (dev->req) ++ free_ep_req (gadget->ep0, dev->req); ++ del_timer_sync (&dev->resume); ++ kfree (dev); ++ set_gadget_data (gadget, NULL); ++} ++ ++static int ++zero_bind (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev; ++ //struct usb_ep *ep; ++ ++ printk("binding\n"); ++ /* ++ * DRIVER POLICY CHOICE: you may want to do this differently. ++ * One thing to avoid is reusing a bcdDevice revision code ++ * with different host-visible configurations or behavior ++ * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc ++ */ ++ //device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201); ++ ++ ++ /* ok, we made sense of the hardware ... 
*/ ++ dev = kmalloc (sizeof *dev, SLAB_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ memset (dev, 0, sizeof *dev); ++ spin_lock_init (&dev->lock); ++ dev->gadget = gadget; ++ set_gadget_data (gadget, dev); ++ ++ /* preallocate control response and buffer */ ++ dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL); ++ if (!dev->req) ++ goto enomem; ++ dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ, ++ &dev->req->dma, GFP_KERNEL); ++ if (!dev->req->buf) ++ goto enomem; ++ ++ dev->req->complete = zero_setup_complete; ++ ++ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; ++ ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ /* assume ep0 uses the same value for both speeds ... */ ++ dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0; ++ ++ /* and that all endpoints are dual-speed */ ++ //hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; ++ //hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; ++#endif ++ ++ usb_gadget_set_selfpowered (gadget); ++ ++ init_timer (&dev->resume); ++ dev->resume.function = zero_autoresume; ++ dev->resume.data = (unsigned long) dev; ++ ++ gadget->ep0->driver_data = dev; ++ ++ INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname); ++ INFO (dev, "using %s, OUT %s IN %s\n", gadget->name, ++ EP_OUT_NAME, EP_IN_NAME); ++ ++ snprintf (manufacturer, sizeof manufacturer, ++ UTS_SYSNAME " " UTS_RELEASE " with %s", ++ gadget->name); ++ ++ return 0; ++ ++enomem: ++ zero_unbind (gadget); ++ return -ENOMEM; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void ++zero_suspend (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ if (gadget->speed == USB_SPEED_UNKNOWN) ++ return; ++ ++ if (autoresume) { ++ mod_timer (&dev->resume, jiffies + (HZ * autoresume)); ++ DBG (dev, "suspend, wakeup in %d seconds\n", autoresume); ++ } else ++ DBG (dev, "suspend\n"); ++} ++ ++static void ++zero_resume (struct usb_gadget *gadget) 
++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ DBG (dev, "resume\n"); ++ del_timer (&dev->resume); ++} ++ ++ ++/*-------------------------------------------------------------------------*/ ++ ++static struct usb_gadget_driver zero_driver = { ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ .speed = USB_SPEED_HIGH, ++#else ++ .speed = USB_SPEED_FULL, ++#endif ++ .function = (char *) longname, ++ .bind = zero_bind, ++ .unbind = zero_unbind, ++ ++ .setup = zero_setup, ++ .disconnect = zero_disconnect, ++ ++ .suspend = zero_suspend, ++ .resume = zero_resume, ++ ++ .driver = { ++ .name = (char *) shortname, ++ // .shutdown = ... ++ // .suspend = ... ++ // .resume = ... ++ }, ++}; ++ ++MODULE_AUTHOR ("David Brownell"); ++MODULE_LICENSE ("Dual BSD/GPL"); ++ ++static struct proc_dir_entry *pdir, *pfile; ++ ++static int isoc_read_data (char *page, char **start, ++ off_t off, int count, ++ int *eof, void *data) ++{ ++ int i; ++ static int c = 0; ++ static int done = 0; ++ static int s = 0; ++ ++/* ++ printk ("\ncount: %d\n", count); ++ printk ("rbuf_start: %d\n", rbuf_start); ++ printk ("rbuf_len: %d\n", rbuf_len); ++ printk ("off: %d\n", off); ++ printk ("start: %p\n\n", *start); ++*/ ++ if (done) { ++ c = 0; ++ done = 0; ++ *eof = 1; ++ return 0; ++ } ++ ++ if (c == 0) { ++ if (rbuf_len == RBUF_LEN) ++ s = rbuf_start; ++ else s = 0; ++ } ++ ++ for (i=0; i= rbuf_len) { ++ *eof = 1; ++ done = 1; ++ } ++ ++ ++ return i; ++} ++ ++static int __init init (void) ++{ ++ ++ int retval = 0; ++ ++ pdir = proc_mkdir("isoc_test", NULL); ++ if(pdir == NULL) { ++ retval = -ENOMEM; ++ printk("Error creating dir\n"); ++ goto done; ++ } ++ pdir->owner = THIS_MODULE; ++ ++ pfile = create_proc_read_entry("isoc_data", ++ 0444, pdir, ++ isoc_read_data, ++ NULL); ++ if (pfile == NULL) { ++ retval = -ENOMEM; ++ printk("Error creating file\n"); ++ goto no_file; ++ } ++ pfile->owner = THIS_MODULE; ++ ++ return usb_gadget_register_driver (&zero_driver); ++ ++ no_file: ++ 
remove_proc_entry("isoc_data", NULL); ++ done: ++ return retval; ++} ++module_init (init); ++ ++static void __exit cleanup (void) ++{ ++ ++ usb_gadget_unregister_driver (&zero_driver); ++ ++ remove_proc_entry("isoc_data", pdir); ++ remove_proc_entry("isoc_test", NULL); ++} ++module_exit (cleanup); +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_attr.c +@@ -0,0 +1,1055 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ ++ * $Revision: #31 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. The Linux driver attributes ++ * feature will be used to provide the Linux Diagnostic ++ * Interface. These attributes are accessed through sysfs. ++ */ ++ ++/** @page "Linux Module Attributes" ++ * ++ * The Linux module attributes feature is used to provide the Linux ++ * Diagnostic Interface. These attributes are accessed through sysfs. ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. ++ ++ ++ The following table shows the attributes. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++
Name Description Access
mode Returns the current mode: 0 for device mode, 1 for host mode Read
hnpcapable Gets or sets the "HNP-capable" bit in the Core USB Configuraton Register. ++ Read returns the current value. Read/Write
srpcapable Gets or sets the "SRP-capable" bit in the Core USB Configuraton Register. ++ Read returns the current value. Read/Write
hnp Initiates the Host Negotiation Protocol. Read returns the status. Read/Write
srp Initiates the Session Request Protocol. Read returns the status. Read/Write
buspower Gets or sets the Power State of the bus (0 - Off or 1 - On) Read/Write
bussuspend Suspends the USB bus. Read/Write
busconnected Gets the connection status of the bus Read
gotgctl Gets or sets the Core Control Status Register. Read/Write
gusbcfg Gets or sets the Core USB Configuration Register Read/Write
grxfsiz Gets or sets the Receive FIFO Size Register Read/Write
gnptxfsiz Gets or sets the non-periodic Transmit Size Register Read/Write
gpvndctl Gets or sets the PHY Vendor Control Register Read/Write
ggpio Gets the value in the lower 16-bits of the General Purpose IO Register ++ or sets the upper 16 bits. Read/Write
guid Gets or sets the value of the User ID Register Read/Write
gsnpsid Gets the value of the Synopsys ID Regester Read
devspeed Gets or sets the device speed setting in the DCFG register Read/Write
enumspeed Gets the device enumeration Speed. Read
hptxfsiz Gets the value of the Host Periodic Transmit FIFO Read
hprt0 Gets or sets the value in the Host Port Control and Status Register Read/Write
regoffset Sets the register offset for the next Register Access Read/Write
regvalue Gets or sets the value of the register at the offset in the regoffset attribute. Read/Write
remote_wakeup On read, shows the status of Remote Wakeup. On write, initiates a remote ++ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote ++ Wakeup signalling bit in the Device Control Register is set for 1 ++ milli-second. Read/Write
regdump Dumps the contents of core registers. Read
spramdump Dumps the contents of core registers. Read
hcddump Dumps the current HCD state. Read
hcd_frrem Shows the average value of the Frame Remaining ++ field in the Host Frame Number/Frame Remaining register when an SOF interrupt ++ occurs. This can be used to determine the average interrupt latency. Also ++ shows the average Frame Remaining value for start_transfer and the "a" and ++ "b" sample points. The "a" and "b" sample points may be used during debugging ++ bto determine how long it takes to execute a section of the HCD code. Read
rd_reg_test Displays the time required to read the GNPTXFSIZ register many times ++ (the output shows the number of times the register is read). ++ Read
wr_reg_test Displays the time required to write the GNPTXFSIZ register many times ++ (the output shows the number of times the register is written). ++ Read
++ ++ Example usage: ++ To get the current mode: ++ cat /sys/devices/lm0/mode ++ ++ To power down the USB: ++ echo 0 > /sys/devices/lm0/buspower ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++ ++#include ++#include ++//#include ++#include ++#include ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++#include "dwc_otg_hcd.h" ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++/* ++ * MACROs for defining sysfs attribute for 32-bit registers ++ */ ++#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct 
device_attribute *attr, char *buf) \ ++{ \ ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); \ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); \ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#else ++ ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++/* ++ * MACROs for defining sysfs attribute for 32-bit registers ++ */ ++#define 
DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#endif ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++ ++/** @name Functions for Show/Store of Attributes */ ++/**@{*/ ++ ++/** ++ * Show the register offset of the Register Access. 
++ */ ++static ssize_t regoffset_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); ++} ++ ++/** ++ * Set the register offset for the next Register Access Read/Write ++ */ ++static ssize_t regoffset_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t offset = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x\n", offset); ++ if (offset < SZ_256K ) { ++ otg_dev->reg_offset = offset; ++ } ++ else { ++ dev_err( _dev, "invalid offset\n" ); ++ } ++ ++ return count; ++} ++DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, (void *)regoffset_show, regoffset_store); ++ ++ ++/** ++ * Show the value of the register at the offset in the reg_offset ++ * attribute. 
++ */ ++static ssize_t regvalue_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t val; ++ volatile uint32_t *addr; ++ ++ if (otg_dev->reg_offset != 0xFFFFFFFF && ++ 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ val = dwc_read_reg32( addr ); ++ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, ++ "Reg@0x%06x = 0x%08x\n", ++ otg_dev->reg_offset, val); ++ } ++ else { ++ dev_err(_dev, "Invalid offset (0x%0x)\n", ++ otg_dev->reg_offset); ++ return sprintf(buf, "invalid offset\n" ); ++ } ++} ++ ++/** ++ * Store the value in the register at the offset in the reg_offset ++ * attribute. 
++ * ++ */ ++static ssize_t regvalue_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ volatile uint32_t * addr; ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); ++ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ dwc_write_reg32( addr, val ); ++ } ++ else { ++ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", ++ otg_dev->reg_offset); ++ } ++ return count; ++} ++DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); ++ ++/* ++ * Attributes ++ */ ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); ++ ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); 
++DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); ++DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); ++ ++ ++/** ++ * @todo Add code to initiate the HNP. 
++ */ ++/** ++ * Show the HNP status bit ++ */ ++static ssize_t hnp_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); ++} ++ ++/** ++ * Set the HNP Request bit ++ */ ++static ssize_t hnp_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.hnpreq = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); ++ ++/** ++ * @todo Add code to initiate the SRP. 
++ */ ++/** ++ * Show the SRP status bit ++ */ ++static ssize_t srp_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++ ++ ++ ++/** ++ * Set the SRP Request bit ++ */ ++static ssize_t srp_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dwc_otg_pcd_initiate_srp(otg_dev->pcd); ++#endif ++ return count; ++} ++DEVICE_ATTR(srp, 0644, srp_show, srp_store); ++ ++/** ++ * @todo Need to do more for power on/off? 
++ */ ++/** ++ * Show the Bus Power status ++ */ ++static ssize_t buspower_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); ++} ++ ++ ++/** ++ * Set the Bus Power status ++ */ ++static ssize_t buspower_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t on = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtpwr = on; ++ ++ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ ++ return count; ++} ++DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); ++ ++/** ++ * @todo Need to do more for suspend? 
++ */ ++/** ++ * Show the Bus Suspend status ++ */ ++static ssize_t bussuspend_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); ++} ++ ++/** ++ * Set the Bus Suspend status ++ */ ++static ssize_t bussuspend_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtsusp = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); ++ ++/** ++ * Show the status of Remote Wakeup. 
++ */ ++static ssize_t remote_wakeup_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dctl_data_t val; ++ val.d32 = ++ dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); ++ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", ++ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++/** ++ * Initiate a remote wakeup of the host. The Device control register ++ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable ++ * flag is set. ++ * ++ */ ++static ssize_t remote_wakeup_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ if (val&1) { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); ++ } ++ else { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); ++ } ++#endif ++ return count; ++} ++DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, ++ remote_wakeup_store); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). 
++ */ ++static ssize_t regdump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dwc_otg_dump_global_registers( otg_dev->core_if); ++ if (dwc_otg_is_host_mode(otg_dev->core_if)) { ++ dwc_otg_dump_host_registers( otg_dev->core_if); ++ } else { ++ dwc_otg_dump_dev_registers( otg_dev->core_if); ++ ++ } ++ return sprintf( buf, "Register Dump\n" ); ++} ++ ++DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). ++ */ ++static ssize_t spramdump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dwc_otg_dump_spram( otg_dev->core_if); ++ ++ return sprintf( buf, "SPRAM Dump\n" ); ++} ++ ++DEVICE_ATTR(spramdump, S_IRUGO|S_IWUSR, spramdump_show, 0); ++ ++/** ++ * Dump the current hcd state. 
++ */ ++static ssize_t hcddump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dwc_otg_hcd_dump_state(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump\n" ); ++} ++ ++DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); ++ ++/** ++ * Dump the average frame remaining at SOF. This can be used to ++ * determine average interrupt latency. Frame remaining is also shown for ++ * start transfer and two additional sample points. ++ */ ++static ssize_t hcd_frrem_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ dwc_otg_hcd_dump_frrem(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump Frame Remaining\n" ); ++} ++ ++DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); ++ ++/** ++ * Displays the time required to read the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is read). 
++ */ ++#define RW_REG_COUNT 10000000 ++#define MSEC_PER_JIFFIE 1000/HZ ++static ssize_t rd_reg_test_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); ++} ++ ++DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); ++ ++/** ++ * Displays the time required to write the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is written). 
++ */ ++static ssize_t wr_reg_test_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct lm_device *lm_dev = container_of(_dev, struct lm_device, dev); ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lm_dev); ++#else ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++#endif ++ uint32_t reg_val; ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); ++} ++ ++DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); ++/**@}*/ ++ ++/** ++ * Create the device files ++ */ ++void dwc_otg_attr_create (struct lm_device *lmdev) ++{ ++ int error; ++ ++ error = device_create_file(&lmdev->dev, &dev_attr_regoffset); ++ error = device_create_file(&lmdev->dev, &dev_attr_regvalue); ++ error = device_create_file(&lmdev->dev, &dev_attr_mode); ++ error = device_create_file(&lmdev->dev, &dev_attr_hnpcapable); ++ error = device_create_file(&lmdev->dev, &dev_attr_srpcapable); ++ error = device_create_file(&lmdev->dev, &dev_attr_hnp); ++ error = device_create_file(&lmdev->dev, &dev_attr_srp); ++ error = device_create_file(&lmdev->dev, &dev_attr_buspower); ++ error = device_create_file(&lmdev->dev, &dev_attr_bussuspend); ++ error = device_create_file(&lmdev->dev, &dev_attr_busconnected); ++ error = device_create_file(&lmdev->dev, &dev_attr_gotgctl); ++ error = device_create_file(&lmdev->dev, &dev_attr_gusbcfg); ++ error = 
device_create_file(&lmdev->dev, &dev_attr_grxfsiz); ++ error = device_create_file(&lmdev->dev, &dev_attr_gnptxfsiz); ++ error = device_create_file(&lmdev->dev, &dev_attr_gpvndctl); ++ error = device_create_file(&lmdev->dev, &dev_attr_ggpio); ++ error = device_create_file(&lmdev->dev, &dev_attr_guid); ++ error = device_create_file(&lmdev->dev, &dev_attr_gsnpsid); ++ error = device_create_file(&lmdev->dev, &dev_attr_devspeed); ++ error = device_create_file(&lmdev->dev, &dev_attr_enumspeed); ++ error = device_create_file(&lmdev->dev, &dev_attr_hptxfsiz); ++ error = device_create_file(&lmdev->dev, &dev_attr_hprt0); ++ error = device_create_file(&lmdev->dev, &dev_attr_remote_wakeup); ++ error = device_create_file(&lmdev->dev, &dev_attr_regdump); ++ error = device_create_file(&lmdev->dev, &dev_attr_spramdump); ++ error = device_create_file(&lmdev->dev, &dev_attr_hcddump); ++ error = device_create_file(&lmdev->dev, &dev_attr_hcd_frrem); ++ error = device_create_file(&lmdev->dev, &dev_attr_rd_reg_test); ++ error = device_create_file(&lmdev->dev, &dev_attr_wr_reg_test); ++} ++ ++/** ++ * Remove the device files ++ */ ++void dwc_otg_attr_remove (struct lm_device *lmdev) ++{ ++ device_remove_file(&lmdev->dev, &dev_attr_regoffset); ++ device_remove_file(&lmdev->dev, &dev_attr_regvalue); ++ device_remove_file(&lmdev->dev, &dev_attr_mode); ++ device_remove_file(&lmdev->dev, &dev_attr_hnpcapable); ++ device_remove_file(&lmdev->dev, &dev_attr_srpcapable); ++ device_remove_file(&lmdev->dev, &dev_attr_hnp); ++ device_remove_file(&lmdev->dev, &dev_attr_srp); ++ device_remove_file(&lmdev->dev, &dev_attr_buspower); ++ device_remove_file(&lmdev->dev, &dev_attr_bussuspend); ++ device_remove_file(&lmdev->dev, &dev_attr_busconnected); ++ device_remove_file(&lmdev->dev, &dev_attr_gotgctl); ++ device_remove_file(&lmdev->dev, &dev_attr_gusbcfg); ++ device_remove_file(&lmdev->dev, &dev_attr_grxfsiz); ++ device_remove_file(&lmdev->dev, &dev_attr_gnptxfsiz); ++ device_remove_file(&lmdev->dev, 
&dev_attr_gpvndctl); ++ device_remove_file(&lmdev->dev, &dev_attr_ggpio); ++ device_remove_file(&lmdev->dev, &dev_attr_guid); ++ device_remove_file(&lmdev->dev, &dev_attr_gsnpsid); ++ device_remove_file(&lmdev->dev, &dev_attr_devspeed); ++ device_remove_file(&lmdev->dev, &dev_attr_enumspeed); ++ device_remove_file(&lmdev->dev, &dev_attr_hptxfsiz); ++ device_remove_file(&lmdev->dev, &dev_attr_hprt0); ++ device_remove_file(&lmdev->dev, &dev_attr_remote_wakeup); ++ device_remove_file(&lmdev->dev, &dev_attr_regdump); ++ device_remove_file(&lmdev->dev, &dev_attr_spramdump); ++ device_remove_file(&lmdev->dev, &dev_attr_hcddump); ++ device_remove_file(&lmdev->dev, &dev_attr_hcd_frrem); ++ device_remove_file(&lmdev->dev, &dev_attr_rd_reg_test); ++ device_remove_file(&lmdev->dev, &dev_attr_wr_reg_test); ++} +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_attr.h +@@ -0,0 +1,67 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ ++ * $Revision: #7 $ ++ * $Date: 2005/03/28 $ ++ * $Change: 477051 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_ATTR_H__) ++#define __DWC_OTG_ATTR_H__ ++ ++/** @file ++ * This file contains the interface to the Linux device attributes. 
++ */ ++extern struct device_attribute dev_attr_regoffset; ++extern struct device_attribute dev_attr_regvalue; ++ ++extern struct device_attribute dev_attr_mode; ++extern struct device_attribute dev_attr_hnpcapable; ++extern struct device_attribute dev_attr_srpcapable; ++extern struct device_attribute dev_attr_hnp; ++extern struct device_attribute dev_attr_srp; ++extern struct device_attribute dev_attr_buspower; ++extern struct device_attribute dev_attr_bussuspend; ++extern struct device_attribute dev_attr_busconnected; ++extern struct device_attribute dev_attr_gotgctl; ++extern struct device_attribute dev_attr_gusbcfg; ++extern struct device_attribute dev_attr_grxfsiz; ++extern struct device_attribute dev_attr_gnptxfsiz; ++extern struct device_attribute dev_attr_gpvndctl; ++extern struct device_attribute dev_attr_ggpio; ++extern struct device_attribute dev_attr_guid; ++extern struct device_attribute dev_attr_gsnpsid; ++extern struct device_attribute dev_attr_devspeed; ++extern struct device_attribute dev_attr_enumspeed; ++extern struct device_attribute dev_attr_hptxfsiz; ++extern struct device_attribute dev_attr_hprt0; ++ ++void dwc_otg_attr_create (struct lm_device *lmdev); ++void dwc_otg_attr_remove (struct lm_device *lmdev); ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_cil.c +@@ -0,0 +1,3842 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ ++ * $Revision: #147 $ ++ * $Date: 2008/10/16 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. 
++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * The CIL manages the memory map for the core so that the HCD and PCD ++ * don't have to do this separately. 
It also handles basic tasks like ++ * reading/writing the registers and data FIFOs in the controller. ++ * Some of the data access functions provide encapsulation of several ++ * operations required to perform a task, such as writing multiple ++ * registers to start a transfer. Finally, the CIL performs basic ++ * services that are not specific to either the host or device modes ++ * of operation. These services include management of the OTG Host ++ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A ++ * Diagnostic API is also provided to allow testing of the controller ++ * hardware. ++ * ++ * The Core Interface Layer has the following requirements: ++ * - Provides basic controller operations. ++ * - Minimal use of OS services. ++ * - The OS services used will be abstracted by using inline functions ++ * or macros. ++ * ++ */ ++#include ++#include ++#ifdef DEBUG ++#include ++#endif ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++#include "dwc_otg_pcd.h" ++ ++ ++/** ++ * This function is called to initialize the DWC_otg CSR data ++ * structures. The register addresses in the device and host ++ * structures are initialized from the base address supplied by the ++ * caller. The calling function must make the OS calls to get the ++ * base address of the DWC_otg controller registers. The core_params ++ * argument holds the parameters that specify how the core should be ++ * configured. 
++ * ++ * @param[in] reg_base_addr Base address of DWC_otg core registers ++ * @param[in] core_params Pointer to the core configuration parameters ++ * ++ */ ++dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr, ++ dwc_otg_core_params_t *core_params) ++{ ++ dwc_otg_core_if_t *core_if = 0; ++ dwc_otg_dev_if_t *dev_if = 0; ++ dwc_otg_host_if_t *host_if = 0; ++ uint8_t *reg_base = (uint8_t *)reg_base_addr; ++ int i = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params); ++ ++ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL); ++ ++ if (core_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); ++ return 0; ++ } ++ ++ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); ++ ++ core_if->core_params = core_params; ++ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; ++ ++ /* ++ * Allocate the Device Mode structures. ++ */ ++ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL); ++ ++ if (dev_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); ++ kfree(core_if); ++ return 0; ++ } ++ ++ dev_if->dev_global_regs = ++ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); ++ ++ for (i=0; iin_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) ++ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ ++ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) ++ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", ++ i, &dev_if->in_ep_regs[i]->diepctl); ++ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", ++ i, &dev_if->out_ep_regs[i]->doepctl); ++ } ++ ++ dev_if->speed = 0; // unknown ++ ++ core_if->dev_if = dev_if; ++ ++ /* ++ * Allocate the Host Mode structures. 
++ */ ++ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL); ++ ++ if (host_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0; ++ } ++ ++ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) ++ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); ++ ++ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); ++ ++ for (i=0; ihc_regs[i] = (dwc_otg_hc_regs_t *) ++ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + ++ (i * DWC_OTG_CHAN_REGS_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", ++ i, &host_if->hc_regs[i]->hcchar); ++ } ++ ++ host_if->num_host_channels = MAX_EPS_CHANNELS; ++ core_if->host_if = host_if; ++ ++ for (i=0; idata_fifo[i] = ++ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + ++ (i * DWC_OTG_DATA_FIFO_SIZE)); ++ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", ++ i, (unsigned)core_if->data_fifo[i]); ++ } ++ ++ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); ++ ++ /* ++ * Store the contents of the hardware configuration registers here for ++ * easy access later. 
++ */ ++ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); ++ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); ++ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); ++ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); ++ ++ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); ++ ++ core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32); ++ DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32); ++ ++ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); ++ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); ++ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); ++ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); ++ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); ++ ++ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); ++ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); ++ ++ /* ++ * Set the SRP sucess bit for FS-I2c ++ */ ++ core_if->srp_success = 0; ++ core_if->srp_timer_started = 0; ++ ++ ++ /* ++ * Create new workqueue and init works ++ */ ++ core_if->wq_otg = create_singlethread_workqueue("dwc_otg"); ++ if(core_if->wq_otg == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n"); ++ kfree(host_if); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0 * 
HZ; ++ } ++ ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change, core_if); ++ INIT_WORK(&core_if->w_wkp, w_wakeup_detected, core_if); ++ ++#else ++ ++ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change); ++ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected); ++ ++#endif ++ return core_if; ++} ++ ++/** ++ * This function frees the structures allocated by dwc_otg_cil_init(). ++ * ++ * @param[in] core_if The core interface pointer returned from ++ * dwc_otg_cil_init(). ++ * ++ */ ++void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if) ++{ ++ /* Disable all interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0); ++ dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0); ++ ++ if (core_if->wq_otg) { ++ destroy_workqueue(core_if->wq_otg); ++ } ++ if (core_if->dev_if) { ++ kfree(core_if->dev_if); ++ } ++ if (core_if->host_if) { ++ kfree(core_if->host_if); ++ } ++ kfree(core_if); ++} ++ ++/** ++ * This function enables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); ++} ++ ++/** ++ * This function disables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); ++} ++ ++/** ++ * This function initializes the commmon interrupts, used in both ++ * device and host modes. 
++ * ++ * @param[in] core_if Programming view of the DWC_otg controller ++ * ++ */ ++static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ /* Clear any pending OTG Interrupts */ ++ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* ++ * Enable the interrupts in the GINTMSK. ++ */ ++ intr_mask.b.modemismatch = 1; ++ intr_mask.b.otgintr = 1; ++ ++ if (!core_if->dma_enable) { ++ intr_mask.b.rxstsqlvl = 1; ++ } ++ ++ intr_mask.b.conidstschng = 1; ++ intr_mask.b.wkupintr = 1; ++ intr_mask.b.disconnect = 1; ++ intr_mask.b.usbsuspend = 1; ++ intr_mask.b.sessreqintr = 1; ++ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32); ++} ++ ++/** ++ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY ++ * type. ++ */ ++static void init_fslspclksel(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ hcfg_data_t hcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = DWC_HCFG_48_MHZ; ++ } ++ else { ++ /* High speed PHY running at full speed or high speed */ ++ val = DWC_HCFG_30_60_MHZ; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); ++ hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ hcfg.b.fslspclksel = val; ++ dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32); ++} ++ ++/** ++ * Initializes the DevSpd field of the DCFG register depending on the PHY type ++ * and the enumeration speed of the device. 
++ */ ++static void init_devspd(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ dcfg_data_t dcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = 0x3; ++ } ++ else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { ++ /* High speed PHY running at full speed */ ++ val = 0x1; ++ } ++ else { ++ /* High speed PHY running at high speed */ ++ val = 0x0; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ dcfg.b.devspd = val; ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); ++} ++ ++/** ++ * This function calculates the number of IN EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_in_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3; ++ uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps; ++ int i; ++ ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x1)) ++ num_in_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ num_in_eps = (num_in_eps > num_tx_fifos) ? 
num_tx_fifos : num_in_eps; ++ } ++ ++ return num_in_eps; ++} ++ ++ ++/** ++ * This function calculates the number of OUT EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_out_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2; ++ int i; ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x2)) ++ num_out_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ return num_out_eps; ++} ++/** ++ * This function initializes the DWC_otg controller registers and ++ * prepares the core for device mode or host mode operation. ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ * ++ */ ++void dwc_otg_core_init(dwc_otg_core_if_t *core_if) ++{ ++ int i = 0; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ gahbcfg_data_t ahbcfg = { .d32 = 0 }; ++ gusbcfg_data_t usbcfg = { .d32 = 0 }; ++ gi2cctl_data_t i2cctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if); ++ ++ /* Common Initialization */ ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++// usbcfg.b.tx_end_delay = 1; ++ /* Program the ULPI External VBUS bit if needed */ ++ usbcfg.b.ulpi_ext_vbus_drv = ++ (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; ++ ++ /* Set external TS Dline pulsing */ ++ usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ ++ /* Reset the Controller */ ++ dwc_otg_core_reset(core_if); ++ ++ /* Initialize parameters from Hardware configuration registers. 
*/ ++ dev_if->num_in_eps = calc_num_in_eps(core_if); ++ dev_if->num_out_eps = calc_num_out_eps(core_if); ++ ++ ++ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep); ++ ++ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) ++ { ++ dev_if->perio_tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++) ++ { ++ dev_if->tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth; ++ core_if->rx_fifo_size = ++ dwc_read_reg32(&global_regs->grxfsiz); ++ core_if->nperio_tx_fifo_size = ++ dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; ++ ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size); ++ ++ /* This programming sequence needs to happen in FS mode before any other ++ * programming occurs */ ++ if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* If FS mode with FS PHY */ ++ ++ /* core_init() is now called on every switch so only call the ++ * following for the first time through. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.physel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after a PHY select */ ++ dwc_otg_core_reset(core_if); ++ } ++ ++ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also ++ * do this on HNP Dev/Host mode switches (done in dev_init and ++ * host_init). 
*/ ++ if (dwc_otg_is_host_mode(core_if)) { ++ init_fslspclksel(core_if); ++ } ++ else { ++ init_devspd(core_if); ++ } ++ ++ if (core_if->core_params->i2c_enable) { ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); ++ /* Program GUSBCFG.OtgUtmifsSel to I2C */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.otgutmifssel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Program GI2CCTL.I2CEn */ ++ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); ++ i2cctl.b.i2cdevaddr = 1; ++ i2cctl.b.i2cen = 0; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ i2cctl.b.i2cen = 1; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ } ++ ++ } /* endif speed == DWC_SPEED_PARAM_FULL */ ++ ++ else { ++ /* High speed PHY. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ /* HS PHY parameters. These parameters are preserved ++ * during soft reset so only program the first time. Do ++ * a soft reset immediately after setting phyif. */ ++ usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type; ++ if (usbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ usbcfg.b.phyif = 0; ++ usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (core_if->core_params->phy_utmi_width == 16) { ++ usbcfg.b.phyif = 1; ++ } ++ else { ++ usbcfg.b.phyif = 0; ++ } ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after setting the PHY parameters */ ++ dwc_otg_core_reset(core_if); ++ } ++ } ++ ++ if ((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) { ++ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 1; ++ usbcfg.b.ulpi_clk_sus_m = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ else { ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 0; ++ usbcfg.b.ulpi_clk_sus_m = 0; 
++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ ++ /* Program the GAHBCFG Register.*/ ++ switch (core_if->hwcfg2.b.architecture) { ++ ++ case DWC_SLAVE_ONLY_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); ++ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ core_if->dma_enable = 0; ++ core_if->dma_desc_enable = 0; ++ break; ++ ++ case DWC_EXT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); ++ ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ case DWC_INT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); ++ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ } ++ ahbcfg.b.dmaenable = core_if->dma_enable; ++ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); ++ ++ core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en; ++ ++ core_if->pti_enh_enable = core_if->core_params->pti_enable != 0; ++ core_if->multiproc_int_enable = core_if->core_params->mpi_enable; ++ DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled")); ++ DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled")); ++ ++ /* ++ * Program the GUSBCFG register. 
++ */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ switch (core_if->hwcfg2.b.op_mode) { ++ case DWC_MODE_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = (core_if->core_params->otg_cap == ++ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_SRP_ONLY_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Enable common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Do device or host intialization based on mode during PCD ++ * and HCD initialization */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ DWC_DEBUGPL(DBG_ANY, "Host Mode\n"); ++ core_if->op_state = A_HOST; ++ } ++ else { ++ DWC_DEBUGPL(DBG_ANY, "Device Mode\n"); ++ core_if->op_state = B_PERIPHERAL; ++#ifdef DWC_DEVICE_ONLY ++ dwc_otg_core_dev_init(core_if); ++#endif ++ } ++} ++ ++ ++/** ++ * This function enables the Device mode interrupts. 
++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Enable interrupts */ ++ intr_mask.b.usbreset = 1; ++ intr_mask.b.enumdone = 1; ++ ++ if(!core_if->multiproc_int_enable) { ++ intr_mask.b.inepintr = 1; ++ intr_mask.b.outepintr = 1; ++ } ++ ++ intr_mask.b.erlysuspend = 1; ++ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.epmismatch = 1; ++ } ++ ++ ++#ifdef DWC_EN_ISOC ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable == 0) { ++ if(core_if->pti_enh_enable) { ++ dctl_data_t dctl = { .d32 = 0 }; ++ dctl.b.ifrmnum = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++ } ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++#endif // DWC_EN_ISOC ++ ++/** @todo NGS: Should this be a module parameter? */ ++#ifdef USE_PERIODIC_EP ++ intr_mask.b.isooutdrop = 1; ++ intr_mask.b.eopframe = 1; ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++#endif ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintmsk)); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * device mode. 
++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if) ++{ ++ int i,size; ++ u_int32_t *default_value_array; ++ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ dcfg_data_t dcfg = { .d32 = 0}; ++ grstctl_t resetctl = { .d32 = 0 }; ++ uint32_t rx_fifo_size; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t txfifosize; ++ dthrctl_data_t dthrctl; ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Device configuration register */ ++ init_devspd(core_if); ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0; ++ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ rx_fifo_size = params->dev_rx_fifo_size; ++ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size); ++ ++ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /** Set Periodic Tx FIFO Mask all bits 0 */ ++ core_if->p_tx_msk = 0; ++ ++ /** Set Tx FIFO Mask all bits 0 */ ++ core_if->tx_msk = 0; ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ ++ 
dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ if(core_if->en_multiple_tx_fifo == 0) { ++ //core_if->hwcfg4.b.ded_fifo_en==0 ++ ++ /**@todo NGS: Fix Periodic FIFO Sizing! */ ++ /* ++ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_perio_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz array run from 0 to 14. ++ */ ++ /** @todo Finish debug of this */ ++ size=core_if->hwcfg4.b.num_dev_perio_in_ep; ++ default_value_array=params->dev_perio_tx_fifo_size; ++ ++ } ++ else { ++ //core_if->hwcfg4.b.ded_fifo_en==1 ++ /* ++ * Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz_dieptxf array run from 0 to 14. ++ */ ++ ++ size=core_if->hwcfg4.b.num_in_eps; ++ default_value_array=params->dev_tx_fifo_size; ++ ++ } ++ for (i=0; i < size; i++) ++ { ++ ++ txfifosize.b.depth = default_value_array[i]; ++ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i], ++ txfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ txfifosize.b.startaddr += txfifosize.b.depth; ++ } ++ } ++ /* Flush the FIFOs */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */ ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush the Learning Queue. 
*/ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ /* Clear all pending Device Interrupts */ ++ ++ if(core_if->multiproc_int_enable) { ++ } ++ ++ /** @todo - if the condition needed to be checked ++ * or in any case all pending interrutps should be cleared? ++ */ ++ if(core_if->multiproc_int_enable) { ++ for(i = 0; i < core_if->dev_if->num_in_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0); ++ } ++ ++ for(i = 0; i < core_if->dev_if->num_out_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0); ++ } ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0); ++ } else { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0); ++ } ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32); ++ ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); ++ } ++ ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32); ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0); ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0); ++ 
dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF); ++ } ++ ++ if(core_if->en_multiple_tx_fifo && core_if->dma_enable) { ++ dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1; ++ dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1; ++ dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1; ++ ++ dev_if->rx_thr_length = params->rx_thr_length; ++ dev_if->tx_thr_length = params->tx_thr_length; ++ ++ dev_if->setup_desc_index = 0; ++ ++ dthrctl.d32 = 0; ++ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; ++ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; ++ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; ++ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; ++ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", ++ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len); ++ ++ } ++ ++ dwc_otg_enable_device_interrupts(core_if); ++ ++ { ++ diepmsk_data_t msk = { .d32 = 0 }; ++ msk.b.txfifoundrn = 1; ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32); ++ } ++ } ++ ++ ++ if(core_if->multiproc_int_enable) { ++ /* Set NAK on Babble */ ++ dctl_data_t dctl = { .d32 = 0}; ++ dctl.b.nakonbble = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } ++} ++ ++/** ++ * This function enables the Host mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. 
*/ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts. */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* ++ * Enable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++} ++ ++/** ++ * This function disables the Host Mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); ++ ++ /* ++ * Disable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ intr_mask.b.ptxfempty = 1; ++ intr_mask.b.nptxfempty = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * host mode. ++ * ++ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the ++ * request queues. Host channels are reset to ensure that they are ready for ++ * performing transfers. 
++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ hprt0_data_t hprt0 = { .d32 = 0 }; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t ptxfifosize; ++ int i; ++ hcchar_data_t hcchar; ++ hcfg_data_t hcfg; ++ dwc_otg_hc_regs_t *hc_regs; ++ int num_channels; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if); ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Initialize Host Configuration Register */ ++ init_fslspclksel(core_if); ++ if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ++ { ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ hcfg.b.fslssupp = 1; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); ++ } ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->host_rx_fifo_size; ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ 
DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ /* Periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; ++ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ } ++ ++ /* Clear Host Set HNP Enable in the OTG Control Register */ ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0); ++ ++ /* Make sure the FIFOs are flushed. */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */); ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush out any leftover queued requests. */ ++ num_channels = core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) ++ { ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ /* Halt all channels to put them into a known state. */ ++ for (i = 0; i < num_channels; i++) ++ { ++ int count = 0; ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); ++ do { ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (++count > 1000) ++ { ++ DWC_ERROR("%s: Unable to clear halt on channel %d\n", ++ __func__, i); ++ break; ++ } ++ } ++ while (hcchar.b.chen); ++ } ++ ++ /* Turn on the vbus power. */ ++ DWC_PRINT("Init: Port Power? 
op_state=%d\n", core_if->op_state); ++ if (core_if->op_state == A_HOST) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); ++ if (hprt0.b.prtpwr == 0) { ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(host_if->hprt0, hprt0.d32); ++ } ++ } ++ ++ dwc_otg_enable_host_interrupts(core_if); ++} ++ ++/** ++ * Prepares a host channel for transferring packets to/from a specific ++ * endpoint. The HCCHARn register is set up with the characteristics specified ++ * in _hc. Host channel interrupts that may need to be serviced while this ++ * transfer is in progress are enabled. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ * @param hc Information needed to initialize the host channel ++ */ ++void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t intr_enable; ++ hcintmsk_data_t hc_intr_mask; ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ ++ uint8_t hc_num = hc->hc_num; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; ++ ++ /* Clear old interrupt conditions for this host channel. */ ++ hc_intr_mask.d32 = 0xFFFFFFFF; ++ hc_intr_mask.b.reserved = 0; ++ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); ++ ++ /* Enable channel interrupts required for this transfer. 
*/ ++ hc_intr_mask.d32 = 0; ++ hc_intr_mask.b.chhltd = 1; ++ if (core_if->dma_enable) { ++ hc_intr_mask.b.ahberr = 1; ++ if (hc->error_state && !hc->do_split && ++ hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ hc_intr_mask.b.ack = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) { ++ hc_intr_mask.b.nak = 1; ++ } ++ } ++ } ++ } ++ else { ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ else { ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.nyet = 1; ++ if (hc->do_ping) { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ hc_intr_mask.b.nak = 1; ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ break; ++ case DWC_OTG_EP_TYPE_INTR: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ break; ++ case DWC_OTG_EP_TYPE_ISOC: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ hc_intr_mask.b.ack = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.bblerr = 1; ++ } ++ break; ++ } ++ } ++ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); ++ ++// if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in) ++// hc->max_packet = 512; ++ /* Enable the top level host channel interrupt. 
*/ ++ intr_enable = (1 << hc_num); ++ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); ++ ++ /* Make sure host channel interrupts are enabled. */ ++ gintmsk.b.hcintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32); ++ ++ /* ++ * Program the HCCHARn register with the endpoint characteristics for ++ * the current transfer. ++ */ ++ hcchar.d32 = 0; ++ hcchar.b.devaddr = hc->dev_addr; ++ hcchar.b.epnum = hc->ep_num; ++ hcchar.b.epdir = hc->ep_is_in; ++ hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW); ++ hcchar.b.eptype = hc->ep_type; ++ hcchar.b.mps = hc->max_packet; ++ ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); ++ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); ++ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); ++ ++ /* ++ * Program the HCSPLIT register for SPLITs ++ */ ++ hcsplt.d32 = 0; ++ if (hc->do_split) { ++ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num, ++ hc->complete_split ? 
"CSPLIT" : "SSPLIT"); ++ hcsplt.b.compsplt = hc->complete_split; ++ hcsplt.b.xactpos = hc->xact_pos; ++ hcsplt.b.hubaddr = hc->hub_addr; ++ hcsplt.b.prtaddr = hc->port_addr; ++ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split); ++ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos); ++ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr); ++ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr); ++ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len); ++ } ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); ++ ++} ++ ++/** ++ * Attempts to halt a host channel. This function should only be called in ++ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under ++ * normal circumstances in DMA mode, the controller halts the channel when the ++ * transfer is complete or a condition occurs that requires application ++ * intervention. ++ * ++ * In slave mode, checks for a free request queue entry, then sets the Channel ++ * Enable and Channel Disable bits of the Host Channel Characteristics ++ * register of the specified channel to intiate the halt. If there is no free ++ * request queue entry, sets only the Channel Disable bit of the HCCHARn ++ * register to flush requests for this channel. In the latter case, sets a ++ * flag to indicate that the host channel needs to be halted when a request ++ * queue slot is open. ++ * ++ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the ++ * HCCHARn register. The controller ensures there is space in the request ++ * queue before submitting the halt request. ++ * ++ * Some time may elapse before the core flushes any posted requests for this ++ * host channel and halts. The Channel Halted interrupt handler completes the ++ * deactivation of the host channel. ++ * ++ * @param core_if Controller register interface. 
++ * @param hc Host channel to halt. ++ * @param halt_status Reason for halting the channel. ++ */ ++void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ dwc_otg_halt_status_e halt_status) ++{ ++ gnptxsts_data_t nptxsts; ++ hptxsts_data_t hptxsts; ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_core_global_regs_t *global_regs; ++ dwc_otg_host_global_regs_t *host_global_regs; ++ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ global_regs = core_if->core_global_regs; ++ host_global_regs = core_if->host_if->host_global_regs; ++ ++ WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); ++ ++ if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Disable all channel interrupts except Ch Halted. The QTD ++ * and QH state associated with this transfer has been cleared ++ * (in the case of URB_DEQUEUE), so the channel needs to be ++ * shut down carefully to prevent crashes. ++ */ ++ hcintmsk_data_t hcintmsk; ++ hcintmsk.d32 = 0; ++ hcintmsk.b.chhltd = 1; ++ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); ++ ++ /* ++ * Make sure no other interrupts besides halt are currently ++ * pending. Handling another interrupt could cause a crash due ++ * to the QTD and QH state. ++ */ ++ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); ++ ++ /* ++ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR ++ * even if the channel was already halted for some other ++ * reason. ++ */ ++ hc->halt_status = halt_status; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen == 0) { ++ /* ++ * The channel is either already halted or it hasn't ++ * started yet. In DMA mode, the transfer may halt if ++ * it finishes normally or a condition occurs that ++ * requires driver intervention. Don't want to halt ++ * the channel again. 
In either Slave or DMA mode, ++ * it's possible that the transfer has been assigned ++ * to a channel, but not started yet when an URB is ++ * dequeued. Don't want to halt a channel that hasn't ++ * started yet. ++ */ ++ return; ++ } ++ } ++ ++ if (hc->halt_pending) { ++ /* ++ * A halt has already been issued for this channel. This might ++ * happen when a transfer is aborted by a higher level in ++ * the stack. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", ++ __func__, hc->hc_num); ++ ++/* dwc_otg_dump_global_registers(core_if); */ ++/* dwc_otg_dump_host_registers(core_if); */ ++#endif ++ return; ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ ++ if (!core_if->dma_enable) { ++ /* Check for space in the request queue to issue the halt. */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (nptxsts.b.nptxqspcavail == 0) { ++ hcchar.b.chen = 0; ++ } ++ } ++ else { ++ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); ++ if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) { ++ hcchar.b.chen = 0; ++ } ++ } ++ } ++ ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->halt_status = halt_status; ++ ++ if (hcchar.b.chen) { ++ hc->halt_pending = 1; ++ hc->halt_on_queue = 0; ++ } ++ else { ++ hc->halt_on_queue = 1; ++ } ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending); ++ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status); ++ ++ return; ++} ++ ++/** ++ * Clears the transfer state for a host channel. This function is normally ++ * called after a transfer is done and the host channel is being released. 
++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to clean up. ++ */ ++void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ dwc_otg_hc_regs_t *hc_regs; ++ ++ hc->xfer_started = 0; ++ ++ /* ++ * Clear channel interrupt enables and any unhandled channel interrupt ++ * conditions. ++ */ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ dwc_write_reg32(&hc_regs->hcintmsk, 0); ++ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); ++ ++#ifdef DEBUG ++ del_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++ { ++ hcchar_data_t hcchar; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++ } ++#endif ++} ++ ++/** ++ * Sets the channel property that indicates in which frame a periodic transfer ++ * should occur. This is always set to the _next_ frame. This function has no ++ * effect on non-periodic transfers. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to set up and its properties. ++ * @param hcchar Current value of the HCCHAR register for the specified host ++ * channel. ++ */ ++static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ hcchar_data_t *hcchar) ++{ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hfnum_data_t hfnum; ++ hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum); ++ ++ /* 1 if _next_ frame is odd, 0 if it's even */ ++ hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 
0 : 1; ++#ifdef DEBUG ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) { ++ switch (hfnum.b.frnum & 0x7) { ++ case 7: ++ core_if->hfnum_7_samples++; ++ core_if->hfnum_7_frrem_accum += hfnum.b.frrem; ++ break; ++ case 0: ++ core_if->hfnum_0_samples++; ++ core_if->hfnum_0_frrem_accum += hfnum.b.frrem; ++ break; ++ default: ++ core_if->hfnum_other_samples++; ++ core_if->hfnum_other_frrem_accum += hfnum.b.frrem; ++ break; ++ } ++ } ++#endif ++ } ++} ++ ++#ifdef DEBUG ++static void hc_xfer_timeout(unsigned long ptr) ++{ ++ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr; ++ int hc_num = xfer_info->hc->hc_num; ++ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); ++ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); ++} ++#endif ++ ++/* ++ * This function does the setup for a data transfer for a host channel and ++ * starts the transfer. May be called in either Slave mode or DMA mode. In ++ * Slave mode, the caller must ensure that there is sufficient space in the ++ * request queue and Tx Data FIFO. ++ * ++ * For an OUT transfer in Slave mode, it loads a data packet into the ++ * appropriate FIFO. If necessary, additional data packets will be loaded in ++ * the Host ISR. ++ * ++ * For an IN transfer in Slave mode, a data packet is requested. The data ++ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, ++ * additional data packets are requested in the Host ISR. ++ * ++ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ ++ * register along with a packet count of 1 and the channel is enabled. This ++ * causes a single PING transaction to occur. Other fields in HCTSIZ are ++ * simply set to 0 since no data transfer occurs in this case. ++ * ++ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with ++ * all the information required to perform the subsequent data transfer. 
In ++ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the ++ * controller performs the entire PING protocol, then starts the data ++ * transfer. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Information needed to initialize the host channel. The xfer_len ++ * value may be reduced to accommodate the max widths of the XferSize and ++ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed ++ * to reflect the final xfer_len value. ++ */ ++void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ uint16_t num_packets; ++ uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size; ++ uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hctsiz.d32 = 0; ++ ++ if (hc->do_ping) { ++ if (!core_if->dma_enable) { ++ dwc_otg_hc_do_ping(core_if, hc); ++ hc->xfer_started = 1; ++ return; ++ } ++ else { ++ hctsiz.b.dopng = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ num_packets = 1; ++ ++ if (hc->complete_split && !hc->ep_is_in) { ++ /* For CSPLIT OUT Transfer, set the size to 0 so the ++ * core doesn't expect any data written to the FIFO */ ++ hc->xfer_len = 0; ++ } ++ else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) { ++ hc->xfer_len = hc->max_packet; ++ } ++ else if (!hc->ep_is_in && (hc->xfer_len > 188)) { ++ hc->xfer_len = 188; ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ else { ++ /* ++ * Ensure that the transfer length and packet count will fit ++ * in the widths allocated for them in the HCTSIZn register. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure the transfer size is no larger than one ++ * (micro)frame's worth of data. 
(A check was done ++ * when the periodic transfer was accepted to ensure ++ * that a (micro)frame's worth of data can be ++ * programmed into a channel.) ++ */ ++ uint32_t max_periodic_len = hc->multi_count * hc->max_packet; ++ if (hc->xfer_len > max_periodic_len) { ++ hc->xfer_len = max_periodic_len; ++ } ++ else { ++ } ++ } ++ else if (hc->xfer_len > max_hc_xfer_size) { ++ /* Make sure that xfer_len is a multiple of max packet size. */ ++ hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; ++ } ++ ++ if (hc->xfer_len > 0) { ++ num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet; ++ if (num_packets > max_hc_pkt_count) { ++ num_packets = max_hc_pkt_count; ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++ } ++ else { ++ /* Need 1 packet for transfer length of 0. */ ++ num_packets = 1; ++ } ++ ++#if 0 ++//host testusb item 10, would do series of Control transfer ++//with URB_SHORT_NOT_OK set in transfer_flags , ++//changing the xfer_len would cause the test fail ++ if (hc->ep_is_in) { ++ /* Always program an integral # of max packets for IN transfers. */ ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++#endif ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure that the multi_count field matches the ++ * actual transfer length. ++ */ ++ hc->multi_count = num_packets; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* Set up the initial PID for the transfer. 
*/ ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH) { ++ if (hc->ep_is_in) { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else if (hc->multi_count == 2) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA2; ++ } ++ } ++ else { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_MDATA; ++ } ++ } ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ ++ hc->start_pkt_count = num_packets; ++ hctsiz.b.pktcnt = num_packets; ++ hctsiz.b.pid = hc->data_pid_start; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); ++ ++ if (core_if->dma_enable) { ++ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); ++ } ++ ++ /* Start the split */ ++ if (hc->do_split) { ++ hcsplt_data_t hcsplt; ++ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); ++ hcsplt.b.spltena = 1; ++ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.multicnt = hc->multi_count; ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++#ifdef DEBUG ++ core_if->start_hcchar_val[hc->hc_num] = hcchar.d32; ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++#endif ++ ++ /* Set host channel enable after all other setup is complete. */ ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->xfer_started = 1; ++ hc->requests++; ++ ++ if (!core_if->dma_enable && ++ !hc->ep_is_in && hc->xfer_len > 0) { ++ /* Load OUT packet into the appropriate Tx FIFO. 
*/ ++ dwc_otg_hc_write_packet(core_if, hc); ++ } ++ ++#ifdef DEBUG ++ /* Start a timer for this transfer. */ ++ core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout; ++ core_if->hc_xfer_info[hc->hc_num].core_if = core_if; ++ core_if->hc_xfer_info[hc->hc_num].hc = hc; ++ core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]); ++ core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10); ++ add_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++#endif ++} ++ ++/** ++ * This function continues a data transfer that was started by previous call ++ * to dwc_otg_hc_start_transfer. The caller must ensure there is ++ * sufficient space in the request queue and Tx Data FIFO. This function ++ * should only be called in Slave mode. In DMA mode, the controller acts ++ * autonomously to complete transfers programmed to a host channel. ++ * ++ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO ++ * if there is any data remaining to be queued. For an IN transfer, another ++ * data packet is always requested. For the SETUP phase of a control transfer, ++ * this function does nothing. ++ * ++ * @return 1 if a new request is queued, 0 if no more requests are required ++ * for this transfer. ++ */ ++int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ if (hc->do_split) { ++ /* SPLITs always queue just once per channel */ ++ return 0; ++ } ++ else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ /* SETUPs are queued only once since they can't be NAKed. */ ++ return 0; ++ } ++ else if (hc->ep_is_in) { ++ /* ++ * Always queue another request for other IN transfers. If ++ * back-to-back INs are issued and NAKs are received for both, ++ * the driver may still be processing the first NAK when the ++ * second NAK is received. 
When the interrupt handler clears ++ * the NAK interrupt for the first NAK, the second NAK will ++ * not be seen. So we can't depend on the NAK interrupt ++ * handler to requeue a NAKed request. Instead, IN requests ++ * are issued each time this function is called. When the ++ * transfer completes, the extra requests for the channel will ++ * be flushed. ++ */ ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ /* OUT transfers. */ ++ if (hc->xfer_count < hc->xfer_len) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ } ++ ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(core_if, hc); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ return 0; ++ } ++ } ++} ++ ++/** ++ * Starts a PING transfer. This function should only be called in Slave mode. ++ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. 
++ */ ++void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ hctsiz.d32 = 0; ++ hctsiz.b.dopng = 1; ++ hctsiz.b.pktcnt = 1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++} ++ ++/* ++ * This function writes a packet into the Tx FIFO associated with the Host ++ * Channel. For a channel associated with a non-periodic EP, the non-periodic ++ * Tx FIFO is written. For a channel associated with a periodic EP, the ++ * periodic Tx FIFO is written. This function should only be called in Slave ++ * mode. ++ * ++ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by ++ * then number of bytes written to the Tx FIFO. ++ */ ++void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t i; ++ uint32_t remaining_count; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ ++ uint32_t *data_buff = (uint32_t *)(hc->xfer_buff); ++ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num]; ++ ++ remaining_count = hc->xfer_len - hc->xfer_count; ++ if (remaining_count > hc->max_packet) { ++ byte_count = hc->max_packet; ++ } ++ else { ++ byte_count = remaining_count; ++ } ++ ++ dword_count = (byte_count + 3) / 4; ++ ++ if ((((unsigned long)data_buff) & 0x3) == 0) { ++ /* xfer_buff is DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, *data_buff); ++ } ++ } ++ else { ++ /* xfer_buff is not DWORD aligned. 
*/ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); ++ } ++ } ++ ++ hc->xfer_count += byte_count; ++ hc->xfer_buff += byte_count; ++} ++ ++/** ++ * Gets the current USB frame number. This is the frame number from the last ++ * SOF packet. ++ */ ++uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /* read current frame/microframe number from DSTS register */ ++ return dsts.b.soffn; ++} ++ ++/** ++ * This function reads a setup packet from the Rx FIFO into the destination ++ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) ++ * Interrupt routine when a SETUP packet has been received in Slave mode. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for packet data. ++ */ ++void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest) ++{ ++ /* Get the 8 bytes of a setup transaction data */ ++ ++ /* Pop 2 DWORDS off the receive data FIFO into memory */ ++ dest[0] = dwc_read_reg32(core_if->data_fifo[0]); ++ dest[1] = dwc_read_reg32(core_if->data_fifo[0]); ++} ++ ++ ++/** ++ * This function enables EP0 OUT to receive SETUP packets and configures EP0 ++ * IN for transmitting packets. It is normally called when the ++ * "Enumeration Done" interrupt occurs. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. 
++ */ ++void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dsts_data_t dsts; ++ depctl_data_t diepctl; ++ depctl_data_t doepctl; ++ dctl_data_t dctl = { .d32 = 0 }; ++ ++ /* Read the Device Status and Endpoint 0 Control registers */ ++ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); ++ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); ++ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); ++ ++ /* Set the MPS of the IN EP based on the enumeration speed */ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_64; ++ break; ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_8; ++ break; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); ++ ++ /* Enable OUT EP for receive */ ++ doepctl.b.epena = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++ dctl.b.cgnpinnak = 1; ++ ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->dctl)); ++} ++ ++/** ++ * This function activates an EP. The Device EP control register for ++ * the EP is configured as defined in the ep structure. Note: This ++ * function is not used for EP0. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to activate. 
++ */ ++void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ depctl_data_t depctl; ++ volatile uint32_t *addr; ++ daint_data_t daintmsk = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ /* Read DEPCTLn register */ ++ if (ep->is_in == 1) { ++ addr = &dev_if->in_ep_regs[ep->num]->diepctl; ++ daintmsk.ep.in = 1<num; ++ } ++ else { ++ addr = &dev_if->out_ep_regs[ep->num]->doepctl; ++ daintmsk.ep.out = 1<num; ++ } ++ ++ /* If the EP is already active don't change the EP Control ++ * register. */ ++ depctl.d32 = dwc_read_reg32(addr); ++ if (!depctl.b.usbactep) { ++ depctl.b.mps = ep->maxpacket; ++ depctl.b.eptype = ep->type; ++ depctl.b.txfnum = ep->tx_fifo_num; ++ ++ if (ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ depctl.b.setd0pid = 1; // ??? ++ } ++ else { ++ depctl.b.setd0pid = 1; ++ } ++ depctl.b.usbactep = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"DEPCTL(%.8x)=%08x\n",(u32)addr, dwc_read_reg32(addr)); ++ } ++ ++ /* Enable the Interrupt for this EP */ ++ if(core_if->multiproc_int_enable) { ++ if (ep->is_in == 1) { ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ diepmsk.b.txfifoundrn = 1; //????? 
++ ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++/* ++ if(core_if->dma_enable) { ++ doepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32); ++ ++ } else { ++ doepmsk_data_t doepmsk = { .d32 = 0}; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ doepmsk.b.nak = 1; ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32); ++ } ++ dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk, ++ 0, daintmsk.d32); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, ++ 0, daintmsk.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); ++ ++ ep->stall_clear_flag = 0; ++ return; ++} ++ ++/** ++ * This function deactivates an EP. This is done by clearing the USB Active ++ * EP bit in the Device EP control register. Note: This function is not used ++ * for EP0. EP0 cannot be deactivated. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to deactivate. 
++ */ ++void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ daint_data_t daintmsk = { .d32 = 0}; ++ ++ /* Read DEPCTLn register */ ++ if (ep->is_in == 1) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ daintmsk.ep.in = 1<num; ++ } ++ else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ daintmsk.ep.out = 1<num; ++ } ++ ++ //disabled ep only when ep is enabled ++ //or got halt in the loop in test in cv9 ++ depctl.d32=dwc_read_reg32(addr); ++ if(depctl.b.epena){ ++ if (ep->is_in == 1) { ++ diepint_data_t diepint; ++ dwc_otg_dev_in_ep_regs_t *in_reg=core_if->dev_if->in_ep_regs[ep->num]; ++ ++ //Set ep nak ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.snak=1; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ //wait for diepint.b.inepnakeff ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ while(!diepint.b.inepnakeff){ ++ udelay(1); ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ } ++ diepint.d32=0; ++ diepint.b.inepnakeff=1; ++ dwc_write_reg32(&in_reg->diepint,diepint.d32); ++ ++ //set ep disable and snak ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.snak=1; ++ depctl.b.epdis=1; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ //wait for diepint.b.epdisabled ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ while(!diepint.b.epdisabled){ ++ udelay(1); ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ } ++ diepint.d32=0; ++ diepint.b.epdisabled=1; ++ dwc_write_reg32(&in_reg->diepint,diepint.d32); ++ ++ //clear ep enable and disable bit ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.epena=0; ++ depctl.b.epdis=0; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ } ++#if 0 ++//following DWC OTG DataBook v2.72a, 6.4.2.1.3 Disabling an OUT Endpoint, ++//but this doesn't work, the old code do. 
++ else { ++ doepint_data_t doepint; ++ dwc_otg_dev_out_ep_regs_t *out_reg=core_if->dev_if->out_ep_regs[ep->num]; ++ dctl_data_t dctl; ++ gintsts_data_t gintsts; ++ ++ //set dctl global out nak ++ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); ++ dctl.b.sgoutnak=1; ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl,dctl.d32); ++ ++ //wait for gintsts.goutnakeff ++ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ while(!gintsts.b.goutnakeff){ ++ udelay(1); ++ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ } ++ gintsts.d32=0; ++ gintsts.b.goutnakeff=1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ //set ep disable and snak ++ depctl.d32=dwc_read_reg32(&out_reg->doepctl); ++ depctl.b.snak=1; ++ depctl.b.epdis=1; ++ dwc_write_reg32(&out_reg->doepctl,depctl.d32); ++ ++ //wait for diepint.b.epdisabled ++ doepint.d32=dwc_read_reg32(&out_reg->doepint); ++ while(!doepint.b.epdisabled){ ++ udelay(1); ++ doepint.d32=dwc_read_reg32(&out_reg->doepint); ++ } ++ doepint.d32=0; ++ doepint.b.epdisabled=1; ++ dwc_write_reg32(&out_reg->doepint,doepint.d32); ++ ++ //clear ep enable and disable bit ++ depctl.d32=dwc_read_reg32(&out_reg->doepctl); ++ depctl.b.epena=0; ++ depctl.b.epdis=0; ++ dwc_write_reg32(&out_reg->doepctl,depctl.d32); ++ } ++#endif ++ ++ depctl.d32=0; ++ depctl.b.usbactep = 0; ++ ++ if (ep->is_in == 0) { ++ if(core_if->dma_enable||core_if->dma_desc_enable) ++ depctl.b.epdis = 1; ++ } ++ ++ dwc_write_reg32(addr, depctl.d32); ++ } ++ ++ /* Disable the Interrupt for this EP */ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk, ++ daintmsk.d32, 0); ++ ++ if (ep->is_in == 1) { ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0); ++ } else { ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0); ++ } ++ } else { ++ 
dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk, ++ daintmsk.d32, 0); ++ } ++ ++ if (ep->is_in == 1) { ++ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", ++ (u32)&core_if->dev_if->in_ep_regs[ep->num]->diepctl, ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepctl), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepint), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepdma), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++ else { ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", ++ (u32)&core_if->dev_if->out_ep_regs[ep->num]->doepctl, ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepint), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepdma)); ++ ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++ ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ */ ++static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dma_desc_t* dma_desc; ++ uint32_t offset; ++ uint32_t xfer_est; ++ int i; ++ ++ ep->desc_cnt = ( ep->total_len / ep->maxxfer) + ++ ((ep->total_len % ep->maxxfer) ? 1 : 0); ++ if(!ep->desc_cnt) ++ ep->desc_cnt = 1; ++ ++ dma_desc = ep->desc_addr; ++ xfer_est = ep->total_len; ++ offset = 0; ++ for( i = 0; i < ep->desc_cnt; ++i) { ++ /** DMA Descriptor Setup */ ++ if(xfer_est > ep->maxxfer) { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 0; ++ dma_desc->status.b.ioc = 0; ++ dma_desc->status.b.sp = 0; ++ dma_desc->status.b.bytes = ep->maxxfer; ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ xfer_est -= ep->maxxfer; ++ offset += ep->maxxfer; ++ } else { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ if(ep->is_in) { ++ dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ? ++ 1 : ((ep->sent_zlp) ? 1 : 0); ++ dma_desc->status.b.bytes = xfer_est; ++ } else { ++ dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ; ++ } ++ ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ } ++ dma_desc ++; ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ */ ++ ++void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p\n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? ++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Zero Length Packet? */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ } ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ep->num, ep->dma_addr); ++ ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ /** DIEPDMAn Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr); ++ } ++ } ++ else ++ { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ if(ep->type != DWC_OTG_EP_TYPE_ISOC) { ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. 
++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ ++ } ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", ++ (u32)&in_regs->diepctl, ++ dwc_read_reg32(&in_regs->diepctl), ++ dwc_read_reg32(&in_regs->dieptsiz), ++ dwc_read_reg32(&in_regs->diepint), ++ dwc_read_reg32(&in_regs->diepdma), ++ dwc_read_reg32(&in_regs->dtxfsts)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? 
++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Program the transfer size and packet count as follows: ++ * ++ * pktcnt = N ++ * xfersize = N * maxpacket ++ */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count; ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", ++ ep->num, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ++ ep->num, ++ ep->dma_addr); ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ ++ /** DOEPDMAn Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", ++ (u32)&out_regs->doepctl, ++ dwc_read_reg32(&out_regs->doepctl), ++ dwc_read_reg32(&out_regs->doeptsiz), ++ dwc_read_reg32(&out_regs->doepint), ++ dwc_read_reg32(&out_regs->doepdma)); ++ ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); 
++ } ++} ++ ++/** ++ * This function setup a zero length transfer in Buffer DMA and ++ * Slave modes for usb requests with zero field set ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. 
++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for EP0 and starts ++ * the transfer. For an IN transfer, the packets will be loaded into ++ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are ++ * unloaded from the Rx FIFO in the ISR. 
++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p, dma_addr=%.8x\n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,ep->dma_addr); ++ ++ ep->total_len = ep->xfer_len; ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", ++ dwc_read_reg32(&in_regs->diepctl)); ++ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", ++ deptsiz.d32, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", ++ gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Zero Length Packet? */ ++ if (ep->xfer_len == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ if (ep->xfer_len > ep->maxpacket) { ++ ep->xfer_len = ep->maxpacket; ++ deptsiz.b.xfersize = ep->maxpacket; ++ } ++ else { ++ deptsiz.b.xfersize = ep->xfer_len; ++ } ++ deptsiz.b.pktcnt = 1; ++ ++ } ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DIEPDMA0 Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. 
++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count as follows: ++ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) ++ * pktcnt = N */ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if(!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, 
deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); ++ } ++} ++ ++/** ++ * This function continues control IN transfers started by ++ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a ++ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one ++ * bit for the packet count. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ gnptxsts_data_t tx_status = { .d32 = 0 }; ++ ++ tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ /** @todo Should there be check for room in the Tx ++ * Status Queue. If not remove the code above this comment. */ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ deptsiz.b.pktcnt = 1; ++ if(core_if->dma_enable == 0) { ++ ep->xfer_len += deptsiz.b.xfersize; ++ } else { ++ ep->xfer_len = deptsiz.b.xfersize; ++ } ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ else { ++ ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? 
ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ ++ /** DIEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0){ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. 
++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ /* First clear it from GINTSTS */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0){ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 
1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++#ifdef DEBUG ++void dump_msg(const u8 *buf, unsigned int length) ++{ ++ unsigned int start, num, i; ++ char line[52], *p; ++ ++ if (length >= 512) ++ return; ++ start = 0; ++ while (length > 0) { ++ num = min(length, 16u); ++ p = line; ++ for (i = 0; i < num; ++i) ++ { ++ if (i == 8) ++ *p++ = ' '; ++ sprintf(p, " %02x", buf[i]); ++ p += 3; ++ } ++ *p = 0; ++ DWC_PRINT("%6x: %s\n", start, line); ++ buf += num; ++ start += num; ++ length -= num; ++ } ++} ++#else ++static inline void dump_msg(const u8 *buf, unsigned int length) ++{ ++} ++#endif ++ ++/** ++ * This function writes a packet into the Tx FIFO associated with the ++ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For ++ * periodic EPs the periodic Tx FIFO associated with the EP is written ++ * with all packets for the next micro-frame. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to write packet for. ++ * @param dma Indicates if DMA is being used. ++ */ ++void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma) ++{ ++ /** ++ * The buffer is padded to DWORD on a per packet basis in ++ * slave/dma mode if the MPS is not DWORD aligned. The last ++ * packet, if short, is also padded to a multiple of DWORD. 
++ * ++ * ep->xfer_buff always starts DWORD aligned in memory and is a ++ * multiple of DWORD in length ++ * ++ * ep->xfer_len can be any number of bytes ++ * ++ * ep->xfer_count is a multiple of ep->maxpacket until the last ++ * packet ++ * ++ * FIFO access is DWORD */ ++ ++ uint32_t i; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ uint32_t *fifo; ++ uint32_t *data_buff = (uint32_t *)ep->xfer_buff; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep); ++ if (ep->xfer_count >= ep->xfer_len) { ++ DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num); ++ return; ++ } ++ ++ /* Find the byte length of the packet either short packet or MPS */ ++ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) { ++ byte_count = ep->xfer_len - ep->xfer_count; ++ } ++ else { ++ byte_count = ep->maxpacket; ++ } ++ ++ /* Find the DWORD length, padded by extra bytes as neccessary if MPS ++ * is not a multiple of DWORD */ ++ dword_count = (byte_count + 3) / 4; ++ ++#ifdef VERBOSE ++ dump_msg(ep->xfer_buff, byte_count); ++#endif ++ ++ /**@todo NGS Where are the Periodic Tx FIFO addresses ++ * intialized? What should this be? */ ++ ++ fifo = core_if->data_fifo[ep->num]; ++ ++ ++ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count); ++ ++ if (!dma) { ++ for (i=0; ixfer_count += byte_count; ++ ep->xfer_buff += byte_count; ++ ep->dma_addr += byte_count; ++} ++ ++/** ++ * Set the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to set the stall on. 
++ */ ++void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s ep%d-%s1\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ DWC_PRINT("%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"in":"out")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the disable and stall bits */ ++#if 0 ++//epdis is set here but not cleared at latter dwc_otg_ep_clear_stall, ++//which cause the testusb item 13 failed(Host:pc, device: otg device) ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++#endif ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the stall bit */ ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s: DEPCTL(%.8x)=%0x\n",__func__,(u32)depctl_addr,dwc_read_reg32(depctl_addr)); ++ ++ return; ++} ++ ++/** ++ * Clear the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to clear stall from. 
++ */ ++void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ } ++ ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* clear the stall bits */ ++ depctl.b.stall = 0; ++ ++ /* ++ * USB Spec 9.4.5: For endpoints using data toggle, regardless ++ * of whether an endpoint has the Halt feature set, a ++ * ClearFeature(ENDPOINT_HALT) request always results in the ++ * data toggle being reinitialized to DATA0. ++ */ ++ if (ep->type == DWC_OTG_EP_TYPE_INTR || ++ ep->type == DWC_OTG_EP_TYPE_BULK) { ++ depctl.b.setd0pid = 1; /* DATA0 */ ++ } ++ ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ return; ++} ++ ++/** ++ * This function reads a packet from the Rx FIFO into the destination ++ * buffer. To read SETUP data use dwc_otg_read_setup_packet. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for the packet. ++ * @param bytes Number of bytes to copy to the destination. ++ */ ++void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes) ++{ ++ int i; ++ int word_count = (bytes + 3) / 4; ++ ++ volatile uint32_t *fifo = core_if->data_fifo[0]; ++ uint32_t *data_buff = (uint32_t *)dest; ++ ++ /** ++ * @todo Account for the case where _dest is not dword aligned. This ++ * requires reading data from the FIFO into a uint32_t temp buffer, ++ * then moving it into the data buffer. 
++ */ ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, ++ core_if, dest, bytes); ++ ++ for (i=0; idev_if->dev_global_regs->dcfg; ++ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dctl; ++ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dsts; ++ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->diepmsk; ++ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->doepmsk; ++ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->daint; ++ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->daintmsk; ++ DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dtknqr1; ++ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ if (core_if->hwcfg2.b.dev_token_q_depth > 6) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr2; ++ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ addr=&core_if->dev_if->dev_global_regs->dvbusdis; ++ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ addr=&core_if->dev_if->dev_global_regs->dvbuspulse; ++ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ ++ if (core_if->hwcfg2.b.dev_token_q_depth > 14) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl; ++ DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n", ++ (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++/* ++ if (core_if->hwcfg2.b.dev_token_q_depth > 22) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; ++ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", ++ 
(uint32_t)addr, dwc_read_reg32(addr)); ++ } ++*/ ++ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; ++ DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr)); ++ ++ addr=&core_if->dev_if->dev_global_regs->deachint; ++ DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->deachintmsk; ++ DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { ++ addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i]; ++ DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++ ++ ++ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { ++ addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i]; ++ DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++ ++ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { ++ DWC_PRINT("Device IN EP %d Registers\n", i); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepctl; ++ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepint; ++ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz; ++ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepdma; ++ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts; ++ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ //reading depdmab in non desc dma mode would halt the ahb bus... 
++ if(core_if->dma_desc_enable){ ++ addr=&core_if->dev_if->in_ep_regs[i]->diepdmab; ++ DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ } ++ ++ ++ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { ++ DWC_PRINT("Device OUT EP %d Registers\n", i); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepctl; ++ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepfn; ++ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepint; ++ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz; ++ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdma; ++ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ //reading depdmab in non desc dma mode would halt the ahb bus... ++ if(core_if->dma_desc_enable){ ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdmab; ++ DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ } ++ ++ ++ ++ return; ++} ++ ++/** ++ * This functions reads the SPRAM and prints its content ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if) ++{ ++ volatile uint8_t *addr, *start_addr, *end_addr; ++ ++ DWC_PRINT("SPRAM Data:\n"); ++ start_addr = (void*)core_if->core_global_regs; ++ DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr); ++ start_addr += 0x00028000; ++ end_addr=(void*)core_if->core_global_regs; ++ end_addr += 0x000280e0; ++ ++ for(addr = start_addr; addr < end_addr; addr+=16) ++ { ++ DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr, ++ addr[0], ++ addr[1], ++ addr[2], ++ addr[3], ++ addr[4], ++ addr[5], ++ addr[6], ++ addr[7], ++ addr[8], ++ addr[9], ++ addr[10], ++ addr[11], ++ addr[12], ++ addr[13], ++ addr[14], ++ addr[15] ++ ); ++ } ++ ++ return; ++} ++/** ++ * This function reads the host registers and prints them ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Host Global Registers\n"); ++ addr=&core_if->host_if->host_global_regs->hcfg; ++ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hfir; ++ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hfnum; ++ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hptxsts; ++ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->haint; ++ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->haintmsk; ++ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=core_if->host_if->hprt0; ++ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; icore_params->host_channels; i++) ++ { ++ 
DWC_PRINT("Host Channel %d Specific Registers\n", i); ++ addr=&core_if->host_if->hc_regs[i]->hcchar; ++ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcsplt; ++ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcint; ++ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcintmsk; ++ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hctsiz; ++ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcdma; ++ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ return; ++} ++ ++/** ++ * This function reads the core global registers and prints them ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if) ++{ ++ int i,size; ++ char* str; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Core Global Registers\n"); ++ addr=&core_if->core_global_regs->gotgctl; ++ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gotgint; ++ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gahbcfg; ++ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gusbcfg; ++ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grstctl; ++ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gintsts; ++ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gintmsk; ++ DWC_PRINT("GINTMSK @0x%08X : 
0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grxstsr; ++ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ //addr=&core_if->core_global_regs->grxstsp; ++ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grxfsiz; ++ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gnptxfsiz; ++ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gnptxsts; ++ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gi2cctl; ++ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gpvndctl; ++ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ggpio; ++ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->guid; ++ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gsnpsid; ++ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg1; ++ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg2; ++ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg3; ++ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg4; ++ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->hptxfsiz; ++ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ size=(core_if->hwcfg4.b.ded_fifo_en)? 
++ core_if->hwcfg4.b.num_in_eps:core_if->hwcfg4.b.num_dev_perio_in_ep; ++ str=(core_if->hwcfg4.b.ded_fifo_en)?"DIEPTXF":"DPTXFSIZ"; ++ for (i=0; icore_global_regs->dptxfsiz_dieptxf[i]; ++ DWC_PRINT("%s[%d] @0x%08X : 0x%08X\n",str,i,(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++} ++ ++/** ++ * Flush a Tx FIFO. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param num Tx FIFO to flush. ++ */ ++void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if, ++ const int num) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num); ++ ++ greset.b.txfflsh = 1; ++ greset.b.txfnum = num; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", ++ __func__, greset.d32, ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ break; ++ } ++ } ++ while (greset.b.txfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Flush Rx FIFO. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); ++ /* ++ * ++ */ ++ greset.b.rxfflsh = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.rxfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Do core a soft reset of the core. Be careful with this because it ++ * resets all the internal state machines of the core. 
++ */ ++void dwc_otg_core_reset(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); ++ /* Wait for AHB master IDLE state. */ ++ do { ++ UDELAY(10); ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 100000) { ++ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ return; ++ } ++ } ++ while (greset.b.ahbidle == 0); ++ ++ /* Core Soft Reset */ ++ count = 0; ++ greset.b.csftrst = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.csftrst == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ MDELAY(100); ++ ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ ++} ++ ++ ++ ++/** ++ * Register HCD callbacks. The callbacks are used to start and stop ++ * the HCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the HCD callback structure. ++ * @param p pointer to be passed to callback function (usb_hcd*). ++ */ ++void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->hcd_cb = cb; ++ cb->p = p; ++} ++ ++/** ++ * Register PCD callbacks. The callbacks are used to start and stop ++ * the PCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the PCD callback structure. ++ * @param p pointer to be passed to callback function (pcd*). 
++ */ ++void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->pcd_cb = cb; ++ cb->p = p; ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function writes isoc data per 1 (micro)frame into tx fifo ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ uint32_t len = 0; ++ uint32_t dwords; ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ++ ep_regs = core_if->dev_if->in_ep_regs[ep->num]; ++ ++ len = ep->xfer_len - ep->xfer_count; ++ ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->xfer_count < ep->xfer_len && ++ ep->xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, ep, 0); ++ ++ len = ep->xfer_len - ep->xfer_count; ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32); ++ } ++} ++ ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ * ++ */ ++void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ dsts_data_t dsts = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ep->xfer_buff = ep->cur_pkt_addr; ++ ep->dma_addr = ep->cur_pkt_dma_addr; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ deptsiz.b.mc = deptsiz.b.pktcnt; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ } ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ ++ /** Enable endpoint, clear nak */ ++ ++ depctl.d32 = 0; ++ if(ep->bInterval == 1) { ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ep->next_frame = dsts.b.soffn + ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } else { ++ ep->next_frame += ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } ++ depctl.b.epena = 
1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, 0, depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ if(ep->is_in && core_if->dma_enable == 0) { ++ write_isoc_frame_data(core_if, ep); ++ } ++ ++} ++ ++#endif //DWC_EN_ISOC +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_cil.h +@@ -0,0 +1,1119 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ ++ * $Revision: #91 $ ++ * $Date: 2008/09/19 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_CIL_H__) ++#define __DWC_CIL_H__ ++ ++#include ++#include ++#include ++//#include ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#ifdef DEBUG ++#include "linux/timer.h" ++#endif ++ ++/** ++ * @file ++ * This file contains the interface to the Core Interface Layer. ++ */ ++ ++ ++/** Macros defined for DWC OTG HW Release verison */ ++#define OTG_CORE_REV_2_00 0x4F542000 ++#define OTG_CORE_REV_2_60a 0x4F54260A ++#define OTG_CORE_REV_2_71a 0x4F54271A ++#define OTG_CORE_REV_2_72a 0x4F54272A ++ ++/** ++*/ ++typedef struct iso_pkt_info ++{ ++ uint32_t offset; ++ uint32_t length; ++ int32_t status; ++} iso_pkt_info_t; ++/** ++ * The dwc_ep structure represents the state of a single ++ * endpoint when acting in device mode. It contains the data items ++ * needed for an endpoint to be activated and transfer packets. ++ */ ++typedef struct dwc_ep ++{ ++ /** EP number used for register address lookup */ ++ uint8_t num; ++ /** EP direction 0 = OUT */ ++ unsigned is_in : 1; ++ /** EP active. 
*/ ++ unsigned active : 1; ++ ++ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO ++ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ ++ unsigned tx_fifo_num : 4; ++ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ ++ unsigned type : 2; ++#define DWC_OTG_EP_TYPE_CONTROL 0 ++#define DWC_OTG_EP_TYPE_ISOC 1 ++#define DWC_OTG_EP_TYPE_BULK 2 ++#define DWC_OTG_EP_TYPE_INTR 3 ++ ++ /** DATA start PID for INTR and BULK EP */ ++ unsigned data_pid_start : 1; ++ /** Frame (even/odd) for ISOC EP */ ++ unsigned even_odd_frame : 1; ++ /** Max Packet bytes */ ++ unsigned maxpacket : 11; ++ ++ /** Max Transfer size */ ++ unsigned maxxfer : 16; ++ ++ /** @name Transfer state */ ++ /** @{ */ ++ ++ /** ++ * Pointer to the beginning of the transfer buffer -- do not modify ++ * during transfer. ++ */ ++ ++ uint32_t dma_addr; ++ ++ uint32_t dma_desc_addr; ++ dwc_otg_dma_desc_t* desc_addr; ++ ++ ++ uint8_t *start_xfer_buff; ++ /** pointer to the transfer buffer */ ++ uint8_t *xfer_buff; ++ /** Number of bytes to transfer */ ++ unsigned xfer_len : 19; ++ /** Number of bytes transferred. 
*/ ++ unsigned xfer_count : 19; ++ /** Sent ZLP */ ++ unsigned sent_zlp : 1; ++ /** Total len for control transfer */ ++ unsigned total_len : 19; ++ ++ /** stall clear flag */ ++ unsigned stall_clear_flag : 1; ++ ++ /** Allocated DMA Desc count */ ++ uint32_t desc_cnt; ++ ++ uint32_t aligned_dma_addr; ++ uint32_t aligned_buf_size; ++ uint8_t *aligned_buf; ++ ++ ++#ifdef DWC_EN_ISOC ++ /** ++ * Variables specific for ISOC EPs ++ * ++ */ ++ /** DMA addresses of ISOC buffers */ ++ uint32_t dma_addr0; ++ uint32_t dma_addr1; ++ ++ uint32_t iso_dma_desc_addr; ++ dwc_otg_dma_desc_t* iso_desc_addr; ++ ++ /** pointer to the transfer buffers */ ++ uint8_t *xfer_buff0; ++ uint8_t *xfer_buff1; ++ ++ /** number of ISOC Buffer is processing */ ++ uint32_t proc_buf_num; ++ /** Interval of ISOC Buffer processing */ ++ uint32_t buf_proc_intrvl; ++ /** Data size for regular frame */ ++ uint32_t data_per_frame; ++ ++ /* todo - pattern data support is to be implemented in the future */ ++ /** Data size for pattern frame */ ++ uint32_t data_pattern_frame; ++ /** Frame number of pattern data */ ++ uint32_t sync_frame; ++ ++ /** bInterval */ ++ uint32_t bInterval; ++ /** ISO Packet number per frame */ ++ uint32_t pkt_per_frm; ++ /** Next frame num for which will be setup DMA Desc */ ++ uint32_t next_frame; ++ /** Number of packets per buffer processing */ ++ uint32_t pkt_cnt; ++ /** Info for all isoc packets */ ++ iso_pkt_info_t *pkt_info; ++ /** current pkt number */ ++ uint32_t cur_pkt; ++ /** current pkt number */ ++ uint8_t *cur_pkt_addr; ++ /** current pkt number */ ++ uint32_t cur_pkt_dma_addr; ++#endif //DWC_EN_ISOC ++/** @} */ ++} dwc_ep_t; ++ ++/* ++ * Reasons for halting a host channel. 
++ */ ++typedef enum dwc_otg_halt_status ++{ ++ DWC_OTG_HC_XFER_NO_HALT_STATUS, ++ DWC_OTG_HC_XFER_COMPLETE, ++ DWC_OTG_HC_XFER_URB_COMPLETE, ++ DWC_OTG_HC_XFER_ACK, ++ DWC_OTG_HC_XFER_NAK, ++ DWC_OTG_HC_XFER_NYET, ++ DWC_OTG_HC_XFER_STALL, ++ DWC_OTG_HC_XFER_XACT_ERR, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN, ++ DWC_OTG_HC_XFER_BABBLE_ERR, ++ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, ++ DWC_OTG_HC_XFER_AHB_ERR, ++ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, ++ DWC_OTG_HC_XFER_URB_DEQUEUE ++} dwc_otg_halt_status_e; ++ ++/** ++ * Host channel descriptor. This structure represents the state of a single ++ * host channel when acting in host mode. It contains the data items needed to ++ * transfer packets to an endpoint via a host channel. ++ */ ++typedef struct dwc_hc ++{ ++ /** Host channel number used for register address lookup */ ++ uint8_t hc_num; ++ ++ /** Device to access */ ++ unsigned dev_addr : 7; ++ ++ /** EP to access */ ++ unsigned ep_num : 4; ++ ++ /** EP direction. 0: OUT, 1: IN */ ++ unsigned ep_is_in : 1; ++ ++ /** ++ * EP speed. ++ * One of the following values: ++ * - DWC_OTG_EP_SPEED_LOW ++ * - DWC_OTG_EP_SPEED_FULL ++ * - DWC_OTG_EP_SPEED_HIGH ++ */ ++ unsigned speed : 2; ++#define DWC_OTG_EP_SPEED_LOW 0 ++#define DWC_OTG_EP_SPEED_FULL 1 ++#define DWC_OTG_EP_SPEED_HIGH 2 ++ ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - DWC_OTG_EP_TYPE_CONTROL: 0 ++ * - DWC_OTG_EP_TYPE_ISOC: 1 ++ * - DWC_OTG_EP_TYPE_BULK: 2 ++ * - DWC_OTG_EP_TYPE_INTR: 3 ++ */ ++ unsigned ep_type : 2; ++ ++ /** Max packet size in bytes */ ++ unsigned max_packet : 11; ++ ++ /** ++ * PID for initial transaction. ++ * 0: DATA0,
++ * 1: DATA2,
++ * 2: DATA1,
++ * 3: MDATA (non-Control EP), ++ * SETUP (Control EP) ++ */ ++ unsigned data_pid_start : 2; ++#define DWC_OTG_HC_PID_DATA0 0 ++#define DWC_OTG_HC_PID_DATA2 1 ++#define DWC_OTG_HC_PID_DATA1 2 ++#define DWC_OTG_HC_PID_MDATA 3 ++#define DWC_OTG_HC_PID_SETUP 3 ++ ++ /** Number of periodic transactions per (micro)frame */ ++ unsigned multi_count: 2; ++ ++ /** @name Transfer State */ ++ /** @{ */ ++ ++ /** Pointer to the current transfer buffer position. */ ++ uint8_t *xfer_buff; ++ /** Total number of bytes to transfer. */ ++ uint32_t xfer_len; ++ /** Number of bytes transferred so far. */ ++ uint32_t xfer_count; ++ /** Packet count at start of transfer.*/ ++ uint16_t start_pkt_count; ++ ++ /** ++ * Flag to indicate whether the transfer has been started. Set to 1 if ++ * it has been started, 0 otherwise. ++ */ ++ uint8_t xfer_started; ++ ++ /** ++ * Set to 1 to indicate that a PING request should be issued on this ++ * channel. If 0, process normally. ++ */ ++ uint8_t do_ping; ++ ++ /** ++ * Set to 1 to indicate that the error count for this transaction is ++ * non-zero. Set to 0 if the error count is 0. ++ */ ++ uint8_t error_state; ++ ++ /** ++ * Set to 1 to indicate that this channel should be halted the next ++ * time a request is queued for the channel. This is necessary in ++ * slave mode if no request queue space is available when an attempt ++ * is made to halt the channel. ++ */ ++ uint8_t halt_on_queue; ++ ++ /** ++ * Set to 1 if the host channel has been halted, but the core is not ++ * finished flushing queued requests. Otherwise 0. ++ */ ++ uint8_t halt_pending; ++ ++ /** ++ * Reason for halting the host channel. 
++ */ ++ dwc_otg_halt_status_e halt_status; ++ ++ /* ++ * Split settings for the host channel ++ */ ++ uint8_t do_split; /**< Enable split for the channel */ ++ uint8_t complete_split; /**< Enable complete split */ ++ uint8_t hub_addr; /**< Address of high speed hub */ ++ ++ uint8_t port_addr; /**< Port of the low/full speed device */ ++ /** Split transaction position ++ * One of the following values: ++ * - DWC_HCSPLIT_XACTPOS_MID ++ * - DWC_HCSPLIT_XACTPOS_BEGIN ++ * - DWC_HCSPLIT_XACTPOS_END ++ * - DWC_HCSPLIT_XACTPOS_ALL */ ++ uint8_t xact_pos; ++ ++ /** Set when the host channel does a short read. */ ++ uint8_t short_read; ++ ++ /** ++ * Number of requests issued for this channel since it was assigned to ++ * the current transfer (not counting PINGs). ++ */ ++ uint8_t requests; ++ ++ /** ++ * Queue Head for the transfer being processed by this channel. ++ */ ++ struct dwc_otg_qh *qh; ++ ++ /** @} */ ++ ++ /** Entry in list of host channels. */ ++ struct list_head hc_list_entry; ++} dwc_hc_t; ++ ++/** ++ * The following parameters may be specified when starting the module. These ++ * parameters define how the DWC_otg controller should be configured. ++ * Parameter values are passed to the CIL initialization function ++ * dwc_otg_cil_init. ++ */ ++typedef struct dwc_otg_core_params ++{ ++ int32_t opt; ++#define dwc_param_opt_default 1 ++ ++ /** ++ * Specifies the OTG capabilities. The driver will automatically ++ * detect the value for this parameter if none is specified. ++ * 0 - HNP and SRP capable (default) ++ * 1 - SRP Only capable ++ * 2 - No HNP/SRP capable ++ */ ++ int32_t otg_cap; ++#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 ++#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 ++#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 ++//#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE ++#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE ++ ++ /** ++ * Specifies whether to use slave or DMA mode for accessing the data ++ * FIFOs. 
The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - Slave ++ * 1 - DMA (default, if available) ++ */ ++ int32_t dma_enable; ++#define dwc_param_dma_enable_default 1 ++ ++ /** ++ * When DMA mode is enabled specifies whether to use address DMA or DMA Descritor mode for accessing the data ++ * FIFOs in device mode. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - address DMA ++ * 1 - DMA Descriptor(default, if available) ++ */ ++ int32_t dma_desc_enable; ++#define dwc_param_dma_desc_enable_default 0 ++ /** The DMA Burst size (applicable only for External DMA ++ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++ */ ++ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ ++//#define dwc_param_dma_burst_size_default 32 ++#define dwc_param_dma_burst_size_default 1 ++ ++ /** ++ * Specifies the maximum speed of operation in host and device mode. ++ * The actual speed depends on the speed of the attached device and ++ * the value of phy_type. The actual speed depends on the speed of the ++ * attached device. ++ * 0 - High Speed (default) ++ * 1 - Full Speed ++ */ ++ int32_t speed; ++#define dwc_param_speed_default 0 ++#define DWC_SPEED_PARAM_HIGH 0 ++#define DWC_SPEED_PARAM_FULL 1 ++ ++ /** Specifies whether low power mode is supported when attached ++ * to a Full Speed or Low Speed device in host mode. ++ * 0 - Don't support low power mode (default) ++ * 1 - Support low power mode ++ */ ++ int32_t host_support_fs_ls_low_power; ++#define dwc_param_host_support_fs_ls_low_power_default 0 ++ ++ /** Specifies the PHY clock rate in low power mode when connected to a ++ * Low Speed device in host mode. This parameter is applicable only if ++ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS ++ * then defaults to 6 MHZ otherwise 48 MHZ. 
++ * ++ * 0 - 48 MHz ++ * 1 - 6 MHz ++ */ ++ int32_t host_ls_low_power_phy_clk; ++#define dwc_param_host_ls_low_power_phy_clk_default 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 ++ ++ /** ++ * 0 - Use cC FIFO size parameters ++ * 1 - Allow dynamic FIFO sizing (default) ++ */ ++ int32_t enable_dynamic_fifo; ++#define dwc_param_enable_dynamic_fifo_default 1 ++ ++ /** Total number of 4-byte words in the data FIFO memory. This ++ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic ++ * Tx FIFOs. ++ * 32 to 32768 (default 8192) ++ * Note: The total FIFO memory depth in the FPGA configuration is 8192. ++ */ ++ int32_t data_fifo_size; ++#define dwc_param_data_fifo_size_default 8192 ++ ++ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1064) ++ */ ++ int32_t dev_rx_fifo_size; ++//#define dwc_param_dev_rx_fifo_size_default 1064 ++#define dwc_param_dev_rx_fifo_size_default 0x100 ++ ++ /** ++ * Specifies whether dedicated transmit FIFOs are ++ * enabled for non periodic IN endpoints in device mode ++ * 0 - No ++ * 1 - Yes ++ */ ++ int32_t en_multiple_tx_fifo; ++#define dwc_param_en_multiple_tx_fifo_default 1 ++ ++ /** Number of 4-byte words in each of the Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; ++//#define dwc_param_dev_tx_fifo_size_default 256 ++#define dwc_param_dev_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode ++ * when dynamic FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t dev_nperio_tx_fifo_size; ++//#define dwc_param_dev_nperio_tx_fifo_size_default 1024 ++#define dwc_param_dev_nperio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in each of the periodic Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. 
++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++//#define dwc_param_dev_perio_tx_fifo_size_default 256 ++#define dwc_param_dev_perio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_rx_fifo_size; ++//#define dwc_param_host_rx_fifo_size_default 1024 ++#define dwc_param_host_rx_fifo_size_default 0x292 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode ++ * when Dynamic FIFO sizing is enabled in the core. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_nperio_tx_fifo_size; ++//#define dwc_param_host_nperio_tx_fifo_size_default 1024 ++//#define dwc_param_host_nperio_tx_fifo_size_default 0x292 ++#define dwc_param_host_nperio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_perio_tx_fifo_size; ++//#define dwc_param_host_perio_tx_fifo_size_default 1024 ++#define dwc_param_host_perio_tx_fifo_size_default 0x292 ++ ++ /** The maximum transfer size supported in bytes. ++ * 2047 to 65,535 (default 65,535) ++ */ ++ int32_t max_transfer_size; ++#define dwc_param_max_transfer_size_default 65535 ++ ++ /** The maximum number of packets in a transfer. ++ * 15 to 511 (default 511) ++ */ ++ int32_t max_packet_count; ++#define dwc_param_max_packet_count_default 511 ++ ++ /** The number of host channel registers to use. ++ * 1 to 16 (default 12) ++ * Note: The FPGA configuration supports a maximum of 12 host channels. ++ */ ++ int32_t host_channels; ++//#define dwc_param_host_channels_default 12 ++#define dwc_param_host_channels_default 16 ++ ++ /** The number of endpoints in addition to EP0 available for device ++ * mode operations. 
++ * 1 to 15 (default 6 IN and OUT) ++ * Note: The FPGA configuration supports a maximum of 6 IN and OUT ++ * endpoints in addition to EP0. ++ */ ++ int32_t dev_endpoints; ++//#define dwc_param_dev_endpoints_default 6 ++#define dwc_param_dev_endpoints_default 8 ++ ++ /** ++ * Specifies the type of PHY interface to use. By default, the driver ++ * will automatically detect the phy_type. ++ * ++ * 0 - Full Speed PHY ++ * 1 - UTMI+ (default) ++ * 2 - ULPI ++ */ ++ int32_t phy_type; ++#define DWC_PHY_TYPE_PARAM_FS 0 ++#define DWC_PHY_TYPE_PARAM_UTMI 1 ++#define DWC_PHY_TYPE_PARAM_ULPI 2 ++#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI ++ ++ /** ++ * Specifies the UTMI+ Data Width. This parameter is ++ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI ++ * PHY_TYPE, this parameter indicates the data width between ++ * the MAC and the ULPI Wrapper.) Also, this parameter is ++ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set ++ * to "8 and 16 bits", meaning that the core has been ++ * configured to work at either data path width. ++ * ++ * 8 or 16 bits (default 16) ++ */ ++ int32_t phy_utmi_width; ++#define dwc_param_phy_utmi_width_default 16 ++ ++ /** ++ * Specifies whether the ULPI operates at double or single ++ * data rate. This parameter is only applicable if PHY_TYPE is ++ * ULPI. ++ * ++ * 0 - single data rate ULPI interface with 8 bit wide data ++ * bus (default) ++ * 1 - double data rate ULPI interface with 4 bit wide data ++ * bus ++ */ ++ int32_t phy_ulpi_ddr; ++#define dwc_param_phy_ulpi_ddr_default 0 ++ ++ /** ++ * Specifies whether to use the internal or external supply to ++ * drive the vbus with a ULPI phy. ++ */ ++ int32_t phy_ulpi_ext_vbus; ++#define DWC_PHY_ULPI_INTERNAL_VBUS 0 ++#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 ++#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS ++ ++ /** ++ * Specifies whether to use the I2Cinterface for full speed PHY. This ++ * parameter is only applicable if PHY_TYPE is FS. 
++ * 0 - No (default) ++ * 1 - Yes ++ */ ++ int32_t i2c_enable; ++#define dwc_param_i2c_enable_default 0 ++ ++ int32_t ulpi_fs_ls; ++#define dwc_param_ulpi_fs_ls_default 0 ++ ++ int32_t ts_dline; ++#define dwc_param_ts_dline_default 0 ++ ++ /** Thresholding enable flag- ++ * bit 0 - enable non-ISO Tx thresholding ++ * bit 1 - enable ISO Tx thresholding ++ * bit 2 - enable Rx thresholding ++ */ ++ uint32_t thr_ctl; ++#define dwc_param_thr_ctl_default 0 ++ ++ /** Thresholding length for Tx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t tx_thr_length; ++#define dwc_param_tx_thr_length_default 64 ++ ++ /** Thresholding length for Rx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t rx_thr_length; ++#define dwc_param_rx_thr_length_default 64 ++ ++ /** Per Transfer Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t pti_enable; ++#define dwc_param_pti_enable_default 0 ++ ++ /** Molti Processor Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t mpi_enable; ++#define dwc_param_mpi_enable_default 0 ++ ++} dwc_otg_core_params_t; ++ ++#ifdef DEBUG ++struct dwc_otg_core_if; ++typedef struct hc_xfer_info ++{ ++ struct dwc_otg_core_if *core_if; ++ dwc_hc_t *hc; ++} hc_xfer_info_t; ++#endif ++ ++/** ++ * The dwc_otg_core_if structure contains information needed to manage ++ * the DWC_otg controller acting in either host or device mode. It ++ * represents the programming view of the controller as a whole. ++ */ ++typedef struct dwc_otg_core_if ++{ ++ /** Parameters that define how the core should be configured.*/ ++ dwc_otg_core_params_t *core_params; ++ ++ /** Core Global registers starting at offset 000h. 
*/ ++ dwc_otg_core_global_regs_t *core_global_regs; ++ ++ /** Device-specific information */ ++ dwc_otg_dev_if_t *dev_if; ++ /** Host-specific information */ ++ dwc_otg_host_if_t *host_if; ++ ++ /** Value from SNPSID register */ ++ uint32_t snpsid; ++ ++ /* ++ * Set to 1 if the core PHY interface bits in USBCFG have been ++ * initialized. ++ */ ++ uint8_t phy_init_done; ++ ++ /* ++ * SRP Success flag, set by srp success interrupt in FS I2C mode ++ */ ++ uint8_t srp_success; ++ uint8_t srp_timer_started; ++ ++ /* Common configuration information */ ++ /** Power and Clock Gating Control Register */ ++ volatile uint32_t *pcgcctl; ++#define DWC_OTG_PCGCCTL_OFFSET 0xE00 ++ ++ /** Push/pop addresses for endpoints or host channels.*/ ++ uint32_t *data_fifo[MAX_EPS_CHANNELS]; ++#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 ++#define DWC_OTG_DATA_FIFO_SIZE 0x1000 ++ ++ /** Total RAM for FIFOs (Bytes) */ ++ uint16_t total_fifo_size; ++ /** Size of Rx FIFO (Bytes) */ ++ uint16_t rx_fifo_size; ++ /** Size of Non-periodic Tx FIFO (Bytes) */ ++ uint16_t nperio_tx_fifo_size; ++ ++ ++ /** 1 if DMA is enabled, 0 otherwise. */ ++ uint8_t dma_enable; ++ ++ /** 1 if Descriptor DMA mode is enabled, 0 otherwise. */ ++ uint8_t dma_desc_enable; ++ ++ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t pti_enh_enable; ++ ++ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t multiproc_int_enable; ++ ++ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. 
*/ ++ uint8_t en_multiple_tx_fifo; ++ ++ /** Set to 1 if multiple packets of a high-bandwidth transfer is in ++ * process of being queued */ ++ uint8_t queuing_high_bandwidth; ++ ++ /** Hardware Configuration -- stored here for convenience.*/ ++ hwcfg1_data_t hwcfg1; ++ hwcfg2_data_t hwcfg2; ++ hwcfg3_data_t hwcfg3; ++ hwcfg4_data_t hwcfg4; ++ ++ /** Host and Device Configuration -- stored here for convenience.*/ ++ hcfg_data_t hcfg; ++ dcfg_data_t dcfg; ++ ++ /** The operational State, during transations ++ * (a_host>>a_peripherial and b_device=>b_host) this may not ++ * match the core but allows the software to determine ++ * transitions. ++ */ ++ uint8_t op_state; ++ ++ /** ++ * Set to 1 if the HCD needs to be restarted on a session request ++ * interrupt. This is required if no connector ID status change has ++ * occurred since the HCD was last disconnected. ++ */ ++ uint8_t restart_hcd_on_session_req; ++ ++ /** HCD callbacks */ ++ /** A-Device is a_host */ ++#define A_HOST (1) ++ /** A-Device is a_suspend */ ++#define A_SUSPEND (2) ++ /** A-Device is a_peripherial */ ++#define A_PERIPHERAL (3) ++ /** B-Device is operating as a Peripheral. */ ++#define B_PERIPHERAL (4) ++ /** B-Device is operating as a Host. 
*/ ++#define B_HOST (5) ++ ++ /** HCD callbacks */ ++ struct dwc_otg_cil_callbacks *hcd_cb; ++ /** PCD callbacks */ ++ struct dwc_otg_cil_callbacks *pcd_cb; ++ ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t p_tx_msk; ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t tx_msk; ++ ++ /** Workqueue object used for handling several interrupts */ ++ struct workqueue_struct *wq_otg; ++ ++ /** Work object used for handling "Connector ID Status Change" Interrupt */ ++ struct work_struct w_conn_id; ++ ++ /** Work object used for handling "Wakeup Detected" Interrupt */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct w_wkp; ++#else ++ struct delayed_work w_wkp; ++#endif ++ ++#ifdef DEBUG ++ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; ++ ++ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; ++ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; ++ ++ uint32_t hfnum_7_samples; ++ uint64_t hfnum_7_frrem_accum; ++ uint32_t hfnum_0_samples; ++ uint64_t hfnum_0_frrem_accum; ++ uint32_t hfnum_other_samples; ++ uint64_t hfnum_other_frrem_accum; ++#endif ++ ++ ++} dwc_otg_core_if_t; ++ ++/*We must clear S3C24XX_EINTPEND external interrupt register ++ * because after clearing in this register trigerred IRQ from ++ * H/W core in kernel interrupt can be occured again before OTG ++ * handlers clear all IRQ sources of Core registers because of ++ * timing latencies and Low Level IRQ Type. 
++ */ ++ ++#ifdef CONFIG_MACH_IPMATE ++#define S3C2410X_CLEAR_EINTPEND() \ ++do { \ ++ if (!dwc_otg_read_core_intr(core_if)) { \ ++ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \ ++ } \ ++} while (0) ++#else ++#define S3C2410X_CLEAR_EINTPEND() do { } while (0) ++#endif ++ ++/* ++ * The following functions are functions for works ++ * using during handling some interrupts ++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++extern void w_conn_id_status_change(void *p); ++extern void w_wakeup_detected(void *p); ++ ++#else ++ ++extern void w_conn_id_status_change(struct work_struct *p); ++extern void w_wakeup_detected(struct work_struct *p); ++ ++#endif ++ ++ ++/* ++ * The following functions support initialization of the CIL driver component ++ * and the DWC_otg controller. ++ */ ++extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, ++ dwc_otg_core_params_t *_core_params); ++extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++ ++/** @name Device CIL Functions ++ * The following functions support managing the DWC_otg controller in device ++ * mode. 
++ */ ++/**@{*/ ++extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); ++extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); ++extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_spram(dwc_otg_core_if_t *_core_if); ++#ifdef DWC_EN_ISOC ++extern void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++#endif //DWC_EN_ISOC ++/**@}*/ ++ ++/** @name Host CIL Functions ++ * The following functions support managing the DWC_otg controller in host ++ * mode. 
++ */ ++/**@{*/ ++extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ dwc_otg_halt_status_e _halt_status); ++extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); ++ ++/** ++ * This function Reads HPRT0 in preparation to modify. It keeps the ++ * WC bits 0 so that if they are read as 1, they won't clear when you ++ * write it back ++ */ ++static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) ++{ ++ hprt0_data_t hprt0; ++ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); ++ hprt0.b.prtena = 0; ++ hprt0.b.prtconndet = 0; ++ hprt0.b.prtenchng = 0; ++ hprt0.b.prtovrcurrchng = 0; ++ return hprt0.d32; ++} ++ ++extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); ++/**@}*/ ++ ++/** @name Common CIL Functions ++ * The following functions support managing the DWC_otg controller in either ++ * device or host mode. 
++ */ ++/**@{*/ ++ ++extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes); ++ ++extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); ++ ++extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, ++ const int _num ); ++extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); ++ ++extern dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count); ++extern void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count); ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++} ++ ++/** ++ * This function returns the OTG Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the IN endpoint interrupt bits. ++ */ ++static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ return (v & 0xffff); ++ ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the OUT endpoint interrupt bits. 
++ */ ++static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ ++ return ((v & 0xffff0000) >> 16); ++} ++ ++/** ++ * This function returns the Device IN EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *core_if, ++ dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t v, msk, emp; ++ ++ if(core_if->multiproc_int_enable) { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num]); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } else { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } ++ ++ ++ return v; ++} ++/** ++ * This function returns the Device OUT EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, ++ dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v; ++ doepmsk_data_t msk = { .d32 = 0 }; ++ ++ if(_core_if->multiproc_int_enable) { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepeachintmsk[_ep->num]); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } else { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ 
v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } ++ return v; ++} ++ ++/** ++ * This function returns the Host All Channel Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); ++} ++ ++static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); ++} ++ ++ ++/** ++ * This function returns the mode of the operation, host or device. ++ * ++ * @return 0 - Device Mode, 1 - Host Mode ++ */ ++static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); ++} ++ ++static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); ++} ++static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); ++} ++ ++extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); ++ ++ ++/**@}*/ ++ ++/** ++ * DWC_otg CIL callback structure. This structure allows the HCD and ++ * PCD to register functions used for starting and stopping the PCD ++ * and HCD for role change on for a DRD. 
++ */ ++typedef struct dwc_otg_cil_callbacks ++{ ++ /** Start function for role change */ ++ int (*start) (void *_p); ++ /** Stop Function for role change */ ++ int (*stop) (void *_p); ++ /** Disconnect Function for role change */ ++ int (*disconnect) (void *_p); ++ /** Resume/Remote wakeup Function */ ++ int (*resume_wakeup) (void *_p); ++ /** Suspend function */ ++ int (*suspend) (void *_p); ++ /** Session Start (SRP) */ ++ int (*session_start) (void *_p); ++ /** Pointer passed to start() and stop() */ ++ void *p; ++} dwc_otg_cil_callbacks_t; ++ ++extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++#ifndef warn ++#define warn printk ++#endif ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_cil_intr.c +@@ -0,0 +1,881 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ ++ * $Revision: #10 $ ++ * $Date: 2008/07/16 $ ++ * $Change: 1065567 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * This file contains the Common Interrupt handlers. ++ */ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++#include "dwc_otg_pcd.h" ++ ++#ifdef DEBUG ++inline const char *op_state_str(dwc_otg_core_if_t *core_if) ++{ ++ return (core_if->op_state==A_HOST?"a_host": ++ (core_if->op_state==A_SUSPEND?"a_suspend": ++ (core_if->op_state==A_PERIPHERAL?"a_peripheral": ++ (core_if->op_state==B_PERIPHERAL?"b_peripheral": ++ (core_if->op_state==B_HOST?"b_host": ++ "unknown"))))); ++} ++#endif ++ ++/** This function will log a debug message ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", ++ dwc_otg_mode(core_if) ? "Host" : "Device"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.modemismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++ ++/** Start the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->start) { ++ core_if->hcd_cb->start(core_if->hcd_cb->p); ++ } ++} ++/** Stop the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->stop) { ++ core_if->hcd_cb->stop(core_if->hcd_cb->p); ++ } ++} ++/** Disconnect the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_disconnect(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) { ++ core_if->hcd_cb->disconnect(core_if->hcd_cb->p); ++ } ++} ++/** Inform the HCD the a New Session has begun. Helper function for ++ * using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_session_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->session_start) { ++ core_if->hcd_cb->session_start(core_if->hcd_cb->p); ++ } ++} ++ ++/** Start the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++static inline void pcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->start) { ++ core_if->pcd_cb->start(core_if->pcd_cb->p); ++ } ++} ++/** Stop the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->stop) { ++ core_if->pcd_cb->stop(core_if->pcd_cb->p); ++ } ++} ++/** Suspend the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_suspend(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->suspend) { ++ core_if->pcd_cb->suspend(core_if->pcd_cb->p); ++ } ++} ++/** Resume the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_resume(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++} ++ ++/** ++ * This function handles the OTG Interrupts. It reads the OTG ++ * Interrupt Register (GOTGINT) to determine what interrupt has ++ * occurred. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gotgint_data_t gotgint; ++ gotgctl_data_t gotgctl; ++ gintmsk_data_t gintmsk; ++ ++ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32, ++ op_state_str(core_if)); ++ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32); ++ ++ if (gotgint.b.sesenddet) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session End Detected++ (%s)\n", ++ op_state_str(core_if)); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ ++ if (core_if->op_state == B_HOST) { ++ ++ dwc_otg_pcd_t *pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If not B_HOST and Device HNP still set. HNP ++ * Did not succeed!*/ ++ if (gotgctl.b.devhnpen) { ++ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); ++ DWC_ERROR("Device Not Connected/Responding!\n"); ++ } ++ ++ /* If Session End Detected the B-Cable has ++ * been disconnected. */ ++ /* Reset PCD and Gadget driver to a ++ * clean state. 
*/ ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } ++ gotgctl.d32 = 0; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ if (gotgint.b.sesreqsucstschng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session Reqeust Success Status Change++\n"); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.sesreqscs) { ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ core_if->srp_success = 1; ++ } ++ else { ++ dwc_otg_pcd_t *pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_resume(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ } ++ } ++ if (gotgint.b.hstnegsucstschng) { ++ /* Print statements during the HNP interrupt handling ++ * can cause it to fail.*/ ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.hstnegscs) { ++ if (dwc_otg_is_host_mode(core_if)) { ++ dwc_otg_pcd_t *pcd; ++ ++ core_if->op_state = B_HOST; ++ /* ++ * Need to disable SOF interrupt immediately. ++ * When switching from device to host, the PCD ++ * interrupt handler won't handle the ++ * interrupt if host mode is already set. The ++ * HCD interrupt handler won't get called if ++ * the HCD state is HALT. This means that the ++ * interrupt does not get handled and Linux ++ * complains loudly. 
++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ hcd_start(core_if); ++ core_if->op_state = B_HOST; ++ } ++ } else { ++ gotgctl.d32 = 0; ++ gotgctl.b.hnpreq = 1; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ DWC_DEBUGPL(DBG_ANY, "HNP Failed\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++ } ++ } ++ if (gotgint.b.hstnegdet) { ++ /* The disconnect interrupt is set at the same time as ++ * Host Negotiation Detected. During the mode ++ * switch all interrupts are cleared so the disconnect ++ * interrupt handler will not get executed. ++ */ ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Host Negotiation Detected++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ if (dwc_otg_is_device_mode(core_if)){ ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state); ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = A_PERIPHERAL; ++ } else { ++ dwc_otg_pcd_t *pcd; ++ ++ /* ++ * Need to disable SOF interrupt immediately. When ++ * switching from device to host, the PCD interrupt ++ * handler won't handle the interrupt if host mode is ++ * already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that ++ * the interrupt does not get handled and Linux ++ * complains loudly. 
++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ if (gotgint.b.adevtoutchng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "A-Device Timeout Change++\n"); ++ } ++ if (gotgint.b.debdone) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Debounce Done++\n"); ++ } ++ ++ /* Clear GOTGINT */ ++ dwc_write_reg32 (&core_if->core_global_regs->gotgint, gotgint.d32); ++ ++ return 1; ++} ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++void w_conn_id_status_change(void *p) ++{ ++ dwc_otg_core_if_t *core_if = p; ++ ++#else ++ ++void w_conn_id_status_change(struct work_struct *p) ++{ ++ dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id); ++ ++#endif ++ ++ ++ uint32_t count = 0; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); ++ ++ /* B-Device connector (Device Mode) */ ++ if (gotgctl.b.conidsts) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* Wait for switch to device mode. 
*/ ++ while (!dwc_otg_is_device_mode(core_if)){ ++ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = B_PERIPHERAL; ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } else { ++ /* A-Device connector (Host Mode) */ ++ while (!dwc_otg_is_host_mode(core_if)) { ++ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = A_HOST; ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ hcd_start(core_if); ++ } ++} ++ ++ ++/** ++ * This function handles the Connector ID Status Change Interrupt. It ++ * reads the OTG Interrupt Register (GOTCTL) to determine whether this ++ * is a Device to Host Mode transition or a Host Mode to Device ++ * Transition. ++ * ++ * This only occurs when the cable is connected/removed from the PHY ++ * connector. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if) ++{ ++ ++ /* ++ * Need to disable SOF interrupt immediately. If switching from device ++ * to host, the PCD interrupt handler won't handle the interrupt if ++ * host mode is already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that the interrupt does ++ * not get handled and Linux complains loudly. 
++ */ ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ gintsts_data_t gintsts = { .d32 = 0 }; ++ ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0); ++ ++ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ ++ /* ++ * Need to schedule a work, as there are possible DELAY function calls ++ */ ++ queue_work(core_if->wq_otg, &core_if->w_conn_id); ++ ++ /* Set flag and clear interrupt */ ++ gintsts.b.conidstschng = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device is initiating the Session ++ * Request Protocol to request the host to turn on bus power so a new ++ * session can begin. The handler responds by turning on bus power. If ++ * the DWC_otg controller is in low power mode, the handler brings the ++ * controller out of low power mode before turning on bus power. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if) ++{ ++ hprt0_data_t hprt0; ++ gintsts_data_t gintsts; ++ ++#ifndef DWC_HOST_ONLY ++ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ DWC_PRINT("SRP: Device mode\n"); ++ } else { ++ DWC_PRINT("SRP: Host mode\n"); ++ ++ /* Turn on the port power bit. */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Start the Connection timer. So a message can be displayed ++ * if connect does not occur within 10 seconds. 
*/ ++ hcd_session_start(core_if); ++ } ++#endif ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sessreqintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++void w_wakeup_detected(void *p) ++{ ++ dwc_otg_core_if_t* core_if = p; ++ ++#else ++ ++void w_wakeup_detected(struct work_struct *p) ++{ ++ struct delayed_work *dw = container_of(p, struct delayed_work, work); ++ dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp); ++ ++#endif ++ /* ++ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms ++ * so that OPT tests pass with all PHYs). ++ */ ++ hprt0_data_t hprt0 = {.d32=0}; ++#if 0 ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); ++ UDELAY(10); ++#endif //0 ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); ++// MDELAY(70); ++ hprt0.b.prtres = 0; /* Resume */ ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(core_if->host_if->hprt0)); ++} ++/** ++ * This interrupt indicates that the DWC_otg controller has detected a ++ * resume or remote wakeup sequence. If the DWC_otg controller is in ++ * low power mode, the handler must brings the controller out of low ++ * power mode. The controller automatically begins resume ++ * signaling. The handler schedules a time to stop resume signaling. 
++ */ ++int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dctl_data_t dctl = {.d32=0}; ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)); ++#ifdef PARTIAL_POWER_DOWN ++ if (core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ ++ power.d32 = dwc_read_reg32(core_if->pcgcctl); ++ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); ++ ++ power.b.stoppclk = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.pwrclmp = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ } ++#endif ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ } else { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); ++ ++ queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1)); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.wkupintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device has been disconnected from ++ * the root port. ++ */ ++int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device"), ++ op_state_str(core_if)); ++ ++/** @todo Consolidate this if statement. 
*/ ++#ifndef DWC_HOST_ONLY ++ if (core_if->op_state == B_HOST) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = B_PERIPHERAL; ++ } else if (dwc_otg_is_device_mode(core_if)) { ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ if (gotgctl.b.hstsethnpen==1) { ++ /* Do nothing, if HNP in process the OTG ++ * interrupt "Host Negotiation Detected" ++ * interrupt will do the mode switch. ++ */ ++ } else if (gotgctl.b.devhnpen == 0) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); ++ } ++ } else { ++ if (core_if->op_state == A_HOST) { ++ /* A-Cable still connected but device disconnected. */ ++ hcd_disconnect(core_if); ++ } ++ } ++#endif ++ ++ gintsts.d32 = 0; ++ gintsts.b.disconnect = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++/** ++ * This interrupt indicates that SUSPEND state has been detected on ++ * the USB. ++ * ++ * For HNP the USB Suspend interrupt signals the change from ++ * "a_peripheral" to "a_host". ++ * ++ * When power management is enabled the core will be put in low power ++ * mode. 
++ */ ++int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* Check the Device status register to determine if the Suspend ++ * state is active. */ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); ++ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " ++ "HWCFG4.power Optimize=%d\n", ++ dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz); ++ ++ ++#ifdef PARTIAL_POWER_DOWN ++/** @todo Add a module parameter for power management. */ ++ ++ if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ DWC_DEBUGPL(DBG_CIL, "suspend\n"); ++ ++ power.b.pwrclmp = 1; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ ++ power.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); ++ } ++#endif ++ /* PCD callback for suspend. */ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_suspend(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } else { ++ if (core_if->op_state == A_PERIPHERAL) { ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); ++ /* Clear the a_peripheral flag, back to a_host. 
*/ ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbsuspend = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ gintmsk_data_t gintmsk; ++ gintmsk_data_t gintmsk_common = {.d32=0}; ++ gintmsk_common.b.wkupintr = 1; ++ gintmsk_common.b.sessreqintr = 1; ++ gintmsk_common.b.conidstschng = 1; ++ gintmsk_common.b.otgintr = 1; ++ gintmsk_common.b.modemismatch = 1; ++ gintmsk_common.b.disconnect = 1; ++ gintmsk_common.b.usbsuspend = 1; ++ /** @todo: The port interrupt occurs while in device ++ * mode. Added code to CIL to clear the interrupt for now! ++ */ ++ gintmsk_common.b.portintr = 1; ++ ++ gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk); ++#ifdef DEBUG ++ /* if any common interrupts set */ ++ if (gintsts.d32 & gintmsk_common.d32) { ++ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", ++ gintsts.d32, gintmsk.d32); ++ } ++#endif ++ ++ return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32); ++ ++} ++ ++/** ++ * Common interrupt handler. ++ * ++ * The common interrupts are those that occur in both Host and Device mode. ++ * This handler handles the following interrupts: ++ * - Mode Mismatch Interrupt ++ * - Disconnect Interrupt ++ * - OTG Interrupt ++ * - Connector ID Status Change Interrupt ++ * - Session Request Interrupt. ++ * - Resume / Remote Wakeup Detected Interrupt. 
++ * ++ */ ++int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ int retval = 0; ++ gintsts_data_t gintsts; ++ ++ gintsts.d32 = dwc_otg_read_common_intr(core_if); ++ ++ if (gintsts.b.modemismatch) { ++ retval |= dwc_otg_handle_mode_mismatch_intr(core_if); ++ } ++ if (gintsts.b.otgintr) { ++ retval |= dwc_otg_handle_otg_intr(core_if); ++ } ++ if (gintsts.b.conidstschng) { ++ retval |= dwc_otg_handle_conn_id_status_change_intr(core_if); ++ } ++ if (gintsts.b.disconnect) { ++ retval |= dwc_otg_handle_disconnect_intr(core_if); ++ } ++ if (gintsts.b.sessreqintr) { ++ retval |= dwc_otg_handle_session_req_intr(core_if); ++ } ++ if (gintsts.b.wkupintr) { ++ retval |= dwc_otg_handle_wakeup_detected_intr(core_if); ++ } ++ if (gintsts.b.usbsuspend) { ++ retval |= dwc_otg_handle_usb_suspend_intr(core_if); ++ } ++ if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) { ++ /* The port interrupt occurs while in device mode with HPRT0 ++ * Port Enable/Disable. ++ */ ++ gintsts.d32 = 0; ++ gintsts.b.portintr = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, ++ gintsts.d32); ++ retval |= 1; ++ ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_driver.c +@@ -0,0 +1,1283 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.c $ ++ * $Revision: #63 $ ++ * $Date: 2008/09/24 $ ++ * $Change: 1101777 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. 
You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * The dwc_otg_driver module provides the initialization and cleanup entry ++ * points for the DWC_otg driver. This module will be dynamically installed ++ * after Linux is booted using the insmod command. When the module is ++ * installed, the dwc_otg_driver_init function is called. When the module is ++ * removed (using rmmod), the dwc_otg_driver_cleanup function is called. ++ * ++ * This module also defines a data structure for the dwc_otg_driver, which is ++ * used in conjunction with the standard ARM lm_device structure. 
These ++ * structures allow the OTG driver to comply with the standard Linux driver ++ * model in which devices and drivers are registered with a bus driver. This ++ * has the benefit that Linux can expose attributes of the driver and device ++ * in its special sysfs file system. Users can then read or write files in ++ * this file system to perform diagnostics on the driver components or the ++ * device. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++# include ++#endif ++ ++#include ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++# include ++#endif ++ ++//#include ++#include ++#include ++#include ++#include ++ ++#include "dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++#include "dwc_otg_cil.h" ++#include "dwc_otg_pcd.h" ++#include "dwc_otg_hcd.h" ++ ++#define DWC_DRIVER_VERSION "2.72a 24-JUN-2008" ++#define DWC_DRIVER_DESC "HS OTG USB Controller driver" ++ ++static const char dwc_driver_name[] = "dwc_otg"; ++ ++/*-------------------------------------------------------------------------*/ ++/* Encapsulate the module parameter settings */ ++ ++static dwc_otg_core_params_t dwc_otg_module_params = { ++ .opt = -1, ++ .otg_cap = -1, ++ .dma_enable = -1, ++ .dma_desc_enable = -1, ++ .dma_burst_size = -1, ++ .speed = -1, ++ .host_support_fs_ls_low_power = -1, ++ .host_ls_low_power_phy_clk = -1, ++ .enable_dynamic_fifo = -1, ++ .data_fifo_size = -1, ++ .dev_rx_fifo_size = -1, ++ .dev_nperio_tx_fifo_size = -1, ++ .dev_perio_tx_fifo_size = { ++ /* dev_perio_tx_fifo_size_1 */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .host_rx_fifo_size = -1, ++ .host_nperio_tx_fifo_size = -1, ++ .host_perio_tx_fifo_size = -1, ++ .max_transfer_size = -1, ++ .max_packet_count = -1, ++ .host_channels = -1, ++ .dev_endpoints = -1, ++ 
.phy_type = -1, ++ .phy_utmi_width = -1, ++ .phy_ulpi_ddr = -1, ++ .phy_ulpi_ext_vbus = -1, ++ .i2c_enable = -1, ++ .ulpi_fs_ls = -1, ++ .ts_dline = -1, ++ .en_multiple_tx_fifo = -1, ++ .dev_tx_fifo_size = { ++ /* dev_tx_fifo_size */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .thr_ctl = -1, ++ .tx_thr_length = -1, ++ .rx_thr_length = -1, ++ .pti_enable = -1, ++ .mpi_enable = -1, ++}; ++ ++/** ++ * This function shows the Driver Version. ++ */ ++static ssize_t version_show(struct device_driver *dev, char *buf) ++{ ++ return snprintf(buf, sizeof(DWC_DRIVER_VERSION)+2, "%s\n", ++ DWC_DRIVER_VERSION); ++} ++static DRIVER_ATTR(version, S_IRUGO, version_show, NULL); ++ ++/** ++ * Global Debug Level Mask. ++ */ ++uint32_t g_dbg_lvl = 0; /* OFF */ ++ ++/** ++ * This function shows the driver Debug Level. ++ */ ++static ssize_t dbg_level_show(struct device_driver *drv, char *buf) ++{ ++ return sprintf(buf, "0x%0x\n", g_dbg_lvl); ++} ++ ++/** ++ * This function stores the driver Debug Level. ++ */ ++static ssize_t dbg_level_store(struct device_driver *drv, const char *buf, ++ size_t count) ++{ ++ g_dbg_lvl = simple_strtoul(buf, NULL, 16); ++ return count; ++} ++static DRIVER_ATTR(debuglevel, S_IRUGO|S_IWUSR, dbg_level_show, dbg_level_store); ++ ++/** ++ * This function is called during module intialization to verify that ++ * the module parameters are in a valid state. ++ */ ++static int check_parameters(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ int retval = 0; ++ ++/* Checks if the parameter is outside of its valid range of values */ ++#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \ ++ ((dwc_otg_module_params._param_ < (_low_)) || \ ++ (dwc_otg_module_params._param_ > (_high_))) ++ ++/* If the parameter has been set by the user, check that the parameter value is ++ * within the value range of values. If not, report a module error. 
*/ ++#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \ ++ do { \ ++ if (dwc_otg_module_params._param_ != -1) { \ ++ if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'\n", \ ++ dwc_otg_module_params._param_, _string_); \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ retval++; \ ++ } \ ++ } \ ++ } while (0) ++ ++ DWC_OTG_PARAM_ERR(opt,0,1,"opt"); ++ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap"); ++ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable"); ++ DWC_OTG_PARAM_ERR(dma_desc_enable,0,1,"dma_desc_enable"); ++ DWC_OTG_PARAM_ERR(speed,0,1,"speed"); ++ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power"); ++ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk"); ++ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo"); ++ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size"); ++ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count"); ++ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels"); ++ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints"); ++ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus"); ++ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable"); ++ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls"); ++ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline"); ++ ++ if (dwc_otg_module_params.dma_burst_size != -1) { ++ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) && ++ 
DWC_OTG_PARAM_TEST(dma_burst_size,4,4) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,256,256)) { ++ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n", ++ dwc_otg_module_params.dma_burst_size); ++ dwc_otg_module_params.dma_burst_size = 32; ++ retval++; ++ } ++ ++ { ++ uint8_t brst_sz = 0; ++ while(dwc_otg_module_params.dma_burst_size > 1) { ++ brst_sz ++; ++ dwc_otg_module_params.dma_burst_size >>= 1; ++ } ++ dwc_otg_module_params.dma_burst_size = brst_sz; ++ } ++ } ++ ++ if (dwc_otg_module_params.phy_utmi_width != -1) { ++ if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8) && ++ DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) { ++ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n", ++ dwc_otg_module_params.phy_utmi_width); ++ dwc_otg_module_params.phy_utmi_width = 16; ++ retval++; ++ } ++ } ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i], 4, 768, "dev_perio_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i); ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo"); ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i], 4, 768, "dev_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ 
dwc_otg_module_params.dev_tx_fifo_size[i], "dev_tx_fifo_size", i); ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl"); ++ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length"); ++ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length"); ++ ++ DWC_OTG_PARAM_ERR(pti_enable,0,1,"pti_enable"); ++ DWC_OTG_PARAM_ERR(mpi_enable,0,1,"mpi_enable"); ++ ++ /* At this point, all module parameters that have been set by the user ++ * are valid, and those that have not are left unset. Now set their ++ * default values and/or check the parameters against the hardware ++ * configurations of the OTG core. */ ++ ++/* This sets the parameter to the default value if it has not been set by the ++ * user */ ++#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \ ++ ({ \ ++ int changed = 1; \ ++ if (dwc_otg_module_params._param_ == -1) { \ ++ changed = 0; \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ } \ ++ changed; \ ++ }) ++ ++/* This checks the macro agains the hardware configuration to see if it is ++ * valid. It is possible that the default value could be invalid. In this ++ * case, it will report a module error if the user touched the parameter. ++ * Otherwise it will adjust the value without any error. */ ++#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \ ++ ({ \ ++ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \ ++ int error = 0; \ ++ if (!(_is_valid_)) { \ ++ if (changed) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'. 
Check HW configuration.\n", dwc_otg_module_params._param_, _str_); \ ++ error = 1; \ ++ } \ ++ dwc_otg_module_params._param_ = (_set_valid_); \ ++ } \ ++ error; \ ++ }) ++ ++ /* OTG Cap */ ++ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap", ++ ({ ++ int valid; ++ valid = 1; ++ switch (dwc_otg_module_params.otg_cap) { ++ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: ++ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ++ valid = 0; ++ break; ++ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: ++ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ valid = 0; ++ } ++ break; ++ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: ++ /* always valid */ ++ break; ++ } ++ valid; ++ }), ++ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? ++ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable", ++ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_desc_enable, "dma_desc_enable", ++ ((dwc_otg_module_params.dma_desc_enable == 1) && ++ ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.desc_dma == 0))) ? 
0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, ++ "host_support_fs_ls_low_power", ++ 1, 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, ++ "enable_dynamic_fifo", ++ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || ++ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, ++ "data_fifo_size", ++ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), ++ core_if->hwcfg3.b.dfifo_depth); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, ++ "dev_rx_fifo_size", ++ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, ++ "dev_nperio_tx_fifo_size", ++ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, ++ "host_rx_fifo_size", ++ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, ++ "host_nperio_tx_fifo_size", ++ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, ++ "host_perio_tx_fifo_size", ++ (dwc_otg_module_params.host_perio_tx_fifo_size <= ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), ++ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); ++ ++ retval += 
DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, ++ "max_transfer_size", ++ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), ++ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, ++ "max_packet_count", ++ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), ++ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, ++ "host_channels", ++ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), ++ (core_if->hwcfg2.b.num_host_chan + 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, ++ "dev_endpoints", ++ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), ++ core_if->hwcfg2.b.num_dev_ep); ++ ++/* ++ * Define the following to disable the FS PHY Hardware checking. This is for ++ * internal testing only. ++ * ++ * #define NO_FS_PHY_HW_CHECKS ++ */ ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", ++ ({ ++ int valid = 0; ++ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 1) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 2) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->hwcfg2.b.fs_phy_type == 1)) { ++ valid = 1; ++ } ++ valid; ++ }), ++ ({ ++ int set = DWC_PHY_TYPE_PARAM_FS; ++ if (core_if->hwcfg2.b.hs_phy_type) { ++ if ((core_if->hwcfg2.b.hs_phy_type == 3) || ++ (core_if->hwcfg2.b.hs_phy_type == 1)) { ++ set = DWC_PHY_TYPE_PARAM_UTMI; ++ } ++ else { ++ set = DWC_PHY_TYPE_PARAM_ULPI; ++ 
} ++ } ++ set; ++ })); ++#endif ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed", ++ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, ++ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, ++ "host_ls_low_power_phy_clk", ++ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), ++ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); ++ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); ++ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, ++ "i2c_enable", ++ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 0 : 1, ++ 0); ++#endif ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. 
Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, "en_multiple_tx_fifo", ++ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, ++ 0); ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(thr_ctl, "thr_ctl", ++ ((dwc_otg_module_params.thr_ctl != 0) && ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.ded_fifo_en == 0))) ? 0 : 1, ++ 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length); ++ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(pti_enable, "pti_enable", ++ ((dwc_otg_module_params.pti_enable == 0) || ((dwc_otg_module_params.pti_enable == 1) && (core_if->snpsid >= 0x4F54272A))) ? 1 : 0, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(mpi_enable, "mpi_enable", ++ ((dwc_otg_module_params.mpi_enable == 0) || ((dwc_otg_module_params.mpi_enable == 1) && (core_if->hwcfg2.b.multi_proc_int == 1))) ? 1 : 0, ++ 0); ++ return retval; ++} ++ ++/** ++ * This function is the top level interrupt handler for the Common ++ * (Device and host modes) interrupts. 
++ */ ++static irqreturn_t dwc_otg_common_irq(int irq, void *dev ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *r ++#endif ++ ) ++{ ++ dwc_otg_device_t *otg_dev = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_handle_common_intr(otg_dev->core_if); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * This function is called when a lm_device is unregistered with the ++ * dwc_otg_driver. This happens, for example, when the rmmod command is ++ * executed. The device may or may not be electrically present. If it is ++ * present, the driver stops device processing. Any resources used on behalf ++ * of this device are freed. ++ * ++ * @param[in] lmdev ++ */ ++static void dwc_otg_driver_remove(struct lm_device *lmdev) ++{ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lmdev); ++ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, lmdev); ++ ++ if (!otg_dev) { ++ /* Memory allocation for the dwc_otg_device failed. */ ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return; ++ } ++ ++ /* ++ * Free the IRQ ++ */ ++ if (otg_dev->common_irq_installed) { ++ free_irq(lmdev->irq, otg_dev); ++ } ++ ++#ifndef DWC_DEVICE_ONLY ++ if (otg_dev->hcd) { ++ dwc_otg_hcd_remove(lmdev); ++ } else { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return; ++ } ++#endif ++ ++#ifndef DWC_HOST_ONLY ++ if (otg_dev->pcd) { ++ dwc_otg_pcd_remove(lmdev); ++ } ++#endif ++ if (otg_dev->core_if) { ++ dwc_otg_cil_remove(otg_dev->core_if); ++ } ++ ++ /* ++ * Remove the device attributes ++ */ ++ dwc_otg_attr_remove(lmdev); ++ ++ /* ++ * Return the memory. ++ */ ++ if (otg_dev->base) { ++ cns3xxx_iounmap(otg_dev->base); ++ } ++ kfree(otg_dev); ++ ++ /* ++ * Clear the drvdata pointer. ++ */ ++ lm_set_drvdata(lmdev, 0); ++} ++ ++/** ++ * This function is called when an lm_device is bound to a ++ * dwc_otg_driver. It creates the driver components required to ++ * control the device (CIL, HCD, and PCD) and it initializes the ++ * device. 
The driver components are stored in a dwc_otg_device ++ * structure. A reference to the dwc_otg_device is saved in the ++ * lm_device. This allows the driver to access the dwc_otg_device ++ * structure on subsequent calls to driver methods for this device. ++ * ++ * @param[in] lmdev lm_device definition ++ */ ++static int dwc_otg_driver_probe(struct lm_device *lmdev) ++{ ++ int retval = 0; ++ uint32_t snpsid; ++ dwc_otg_device_t *dwc_otg_device; ++ u_int32_t val; ++ ++ dev_dbg(&lmdev->dev, "dwc_otg_driver_probe(%p)\n", lmdev); ++ dev_dbg(&lmdev->dev, "start=0x%08x\n", (unsigned)lmdev->resource.start); ++ ++ dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL); ++ ++ if (!dwc_otg_device) { ++ dev_err(&lmdev->dev, "kmalloc of dwc_otg_device failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ memset(dwc_otg_device, 0, sizeof(*dwc_otg_device)); ++ dwc_otg_device->reg_offset = 0xFFFFFFFF; ++ ++ /* ++ * Map the DWC_otg Core memory into virtual address space. ++ */ ++#ifdef CNS3XXX_USBOTG_BASE_VIRT ++ dwc_otg_device->base = (void __iomem *) CNS3XXX_USBOTG_BASE_VIRT; ++#else ++ dwc_otg_device->base = ioremap(lmdev->resource.start, SZ_256K); ++#endif ++ ++ if (!dwc_otg_device->base) { ++ dev_err(&lmdev->dev, "cns3xxx_ioremap() failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ dev_dbg(&lmdev->dev, "base=0x%08x\n", (unsigned)dwc_otg_device->base); ++ ++#ifdef CONFIG_SILICON ++#if 0 ++ //OTG PHY ++ cns3xxx_pwr_power_up(1<base + 0x40)); ++ ++ if ((snpsid & 0xFFFFF000) != OTG_CORE_REV_2_00) { ++ dev_err(&lmdev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ DWC_PRINT("Core Release: %x.%x%x%x\n", ++ (snpsid >> 12 & 0xF), ++ (snpsid >> 8 & 0xF), ++ (snpsid >> 4 & 0xF), ++ (snpsid & 0xF)); ++ ++ ++ ++ // de-assert otgdisable ++ val=__raw_readl((void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0808)); ++ __raw_writel(val&(~(1 << 10)), (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0808)); ++ val=__raw_readl((void __iomem 
*)(CNS3XXX_MISC_BASE_VIRT + 0x0808)); ++ DWC_DEBUGPL(DBG_CIL, "de-assert otgdisable(bit10): MISC_USBPHY00_CFG_REG=%.8x\n",val); ++ ++ ++#ifdef ENDIAN_MODE_BIG_ENDIAN ++ // bit[18]:otg endian, bit[19]:usbh endian ++ val=__raw_readl((void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0800)); ++ __raw_writel(val|(1 << 18), (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0800)); ++#endif ++ val=__raw_readl((void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0800)); ++ DWC_DEBUGPL(DBG_CIL, "OTG endian(bit18): MISC_USB_CFG_REG=%.8x, OTG in %s endian mode\n",val,(val&(1<<18))?"big":"little"); ++ ++/* ++ // PMU control ++ HAL_PMU_POWER_ON_USB_PHY1(); ++ HAL_PMU_POWER_ON_USB_PHY0(); ++ ++ HAL_PMU_POWER_ON_USB(); ++ ++ HAL_PMU_ENABLE_USB_OTG_CLOCK(); ++ HAL_PMU_ENABLE_USB_HOST_CLOCK(); ++ ++ Hal_Pmu_Software_Reset(PMU_USB_OTG_SOFTWARE_RESET_BIT_INDEX); ++ Hal_Pmu_Software_Reset(PMU_USB_HOST_SOFTWARE_RESET_BIT_INDEX); ++*/ ++ ++ /* ++ * Initialize driver data to point to the global DWC_otg ++ * Device structure. ++ */ ++ lm_set_drvdata(lmdev, dwc_otg_device); ++ dev_dbg(&lmdev->dev, "dwc_otg_device=0x%p\n", dwc_otg_device); ++ ++ dwc_otg_device->core_if = dwc_otg_cil_init(dwc_otg_device->base, ++ &dwc_otg_module_params); ++ ++ dwc_otg_device->core_if->snpsid = snpsid; ++ ++ if (!dwc_otg_device->core_if) { ++ dev_err(&lmdev->dev, "CIL initialization failed!\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ /* ++ * Validate parameter values. ++ */ ++ if (check_parameters(dwc_otg_device->core_if)) { ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ /* ++ * Create Device Attributes in sysfs ++ */ ++ dwc_otg_attr_create(lmdev); ++ ++ /* ++ * Disable the global interrupt until all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_disable_global_interrupts(dwc_otg_device->core_if); ++ ++ /* ++ * Install the interrupt handler for the common interrupts before ++ * enabling common interrupts in core_init below. 
++ */ ++ DWC_DEBUGPL(DBG_CIL, "registering (common) handler for irq%d\n", ++ lmdev->irq); ++ retval = request_irq(lmdev->irq, dwc_otg_common_irq, ++ IRQF_SHARED, "dwc_otg", dwc_otg_device); ++ if (retval) { ++ DWC_ERROR("request of irq%d failed\n", lmdev->irq); ++ retval = -EBUSY; ++ goto fail; ++ } else { ++ dwc_otg_device->common_irq_installed = 1; ++ } ++ ++ /* ++ * Initialize the DWC_otg core. ++ */ ++ dwc_otg_core_init(dwc_otg_device->core_if); ++ ++#ifndef DWC_HOST_ONLY ++ /* ++ * Initialize the PCD ++ */ ++ retval = dwc_otg_pcd_init(lmdev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_pcd_init failed\n"); ++ dwc_otg_device->pcd = NULL; ++ goto fail; ++ } ++#endif ++#ifndef DWC_DEVICE_ONLY ++ /* ++ * Initialize the HCD ++ */ ++ retval = dwc_otg_hcd_init(lmdev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_hcd_init failed\n"); ++ dwc_otg_device->hcd = NULL; ++ goto fail; ++ } ++#endif ++ ++ /* ++ * Enable the global interrupt after all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_enable_global_interrupts(dwc_otg_device->core_if); ++ ++ return 0; ++ ++ fail: ++ dwc_otg_driver_remove(lmdev); ++ return retval; ++} ++ ++/** ++ * This structure defines the methods to be called by a bus driver ++ * during the lifecycle of a device on that bus. Both drivers and ++ * devices are registered with a bus driver. The bus driver matches ++ * devices to drivers based on information in the device and driver ++ * structures. ++ * ++ * The probe function is called when the bus driver matches a device ++ * to this driver. The remove function is called when a device is ++ * unregistered with the bus driver. ++ */ ++static struct lm_driver dwc_otg_driver = { ++ .drv = { ++ .name = (char *)dwc_driver_name, ++ }, ++ .probe = dwc_otg_driver_probe, ++ .remove = dwc_otg_driver_remove, ++}; ++ ++/** ++ * This function is called when the dwc_otg_driver is installed with the ++ * insmod command. It registers the dwc_otg_driver structure with the ++ * appropriate bus driver. 
This will cause the dwc_otg_driver_probe function ++ * to be called. In addition, the bus driver will automatically expose ++ * attributes defined for the device and driver in the special sysfs file ++ * system. ++ * ++ * @return ++ */ ++static int __init dwc_otg_driver_init(void) ++{ ++ int retval = 0; ++ int error; ++ printk(KERN_INFO "%s: version %s\n", dwc_driver_name, DWC_DRIVER_VERSION); ++ ++ retval = lm_driver_register(&dwc_otg_driver); ++ if (retval < 0) { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ return retval; ++ } ++ error = driver_create_file(&dwc_otg_driver.drv, &driver_attr_version); ++ error = driver_create_file(&dwc_otg_driver.drv, &driver_attr_debuglevel); ++ ++ return retval; ++} ++module_init(dwc_otg_driver_init); ++ ++/** ++ * This function is called when the driver is removed from the kernel ++ * with the rmmod command. The driver unregisters itself with its bus ++ * driver. ++ * ++ */ ++static void __exit dwc_otg_driver_cleanup(void) ++{ ++ printk(KERN_DEBUG "dwc_otg_driver_cleanup()\n"); ++ ++ driver_remove_file(&dwc_otg_driver.drv, &driver_attr_debuglevel); ++ driver_remove_file(&dwc_otg_driver.drv, &driver_attr_version); ++ ++ lm_driver_unregister(&dwc_otg_driver); ++ ++ printk(KERN_INFO "%s module removed\n", dwc_driver_name); ++} ++module_exit(dwc_otg_driver_cleanup); ++ ++MODULE_DESCRIPTION(DWC_DRIVER_DESC); ++MODULE_AUTHOR("Synopsys Inc."); ++MODULE_LICENSE("GPL"); ++ ++module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); ++MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); ++module_param_named(opt, dwc_otg_module_params.opt, int, 0444); ++MODULE_PARM_DESC(opt, "OPT Mode"); ++module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); ++MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); ++ ++module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int, 0444); ++MODULE_PARM_DESC(dma_desc_enable, "DMA Desc Mode 0=Address DMA 1=DMA 
Descriptor enabled"); ++ ++module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); ++MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); ++module_param_named(speed, dwc_otg_module_params.speed, int, 0444); ++MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); ++module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); ++MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); ++module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); ++MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); ++module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); ++MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); ++module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); ++MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); ++module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); 
++MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_13, 
dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); ++module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); ++/** @todo Set the max to 512K, modify checks */ ++MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); ++module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); ++MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); ++module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); ++MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); ++module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); ++MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); ++module_param_named(phy_type, 
dwc_otg_module_params.phy_type, int, 0444); ++MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); ++module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); ++MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits"); ++module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); ++module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); ++module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); ++MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); ++module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); ++MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); ++module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); ++MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); ++module_param_named(debug, g_dbg_lvl, int, 0444); ++MODULE_PARM_DESC(debug, ""); ++ ++module_param_named(en_multiple_tx_fifo, dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); ++MODULE_PARM_DESC(en_multiple_tx_fifo, "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); ++module_param_named(dev_tx_fifo_size_1, dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_2, dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_3, dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_4, dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number 
of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_5, dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_6, dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_7, dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_8, dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_9, dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_10, dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_11, dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_12, dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_13, dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_14, dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_15, dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); ++ 
++module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); ++MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); ++module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); ++MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); ++module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); ++MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); ++ ++module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444); ++MODULE_PARM_DESC(pti_enable, "Per Transfer Interrupt mode 0=disabled 1=enabled"); ++ ++module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444); ++MODULE_PARM_DESC(mpi_enable, "Multiprocessor Interrupt mode 0=disabled 1=enabled"); ++ ++/** @page "Module Parameters" ++ * ++ * The following parameters may be specified when starting the module. ++ * These parameters define how the DWC_otg controller should be ++ * configured. Parameter values are passed to the CIL initialization ++ * function dwc_otg_cil_init ++ * ++ * Example: modprobe dwc_otg speed=1 otg_cap=1 ++ * ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++*/ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_driver.h +@@ -0,0 +1,73 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ ++ * $Revision: #12 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. 
unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_DRIVER_H__ ++#define __DWC_OTG_DRIVER_H__ ++ ++/** @file ++ * This file contains the interface to the Linux driver. 
++ */ ++#include "dwc_otg_cil.h" ++ ++/* Type declarations */ ++struct dwc_otg_pcd; ++struct dwc_otg_hcd; ++ ++/** ++ * This structure is a wrapper that encapsulates the driver components used to ++ * manage a single DWC_otg controller. ++ */ ++typedef struct dwc_otg_device { ++ /** Base address returned from ioremap() */ ++ void *base; ++ ++ struct lm_device *lmdev; ++ ++ /** Pointer to the core interface structure. */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Register offset for Diagnostic API. */ ++ uint32_t reg_offset; ++ ++ /** Pointer to the PCD structure. */ ++ struct dwc_otg_pcd *pcd; ++ ++ /** Pointer to the HCD structure. */ ++ struct dwc_otg_hcd *hcd; ++ ++ /** Flag to indicate whether the common IRQ handler is installed. */ ++ uint8_t common_irq_installed; ++ ++} dwc_otg_device_t; ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_hcd.c +@@ -0,0 +1,2919 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ ++ * $Revision: #75 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064940 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the implementation of the HCD. In Linux, the HCD ++ * implements the hc_driver API. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++static const char dwc_otg_hcd_name[] = "dwc_otg_hcd"; ++ ++static const struct hc_driver dwc_otg_hc_driver = { ++ ++ .description = dwc_otg_hcd_name, ++ .product_desc = "DWC OTG Controller", ++ .hcd_priv_size = sizeof(dwc_otg_hcd_t), ++ ++ .irq = dwc_otg_hcd_irq, ++ ++ .flags = HCD_MEMORY | HCD_USB2, ++ ++ //.reset = ++ .start = dwc_otg_hcd_start, ++ //.suspend = ++ //.resume = ++ .stop = dwc_otg_hcd_stop, ++ ++ .urb_enqueue = dwc_otg_hcd_urb_enqueue, ++ .urb_dequeue = dwc_otg_hcd_urb_dequeue, ++ .endpoint_disable = dwc_otg_hcd_endpoint_disable, ++ ++ .get_frame_number = dwc_otg_hcd_get_frame_number, ++ ++ .hub_status_data = dwc_otg_hcd_hub_status_data, ++ .hub_control = dwc_otg_hcd_hub_control, ++ //.hub_suspend = ++ //.hub_resume = ++}; ++ ++/** ++ * Work queue function for starting the HCD when A-Cable is connected. ++ * The dwc_otg_hcd_start() must be called in a process context. ++ */ ++static void hcd_start_func( ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ void *_vp ++#else ++ struct work_struct *_work ++#endif ++ ) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)_vp; ++#else ++ struct delayed_work *dw = container_of(_work, struct delayed_work, work); ++ struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work); ++ struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv); ++#endif ++ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); ++ if (usb_hcd) { ++ dwc_otg_hcd_start(usb_hcd); ++ } ++} ++ ++/** ++ * HCD Callback function for starting the HCD when A-Cable is ++ * connected. 
++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ if (core_if->op_state == B_HOST) { ++ /* ++ * Reset the port. During a HNP mode switch the reset ++ * needs to occur within 1ms and have a duration of at ++ * least 50ms. ++ */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ((struct usb_hcd *)p)->self.is_b_host = 1; ++ } else { ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ } ++ ++ /* Need to start the HCD in a non-interrupt context. */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); ++// INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); ++#else ++// INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++ INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++#endif ++// schedule_work(&dwc_otg_hcd->start_work); ++ queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000); ++ ++ return 1; ++} ++ ++/** ++ * HCD Callback function for stopping the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_stop_cb(void *p) ++{ ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)p; ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_stop(usb_hcd); ++ return 1; ++} ++ ++static void del_xfer_timers(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int i; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ del_timer(&hcd->core_if->hc_xfer_timer[i]); ++ } ++#endif ++} ++ ++static void del_timers(dwc_otg_hcd_t *hcd) ++{ ++ del_xfer_timers(hcd); ++ del_timer(&hcd->conn_timer); ++} ++ ++/** ++ * Processes all the URBs in a single list of QHs. Completes them with ++ * -ETIMEDOUT and frees the QTD. 
++ */ ++static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *qh_item; ++ dwc_otg_qh_t *qh; ++ struct list_head *qtd_item; ++ dwc_otg_qtd_t *qtd; ++ ++ list_for_each(qh_item, qh_list) { ++ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); ++ for (qtd_item = qh->qtd_list.next; ++ qtd_item != &qh->qtd_list; ++ qtd_item = qh->qtd_list.next) { ++ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); ++ if (qtd->urb != NULL) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, ++ -ETIMEDOUT); ++ } ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ } ++ } ++} ++ ++/** ++ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic ++ * and periodic schedules. The QTD associated with each URB is removed from ++ * the schedule and freed. This function may be called when a disconnect is ++ * detected or when the HCD is being stopped. ++ */ ++static void kill_all_urbs(dwc_otg_hcd_t *hcd) ++{ ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_disconnect_cb(void *p) ++{ ++ gintsts_data_t intr; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ ++ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ ++ /* ++ * Set status flags for the hub driver. ++ */ ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 0; ++ ++ /* ++ * Shutdown any transfers in process by clearing the Tx FIFO Empty ++ * interrupt mask and status bits and disabling subsequent host ++ * channel interrupts. 
++ */ ++ intr.d32 = 0; ++ intr.b.nptxfempty = 1; ++ intr.b.ptxfempty = 1; ++ intr.b.hcintr = 1; ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* ++ * Turn off the vbus power only if the core has transitioned to device ++ * mode. If still in host mode, need to keep power on to detect a ++ * reconnection. ++ */ ++ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { ++ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { ++ hprt0_data_t hprt0 = { .d32=0 }; ++ DWC_PRINT("Disconnect: PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++ } ++ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ } ++ ++ /* Respond with an error status to all URBs in the schedule. */ ++ kill_all_urbs(dwc_otg_hcd); ++ ++ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { ++ /* Clean up any host channels that were in use. */ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ /* Flush out any channel requests in slave mode. */ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ } ++ } ++ } ++ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ /* Halt the channel. 
*/ ++ hcchar.b.chdis = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel); ++ list_add_tail(&channel->hc_list_entry, ++ &dwc_otg_hcd->free_hc_list); ++ } ++ } ++ } ++ ++ /* A disconnect will end the session so the B-Device is no ++ * longer a B-host. */ ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ return 1; ++} ++ ++/** ++ * Connection timeout function. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. ++ */ ++void dwc_otg_hcd_connect_timeout(unsigned long ptr) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr); ++ DWC_PRINT("Connect Timeout\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++} ++ ++/** ++ * Start the connection timer. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. The ++ * timer is deleted if a port connect interrupt occurs before the ++ * timer expires. ++ */ ++static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd) ++{ ++ init_timer(&hcd->conn_timer); ++ hcd->conn_timer.function = dwc_otg_hcd_connect_timeout; ++ hcd->conn_timer.data = 0; ++ hcd->conn_timer.expires = jiffies + (HZ * 10); ++ add_timer(&hcd->conn_timer); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_session_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); ++ return 1; ++} ++ ++/** ++ * HCD Callback structure for handling mode switching. 
++ */ ++static dwc_otg_cil_callbacks_t hcd_cil_callbacks = { ++ .start = dwc_otg_hcd_start_cb, ++ .stop = dwc_otg_hcd_stop_cb, ++ .disconnect = dwc_otg_hcd_disconnect_cb, ++ .session_start = dwc_otg_hcd_session_start_cb, ++ .p = 0, ++}; ++ ++/** ++ * Reset tasklet function ++ */ ++static void reset_tasklet_func(unsigned long data) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data; ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); ++ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(60); ++ ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++} ++ ++static struct tasklet_struct reset_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = reset_tasklet_func, ++ .data = 0, ++}; ++ ++/** ++ * Initializes the HCD. This function allocates memory for and initializes the ++ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the ++ * USB bus with the core and calls the hc_driver->start() function. It returns ++ * a negative error on failure. ++ */ ++int dwc_otg_hcd_init(struct lm_device *lmdev) ++{ ++ struct usb_hcd *hcd = NULL; ++ dwc_otg_hcd_t *dwc_otg_hcd = NULL; ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lmdev); ++ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ int retval = 0; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ /* 2.6.20+ requires dev.dma_mask to be set prior to calling usb_create_hcd() */ ++ ++ /* Set device flags indicating whether the HCD supports DMA. 
*/ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++#if 0 ++//090707: setting dma_mask would cause kernel to fetch 0xffffffff, result in crash, at scsi_calculate_bounce_limit ++ lmdev->dev.dma_mask = (void *)~0; ++ lmdev->dev.coherent_dma_mask = ~0; ++#endif ++ ++ if (otg_dev->core_if->dma_desc_enable) { ++ DWC_PRINT("Device using Descriptor DMA mode\n"); ++ } else { ++ DWC_PRINT("Device using Buffer DMA mode\n"); ++ } ++ } else { ++ DWC_PRINT("Using Slave mode\n"); ++ lmdev->dev.dma_mask = (void *)0; ++ lmdev->dev.coherent_dma_mask = 0; ++ } ++#endif ++ /* ++ * Allocate memory for the base HCD plus the DWC OTG HCD. ++ * Initialize the base HCD. ++ */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ hcd = usb_create_hcd(&dwc_otg_hc_driver, &lmdev->dev, lmdev->dev.bus_id); ++#else ++ hcd = usb_create_hcd(&dwc_otg_hc_driver, &lmdev->dev, "gadget"); ++#endif ++ if (!hcd) { ++ retval = -ENOMEM; ++ goto error1; ++ } ++ ++ hcd->regs = otg_dev->base; ++ hcd->self.otg_port = 1; ++ ++ /* Initialize the DWC OTG HCD. */ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_hcd->core_if = otg_dev->core_if; ++ otg_dev->hcd = dwc_otg_hcd; ++ ++ /* */ ++ spin_lock_init(&dwc_otg_hcd->lock); ++ ++ /* Register the HCD CIL Callbacks */ ++ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, ++ &hcd_cil_callbacks, hcd); ++ ++ /* Initialize the non-periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); ++ ++ /* Initialize the periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); ++ ++ /* ++ * Create a host channel descriptor for each host channel implemented ++ * in the controller. Initialize the channel descriptor array. 
++ */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array)); ++ for (i = 0; i < num_channels; i++) { ++ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); ++ if (channel == NULL) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: host channel allocation failed\n", __func__); ++ goto error2; ++ } ++ memset(channel, 0, sizeof(dwc_hc_t)); ++ channel->hc_num = i; ++ dwc_otg_hcd->hc_ptr_array[i] = channel; ++#ifdef DEBUG ++ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); ++#endif ++ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); ++ } ++ ++ /* Initialize the Connection timeout timer. */ ++ init_timer(&dwc_otg_hcd->conn_timer); ++ ++ /* Initialize reset tasklet. */ ++ reset_tasklet.data = (unsigned long) dwc_otg_hcd; ++ dwc_otg_hcd->reset_tasklet = &reset_tasklet; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ /* Set device flags indicating whether the HCD supports DMA. */ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++ lmdev->dev.dma_mask = (void *)~0; ++ lmdev->dev.coherent_dma_mask = ~0; ++ ++ if (otg_dev->core_if->dma_desc_enable){ ++ DWC_PRINT("Device using Descriptor DMA mode\n"); ++ } else { ++ DWC_PRINT("Device using Buffer DMA mode\n"); ++ } ++ } else { ++ DWC_PRINT("Using Slave mode\n"); ++ lmdev->dev.dma_mask = (void *)0; ++ lmdev->dev.coherent_dma_mask = 0; ++ } ++#endif ++ /* ++ * Finish generic HCD initialization and start the HCD. This function ++ * allocates the DMA buffer pool, registers the USB bus, requests the ++ * IRQ line, and calls dwc_otg_hcd_start method. ++ */ ++ retval = usb_add_hcd(hcd, lmdev->irq, IRQF_SHARED); ++ if (retval < 0) { ++ goto error2; ++ } ++ ++ /* ++ * Allocate space for storing data on status transactions. Normally no ++ * data is sent, but this space acts as a bit bucket. 
This must be ++ * done after usb_add_hcd since that function allocates the DMA buffer ++ * pool. ++ */ ++ if (otg_dev->core_if->dma_enable) { ++ dwc_otg_hcd->status_buf = ++ dma_alloc_coherent(&lmdev->dev, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ &dwc_otg_hcd->status_buf_dma, ++ GFP_KERNEL | GFP_DMA); ++ } else { ++ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, ++ GFP_KERNEL); ++ } ++ if (!dwc_otg_hcd->status_buf) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: status_buf allocation failed\n", __func__); ++ goto error3; ++ } ++ ++ dwc_otg_hcd->otg_dev = otg_dev; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n", ++ lmdev->dev.bus_id, hcd->self.busnum); ++#else ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n", ++ hcd->self.busnum); ++#endif ++ return 0; ++ ++ /* Error conditions */ ++ error3: ++ usb_remove_hcd(hcd); ++ error2: ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++ error1: ++ return retval; ++} ++ ++/** ++ * Removes the HCD. ++ * Frees memory and resources associated with the HCD and deregisters the bus. 
++ */ ++void dwc_otg_hcd_remove(struct lm_device *lmdev) ++{ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lmdev); ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ struct usb_hcd *hcd; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); ++ ++ if (!otg_dev) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return; ++ } ++ ++ dwc_otg_hcd = otg_dev->hcd; ++ ++ if (!dwc_otg_hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return; ++ } ++ ++ hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); ++ ++ if (!hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); ++ return; ++ } ++ ++ /* Turn off all interrupts */ ++ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); ++ ++ usb_remove_hcd(hcd); ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++} ++ ++/* ========================================================================= ++ * Linux HC Driver Functions ++ * ========================================================================= */ ++ ++/** ++ * Initializes dynamic portions of the DWC_otg HCD state. ++ */ ++static void hcd_reinit(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *item; ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ hcd->flags.d32 = 0; ++ ++ hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active; ++ hcd->non_periodic_channels = 0; ++ hcd->periodic_channels = 0; ++ ++ /* ++ * Put all channels in the free channel list and clean up channel ++ * states. ++ */ ++ item = hcd->free_hc_list.next; ++ while (item != &hcd->free_hc_list) { ++ list_del(item); ++ item = hcd->free_hc_list.next; ++ } ++ num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ channel = hcd->hc_ptr_array[i]; ++ list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list); ++ dwc_otg_hc_cleanup(hcd->core_if, channel); ++ } ++ ++ /* Initialize the DWC core for host mode operation. 
*/ ++ dwc_otg_core_host_init(hcd->core_if); ++} ++ ++/** Initializes the DWC_otg controller and its root hub and prepares it for host ++ * mode operation. Activates the root port. Returns 0 on success and a negative ++ * error code on failure. */ ++int dwc_otg_hcd_start(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ struct usb_bus *bus; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_device *udev; ++ int retval; ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); ++ ++ bus = hcd_to_bus(hcd); ++ ++ /* Initialize the bus state. If the core is in Device Mode ++ * HALT the USB bus and return. */ ++ if (dwc_otg_is_device_mode(core_if)) { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ hcd->state = HC_STATE_HALT; ++#else ++ hcd->state = HC_STATE_RUNNING; ++#endif ++ return 0; ++ } ++ hcd->state = HC_STATE_RUNNING; ++ ++ /* Initialize and connect root hub if one is not already attached */ ++ if (bus->root_hub) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); ++ /* Inform the HUB driver to resume. */ ++ usb_hcd_resume_root_hub(hcd); ++ } ++ else { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n"); ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ udev = usb_alloc_dev(NULL, bus, 0); ++ udev->speed = USB_SPEED_HIGH; ++ if (!udev) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n"); ++ return -ENODEV; ++ } ++ if ((retval = usb_hcd_register_root_hub(udev, hcd)) != 0) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error registering %d\n", retval); ++ return -ENODEV; ++ } ++#endif ++ } ++ ++ hcd_reinit(dwc_otg_hcd); ++ ++ return 0; ++} ++ ++static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *item; ++ dwc_otg_qh_t *qh; ++ ++ if (!qh_list->next) { ++ /* The list hasn't been initialized yet. */ ++ return; ++ } ++ ++ /* Ensure there are no QTDs or URBs left. 
*/ ++ kill_urbs_in_qh_list(hcd, qh_list); ++ ++ for (item = qh_list->next; item != qh_list; item = qh_list->next) { ++ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ dwc_otg_hcd_qh_remove_and_free(hcd, qh); ++ } ++} ++ ++/** ++ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are ++ * stopped. ++ */ ++void dwc_otg_hcd_stop(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hprt0_data_t hprt0 = { .d32=0 }; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); ++ ++ /* Turn off all host-specific interrupts. */ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ ++ /* ++ * The root hub should be disconnected before this function is called. ++ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) ++ * and the QH lists (via ..._hcd_endpoint_disable). ++ */ ++ ++ /* Turn off the vbus power */ ++ DWC_PRINT("PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++} ++ ++/** Returns the current frame number. */ ++int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hfnum_data_t hfnum; ++ ++ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> ++ host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); ++#endif ++ return hfnum.b.frnum; ++} ++ ++/** ++ * Frees secondary storage associated with the dwc_otg_hcd structure contained ++ * in the struct usb_hcd field. 
++ */ ++void dwc_otg_hcd_free(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ int i; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* Free memory for QH/QTD lists */ ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); ++ ++ /* Free memory for the host channels. */ ++ for (i = 0; i < MAX_EPS_CHANNELS; i++) { ++ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; ++ if (hc != NULL) { ++ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); ++ kfree(hc); ++ } ++ } ++ ++ if (dwc_otg_hcd->core_if->dma_enable) { ++ if (dwc_otg_hcd->status_buf_dma) { ++ dma_free_coherent(hcd->self.controller, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ dwc_otg_hcd->status_buf, ++ dwc_otg_hcd->status_buf_dma); ++ } ++ } else if (dwc_otg_hcd->status_buf != NULL) { ++ kfree(dwc_otg_hcd->status_buf); ++ } ++} ++ ++#ifdef DEBUG ++static void dump_urb_info(struct urb *urb, char* fn_name) ++{ ++ DWC_PRINT("%s, urb %p\n", fn_name, urb); ++ DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); ++ DWC_PRINT(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_PRINT(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_PRINT(" Interval: %d\n", urb->interval); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d:\n", i); ++ DWC_PRINT(" offset: %d, length %d\n", ++ urb->iso_frame_desc[i].offset, ++ urb->iso_frame_desc[i].length); ++ } ++ } ++} ++ ++static void dump_channel_info(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ if (qh->channel != NULL) { ++ dwc_hc_t *hc = qh->channel; ++ struct list_head *item; ++ dwc_otg_qh_t *qh_item; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ int i; ++ ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ ++ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = 
dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_PRINT(" Assigned to channel %p:\n", hc); ++ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ DWC_PRINT(" NP inactive sched:\n"); ++ list_for_each(item, &hcd->non_periodic_sched_inactive) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" NP active sched:\n"); ++ list_for_each(item, &hcd->non_periodic_sched_active) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" Channels: \n"); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" %2d: %p\n", i, hc); ++ } ++ } ++} ++#endif ++ ++ ++//OTG host require the DMA addr is DWORD-aligned, ++//patch it if the buffer is not DWORD-aligned ++inline ++void hcd_check_and_patch_dma_addr(struct urb *urb){ ++ ++ if((!urb->transfer_buffer)||!urb->transfer_dma||urb->transfer_dma==0xffffffff) ++ return; ++ ++ if(((u32)urb->transfer_buffer)& 0x3){ ++ /* ++ printk("%s: " ++ "urb(%.8x) " ++ "transfer_buffer=%.8x, " ++ "transfer_dma=%.8x, " ++ "transfer_buffer_length=%d, " ++ "actual_length=%d(%x), " ++ "\n", ++ ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN", ++ urb, ++ urb->transfer_buffer, ++ urb->transfer_dma, ++ urb->transfer_buffer_length, ++ urb->actual_length,urb->actual_length ++ ); ++ */ ++ 
if(!urb->aligned_transfer_buffer||urb->aligned_transfer_buffer_length<urb->transfer_buffer_length){ ++ urb->aligned_transfer_buffer_length=urb->transfer_buffer_length; ++ if(urb->aligned_transfer_buffer) { ++ kfree(urb->aligned_transfer_buffer); ++ } ++ urb->aligned_transfer_buffer=kmalloc(urb->aligned_transfer_buffer_length,GFP_KERNEL|GFP_DMA|GFP_ATOMIC); ++ urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE); ++ if(!urb->aligned_transfer_buffer){ ++ DWC_ERROR("Cannot alloc required buffer!!\n"); ++ BUG(); ++ } ++ //printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length); ++ } ++ urb->transfer_dma=urb->aligned_transfer_dma; ++ if((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT) { ++ memcpy(urb->aligned_transfer_buffer,urb->transfer_buffer,urb->transfer_buffer_length); ++ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->transfer_buffer_length,DMA_TO_DEVICE); ++ } ++ } ++ ++ ++ ++ /** Starts processing a USB transfer request specified by a USB Request Block ++ * (URB). mem_flags indicates the type of memory allocation to use while ++ * processing this URB. */ ++ int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ // struct usb_host_endpoint *ep, ++ struct urb *urb, ++ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int mem_flags ++ #else ++ gfp_t mem_flags ++ #endif ++ ) ++ { ++ int retval = 0; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qtd_t *qtd; ++ ++ #ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue"); ++ } ++ #endif ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* No longer connected. 
*/ ++ return -ENODEV; ++ } ++ ++ hcd_check_and_patch_dma_addr(urb); ++ qtd = dwc_otg_hcd_qtd_create(urb); ++ if (qtd == NULL) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n"); ++ return -ENOMEM; ++ } ++ ++ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd); ++ if (retval < 0) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. " ++ "Error status %d\n", retval); ++ dwc_otg_hcd_qtd_free(qtd); ++ } ++ ++ return retval; ++} ++ ++/** Aborts/cancels a USB transfer request. Always returns 0 to indicate ++ * success. */ ++int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_host_endpoint *ep, ++#endif ++ struct urb *urb, int status) ++{ ++ unsigned long flags; ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ dwc_otg_qtd_t *urb_qtd; ++ dwc_otg_qh_t *qh; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); ++ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv; ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue"); ++ if (urb_qtd == qh->qtd_in_process) { ++ dump_channel_info(dwc_otg_hcd, qh); ++ } ++ } ++#endif ++ ++ if (urb_qtd == qh->qtd_in_process) { ++ /* The QTD is in process (it has been assigned to a channel). */ ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * If still connected (i.e. in host mode), halt the ++ * channel so it can be used for other transfers. If ++ * no longer connected, the host registers can't be ++ * written to halt the channel since the core is in ++ * device mode. ++ */ ++ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, ++ DWC_OTG_HC_XFER_URB_DEQUEUE); ++ } ++ } ++ ++ /* ++ * Free the QTD and clean up the associated QH. 
Leave the QH in the ++ * schedule if it has any remaining QTDs. ++ */ ++ dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd); ++ if (urb_qtd == qh->qtd_in_process) { ++ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ } else if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ urb->hcpriv = NULL; ++ ++ /* Higher layer software sets URB status. */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ usb_hcd_giveback_urb(hcd, urb, status); ++#else ++ usb_hcd_giveback_urb(hcd, urb, NULL); ++#endif ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); ++ DWC_PRINT(" urb->status = %d\n", urb->status); ++ } ++ ++ return 0; ++} ++ ++/** Frees resources in the DWC_otg controller related to a given endpoint. Also ++ * clears state in the HCD related to the endpoint. Any URBs for the endpoint ++ * must already be dequeued. 
*/ ++void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qh_t *qh; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ unsigned long flags; ++ int retry = 0; ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " ++ "endpoint=%d\n", ep->desc.bEndpointAddress, ++ dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress)); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++rescan: ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ qh = (dwc_otg_qh_t *)(ep->hcpriv); ++ if (!qh) ++ goto done; ++ ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ if (retry++ < 250) { ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ schedule_timeout_uninterruptible(1); ++ goto rescan; ++ } ++ ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++ ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ ep->hcpriv = NULL; ++done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++#else // LINUX_VERSION_CODE ++ ++ qh = (dwc_otg_qh_t *)(ep->hcpriv); ++ if (qh != NULL) { ++#ifdef DEBUG ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++#endif ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ ep->hcpriv = NULL; ++ } ++#endif // LINUX_VERSION_CODE ++} ++ ++/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if ++ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid ++ * interrupt. 
++ * ++ * This function is called by the USB core when an interrupt occurs */ ++irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *regs ++#endif ++ ) ++{ ++ int retVal = 0; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd); ++ if (dwc_otg_hcd->flags.b.port_connect_status_change == 1) ++ usb_hcd_poll_rh_status(hcd); ++ return IRQ_RETVAL(retVal); ++} ++ ++/** Creates Status Change bitmap for the root hub and root port. The bitmap is ++ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 ++ * is the status change indicator for the single root port. Returns 1 if either ++ * change indicator is 1, otherwise returns 0. */ ++int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ buf[0] = 0; ++ buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || ++ dwc_otg_hcd->flags.b.port_reset_change || ++ dwc_otg_hcd->flags.b.port_enable_change || ++ dwc_otg_hcd->flags.b.port_suspend_change || ++ dwc_otg_hcd->flags.b.port_over_current_change) << 1; ++ ++#ifdef DEBUG ++ if (buf[0]) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" ++ " Root port status changed\n"); ++ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", ++ dwc_otg_hcd->flags.b.port_connect_status_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", ++ dwc_otg_hcd->flags.b.port_reset_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", ++ dwc_otg_hcd->flags.b.port_enable_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", ++ dwc_otg_hcd->flags.b.port_suspend_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", ++ dwc_otg_hcd->flags.b.port_over_current_change); ++ } ++#endif ++ return (buf[0] != 0); ++} ++ ++#ifdef DWC_HS_ELECT_TST ++/* ++ * Quick and dirty hack to implement the HS Electrical Test ++ * 
SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. ++ * ++ * This code was copied from our userspace app "hset". It sends a ++ * Get Device Descriptor control sequence in two parts, first the ++ * Setup packet by itself, followed some time later by the In and ++ * Ack packets. Rather than trying to figure out how to add this ++ * functionality to the normal driver code, we just hijack the ++ * hardware, using these two function to drive the hardware ++ * directly. ++ */ ++ ++dwc_otg_core_global_regs_t *global_regs; ++dwc_otg_host_global_regs_t *hc_global_regs; ++dwc_otg_hc_regs_t *hc_regs; ++uint32_t *data_fifo; ++ ++static void do_setup(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Send Setup packet (Get Device Descriptor) ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ 
//fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++// hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ /* Fill FIFO with Setup data for Get Device Descriptor */ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ dwc_write_reg32(data_fifo++, 0x01000680); ++ dwc_write_reg32(data_fifo++, 0x00080000); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = 
dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++ ++static void do_in_ack(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ host_grxsts_data_t grxsts; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear 
HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Receive Control In packet ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 1; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, 
hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer */ ++ if (grxsts.b.bcnt > 0) { ++ int i; ++ int word_count = (grxsts.b.bcnt + 3) / 4; ++ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ for (i = 0; i < word_count; i++) { ++ (void)dwc_read_reg32(data_fifo++); ++ } ++ } ++ ++ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 
**\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++// usleep(100000); ++// mdelay(100); ++ mdelay(1); ++ ++ /* ++ * Send handshake packet ++ */ ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Make sure channel is 
disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 0; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = 
%08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++#endif /* DWC_HS_ELECT_TST */ ++ ++/** Handles hub class-specific requests. 
*/ ++int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength) ++{ ++ int retval = 0; ++ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if; ++ struct usb_hub_descriptor *desc; ++ hprt0_data_t hprt0 = {.d32 = 0}; ++ ++ uint32_t port_status; ++ ++ switch (typeReq) { ++ case ClearHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearHubFeature 0x%x\n", wValue); ++ switch (wValue) { ++ case C_HUB_LOCAL_POWER: ++ case C_HUB_OVER_CURRENT: ++ /* Nothing required here */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearHubFeature request %xh unknown\n", wValue); ++ } ++ break; ++ case ClearPortFeature: ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_ENABLE: ++ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtena = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ /* Clear Resume bit */ ++ mdelay(100); ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); ++ /* Port inidicator not supported */ ++ break; ++ case USB_PORT_FEAT_C_CONNECTION: ++ /* 
Clears drivers internal connect status change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 0; ++ break; ++ case USB_PORT_FEAT_C_RESET: ++ /* Clears the driver's internal Port Reset Change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); ++ dwc_otg_hcd->flags.b.port_reset_change = 0; ++ break; ++ case USB_PORT_FEAT_C_ENABLE: ++ /* Clears the driver's internal Port ++ * Enable/Disable Change flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); ++ dwc_otg_hcd->flags.b.port_enable_change = 0; ++ break; ++ case USB_PORT_FEAT_C_SUSPEND: ++ /* Clears the driver's internal Port Suspend ++ * Change flag, which is set when resume signaling on ++ * the host port is complete */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); ++ dwc_otg_hcd->flags.b.port_suspend_change = 0; ++ break; ++ case USB_PORT_FEAT_C_OVER_CURRENT: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); ++ dwc_otg_hcd->flags.b.port_over_current_change = 0; ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ } ++ break; ++ case GetHubDescriptor: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubDescriptor\n"); ++ desc = (struct usb_hub_descriptor *)buf; ++ desc->bDescLength = 9; ++ desc->bDescriptorType = 0x29; ++ desc->bNbrPorts = 1; ++ desc->wHubCharacteristics = 0x08; ++ desc->bPwrOn2PwrGood = 1; ++ desc->bHubContrCurrent = 0; ++ desc->bitmap[0] = 0; ++ desc->bitmap[1] = 0xff; ++ break; ++ case GetHubStatus: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubStatus\n"); ++ memset(buf, 0, 4); ++ break; ++ case GetPortStatus: ++ 
DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetPortStatus\n"); ++ ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ port_status = 0; ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status_change) ++ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); ++ ++ if (dwc_otg_hcd->flags.b.port_enable_change) ++ port_status |= (1 << USB_PORT_FEAT_C_ENABLE); ++ ++ if (dwc_otg_hcd->flags.b.port_suspend_change) ++ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND); ++ ++ if (dwc_otg_hcd->flags.b.port_reset_change) ++ port_status |= (1 << USB_PORT_FEAT_C_RESET); ++ ++ if (dwc_otg_hcd->flags.b.port_over_current_change) { ++ DWC_ERROR("Device Not Supported\n"); ++ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT); ++ } ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return 0's for the remainder of the port status ++ * since the port register can't be read if the core ++ * is in device mode. 
++ */ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ break; ++ } ++ ++ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0); ++ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32); ++ ++ if (hprt0.b.prtconnsts) ++ port_status |= (1 << USB_PORT_FEAT_CONNECTION); ++ ++ if (hprt0.b.prtena) ++ port_status |= (1 << USB_PORT_FEAT_ENABLE); ++ ++ if (hprt0.b.prtsusp) ++ port_status |= (1 << USB_PORT_FEAT_SUSPEND); ++ ++ if (hprt0.b.prtovrcurract) ++ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); ++ ++ if (hprt0.b.prtrst) ++ port_status |= (1 << USB_PORT_FEAT_RESET); ++ ++ if (hprt0.b.prtpwr) ++ port_status |= (1 << USB_PORT_FEAT_POWER); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) ++ port_status |= (1 << USB_PORT_FEAT_HIGHSPEED); ++ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ++ port_status |= (1 << USB_PORT_FEAT_LOWSPEED); ++ ++ if (hprt0.b.prttstctl) ++ port_status |= (1 << USB_PORT_FEAT_TEST); ++ ++ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ ++ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ ++ break; ++ case SetHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetHubFeature\n"); ++ /* No HUB features supported */ ++ break; ++ case SetPortFeature: ++ if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1)) ++ goto error; ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return without doing anything since the port ++ * register can't be written if the core is in device ++ * mode. 
++ */ ++ break; ++ } ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); ++ if (hcd->self.otg_port == wIndex && ++ hcd->self.b_hnp_enable) { ++ gotgctl_data_t gotgctl = {.d32=0}; ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ 0, gotgctl.d32); ++ core_if->op_state = A_SUSPEND; ++ } ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32); ++ /* Suspend the Phy Clock */ ++ { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ pcgcctl.b.stoppclk = 1; ++ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32); ++ } ++ ++ /* For HNP the bus must be suspended for at least 200ms. */ ++ if (hcd->self.b_hnp_enable) { ++ mdelay(200); ++ //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state); ++ } ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_RESET: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_RESET\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ /* When B-Host the Port reset bit is set in ++ * the Start HCD Callback function, so that ++ * the reset is started within 1ms of the HNP ++ * success interrupt. 
*/ ++ if (!hcd->self.is_b_host) { ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } ++ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ ++ MDELAY(60); ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ ++#ifdef DWC_HS_ELECT_TST ++ case USB_PORT_FEAT_TEST: ++ { ++ uint32_t t; ++ gintmsk_data_t gintmsk; ++ ++ t = (wIndex >> 8); /* MSB wIndex USB */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); ++ warn("USB_PORT_FEAT_TEST %d\n", t); ++ if (t < 6) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prttstctl = t; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } else { ++ /* Setup global vars with reg addresses (quick and ++ * dirty hack, should be cleaned up) ++ */ ++ global_regs = core_if->core_global_regs; ++ hc_global_regs = core_if->host_if->host_global_regs; ++ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive suspend on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive resume on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 0; ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(100); ++ ++ /* Clear the resume bit */ ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Restore interrupts */ ++ 
dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Send the In and Ack packets */ ++ do_in_ack(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } ++ } ++ break; ++ } ++#endif /* DWC_HS_ELECT_TST */ ++ ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); ++ /* Not supported */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "SetPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ break; ++ } ++ break; ++ default: ++ error: ++ retval = -EINVAL; ++ DWC_WARN("DWC OTG HCD - " ++ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", ++ typeReq, wIndex, wValue); ++ break; ++ } ++ ++ return retval; ++} ++ ++/** ++ * Assigns transactions from a QTD to a free host channel and initializes the ++ * host 
channel to perform the transactions. The host channel is removed from ++ * the free list. ++ * ++ * @param hcd The HCD state structure. ++ * @param qh Transactions from the first QTD for this QH are selected and ++ * assigned to a free host channel. ++ */ ++static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_hc_t *hc; ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh); ++ ++ hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); ++ ++ /* Remove the host channel from the free list. */ ++ list_del_init(&hc->hc_list_entry); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ urb = qtd->urb; ++ qh->channel = hc; ++ qh->qtd_in_process = qtd; ++ ++ /* ++ * Use usb_pipedevice to determine device address. This address is ++ * 0 before the SET_ADDRESS command and the correct address afterward. ++ */ ++ hc->dev_addr = usb_pipedevice(urb->pipe); ++ hc->ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (urb->dev->speed == USB_SPEED_LOW) { ++ hc->speed = DWC_OTG_EP_SPEED_LOW; ++ } else if (urb->dev->speed == USB_SPEED_FULL) { ++ hc->speed = DWC_OTG_EP_SPEED_FULL; ++ } else { ++ hc->speed = DWC_OTG_EP_SPEED_HIGH; ++ } ++ ++ hc->max_packet = dwc_max_packet(qh->maxp); ++ ++ hc->xfer_started = 0; ++ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; ++ hc->error_state = (qtd->error_count > 0); ++ hc->halt_on_queue = 0; ++ hc->halt_pending = 0; ++ hc->requests = 0; ++ ++ /* ++ * The following values may be modified in the transfer type section ++ * below. The xfer_len value may be reduced when the transfer is ++ * started to accommodate the max widths of the XferSize and PktCnt ++ * fields in the HCTSIZn register. 
++ */ ++ hc->do_ping = qh->ping_state; ++ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); ++ hc->data_pid_start = qh->data_toggle; ++ hc->multi_count = 1; ++ ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; ++ } ++ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; ++ hc->xfer_count = 0; ++ ++ /* ++ * Set the split attributes ++ */ ++ hc->do_split = 0; ++ if (qh->do_split) { ++ hc->do_split = 1; ++ hc->xact_pos = qtd->isoc_split_pos; ++ hc->complete_split = qtd->complete_split; ++ hc->hub_addr = urb->dev->tt->hub->devnum; ++ hc->port_addr = urb->dev->ttport; ++ } ++ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); ++ hc->do_ping = 0; ++ hc->ep_is_in = 0; ++ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->setup_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->setup_packet; ++ } ++ hc->xfer_len = 8; ++ break; ++ case DWC_OTG_CONTROL_DATA: ++ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); ++ hc->data_pid_start = qtd->data_toggle; ++ break; ++ case DWC_OTG_CONTROL_STATUS: ++ /* ++ * Direction is opposite of data direction or IN if no ++ * data. 
++ */ ++ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); ++ if (urb->transfer_buffer_length == 0) { ++ hc->ep_is_in = 1; ++ } else { ++ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); ++ } ++ if (hc->ep_is_in) { ++ hc->do_ping = 0; ++ } ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ hc->xfer_len = 0; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf; ++ } ++ break; ++ } ++ break; ++ case PIPE_BULK: ++ hc->ep_type = DWC_OTG_EP_TYPE_BULK; ++ break; ++ case PIPE_INTERRUPT: ++ hc->ep_type = DWC_OTG_EP_TYPE_INTR; ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; ++ } ++ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; ++ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; ++ ++ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ if (hc->xfer_len <= 188) { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ } ++ else { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; ++ } ++ } ++ } ++ break; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This value may be modified when the transfer is started to ++ * reflect the actual transfer length. ++ */ ++ hc->multi_count = dwc_hb_mult(qh->maxp); ++ } ++ ++ dwc_otg_hc_init(hcd->core_if, hc); ++ hc->qh = qh; ++} ++ ++/** ++ * This function selects transactions from the HCD transfer schedule and ++ * assigns them to available host channels. It is called from HCD interrupt ++ * handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * ++ * @return The types of new transactions that were assigned to host channels. 
++ */ ++dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int num_channels; ++ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); ++#endif ++ ++ /* Process entries in the periodic ready list. */ ++ qh_ptr = hcd->periodic_sched_ready.next; ++ while (qh_ptr != &hcd->periodic_sched_ready && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the periodic ready schedule to the ++ * periodic assigned schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned); ++ ++ ret_val = DWC_OTG_TRANSACTION_PERIODIC; ++ } ++ ++ /* ++ * Process entries in the inactive portion of the non-periodic ++ * schedule. Some free host channels may not be used if they are ++ * reserved for periodic transfers. ++ */ ++ qh_ptr = hcd->non_periodic_sched_inactive.next; ++ num_channels = hcd->core_if->core_params->host_channels; ++ while (qh_ptr != &hcd->non_periodic_sched_inactive && ++ (hcd->non_periodic_channels < ++ num_channels - hcd->periodic_channels) && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the non-periodic inactive schedule to the ++ * non-periodic active schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active); ++ ++ if (ret_val == DWC_OTG_TRANSACTION_NONE) { ++ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; ++ } else { ++ ret_val = DWC_OTG_TRANSACTION_ALL; ++ } ++ ++ hcd->non_periodic_channels++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Attempts to queue a single transaction request for a host channel ++ * associated with either a periodic or non-periodic transfer. 
This function ++ * assumes that there is space available in the appropriate request queue. For ++ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space ++ * is available in the appropriate Tx FIFO. ++ * ++ * @param hcd The HCD state structure. ++ * @param hc Host channel descriptor associated with either a periodic or ++ * non-periodic transfer. ++ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx ++ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic ++ * transfers. ++ * ++ * @return 1 if a request is queued and more requests may be needed to ++ * complete the transfer, 0 if no more requests are required for this ++ * transfer, -1 if there is insufficient space in the Tx FIFO. ++ */ ++static int queue_transaction(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ uint16_t fifo_dwords_avail) ++{ ++ int retval; ++ ++ if (hcd->core_if->dma_enable) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ hc->qh->ping_state = 0; ++ } ++ retval = 0; ++ } else if (hc->halt_pending) { ++ /* Don't queue a request if the channel has been halted. 
*/ ++ retval = 0; ++ } else if (hc->halt_on_queue) { ++ dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status); ++ retval = 0; ++ } else if (hc->do_ping) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ } ++ retval = 0; ++ } else if (!hc->ep_is_in || ++ hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ if ((fifo_dwords_avail * 4) >= hc->max_packet) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } else { ++ retval = -1; ++ } ++ } else { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } ++ ++ return retval; ++} ++ ++/** ++ * Processes active non-periodic channels and queues transactions for these ++ * channels to the DWC_otg controller. After queueing transactions, the NP Tx ++ * FIFO Empty interrupt is enabled if there are more transactions to queue as ++ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx ++ * FIFO Empty interrupt is disabled. ++ */ ++static void process_non_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ gnptxsts_data_t tx_status; ++ struct list_head *orig_qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ int more_to_do = 0; ++ ++ dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ /* ++ * Keep track of the starting point. Skip over the start-of-list ++ * entry. 
++ */ ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ orig_qh_ptr = hcd->non_periodic_qh_ptr; ++ ++ /* ++ * Process once through the active list or until no more space is ++ * available in the request queue or the Tx FIFO. ++ */ ++ do { ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail); ++ ++ if (status > 0) { ++ more_to_do = 1; ++ } else if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* Advance to next QH, skipping start-of-list entry. */ ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ ++ } while (hcd->non_periodic_qh_ptr != orig_qh_ptr); ++ ++ if (!hcd->core_if->dma_enable) { ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ intr_mask.b.nptxfempty = 1; ++ ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ if (more_to_do || no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the non-periodic ++ * Tx FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. 
This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Processes periodic channels for the next frame and queues transactions for ++ * these channels to the DWC_otg controller. After queueing transactions, the ++ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions ++ * to queue as Periodic Tx FIFO or request queue space becomes available. ++ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. ++ */ ++static void process_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ hptxsts_data_t tx_status; ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ ++ dwc_otg_host_global_regs_t *host_regs; ++ host_regs = hcd->core_if->host_if->host_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ ++ qh_ptr = hcd->periodic_sched_assigned.next; ++ while (qh_ptr != &hcd->periodic_sched_assigned) { ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ if (tx_status.b.ptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ ++ /* ++ * Set a flag if we're queuing high-bandwidth in slave mode. ++ * The flag prevents any halts to get into the request queue in ++ * the middle of multiple high-bandwidth packets getting queued. 
++ */ ++ if (!hcd->core_if->dma_enable && ++ qh->channel->multi_count > 1) ++ { ++ hcd->core_if->queuing_high_bandwidth = 1; ++ } ++ ++ status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail); ++ if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* ++ * In Slave mode, stay on the current transfer until there is ++ * nothing more to do or the high-bandwidth request count is ++ * reached. In DMA mode, only need to queue one request. The ++ * controller automatically handles multiple packets for ++ * high-bandwidth transfers. ++ */ ++ if (hcd->core_if->dma_enable || status == 0 || ++ qh->channel->requests == qh->channel->multi_count) { ++ qh_ptr = qh_ptr->next; ++ /* ++ * Move the QH from the periodic assigned schedule to ++ * the periodic queued schedule. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued); ++ ++ /* done queuing high bandwidth */ ++ hcd->core_if->queuing_high_bandwidth = 0; ++ } ++ } ++ ++ if (!hcd->core_if->dma_enable) { ++ dwc_otg_core_global_regs_t *global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ global_regs = hcd->core_if->core_global_regs; ++ intr_mask.b.ptxfempty = 1; ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ if (!list_empty(&hcd->periodic_sched_assigned) || ++ no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the periodic Tx ++ * FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. 
This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * This function processes the currently active host channels and queues ++ * transactions for these channels to the DWC_otg controller. It is called ++ * from HCD interrupt handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * @param tr_type The type(s) of transactions to queue (non-periodic, ++ * periodic, or both). ++ */ ++void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type) ++{ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); ++#endif ++ /* Process host channels associated with periodic transfers. */ ++ if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) && ++ !list_empty(&hcd->periodic_sched_assigned)) { ++ ++ process_periodic_channels(hcd); ++ } ++ ++ /* Process host channels associated with non-periodic transfers. */ ++ if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) { ++ if (!list_empty(&hcd->non_periodic_sched_active)) { ++ process_non_periodic_channels(hcd); ++ } else { ++ /* ++ * Ensure NP Tx FIFO empty interrupt is disabled when ++ * there are no non-periodic transfers to process. ++ */ ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk, ++ gintmsk.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Sets the final status of an URB and returns it to the device driver. Any ++ * required cleanup of the URB is performed. ++ */ ++void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status) ++{ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", ++ __func__, urb, usb_pipedevice(urb->pipe), ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? 
"IN" : "OUT", status); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d status: %d\n", ++ i, urb->iso_frame_desc[i].status); ++ } ++ } ++ } ++#endif ++ ++ //if we use the aligned buffer instead of the original unaligned buffer, ++ //for IN data, we have to move the data to the original buffer ++ if((urb->transfer_dma==urb->aligned_transfer_dma)&&((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_IN)){ ++ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->actual_length,DMA_FROM_DEVICE); ++ memcpy(urb->transfer_buffer,urb->aligned_transfer_buffer,urb->actual_length); ++ } ++ ++ ++ urb->status = status; ++ urb->hcpriv = NULL; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status); ++#else ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, NULL); ++#endif ++} ++ ++/* ++ * Returns the Queue Head for an URB. ++ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb) ++{ ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++ return (dwc_otg_qh_t *)ep->hcpriv; ++} ++ ++#ifdef DEBUG ++void dwc_print_setup_data(uint8_t *setup) ++{ ++ int i; ++ if (CHK_DEBUG_LEVEL(DBG_HCD)){ ++ DWC_PRINT("Setup Data = MSB "); ++ for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]); ++ DWC_PRINT("\n"); ++ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0] & 0x80) ? 
"Device-to-Host" : "Host-to-Device"); ++ DWC_PRINT(" bmRequestType Type = "); ++ switch ((setup[0] & 0x60) >> 5) { ++ case 0: DWC_PRINT("Standard\n"); break; ++ case 1: DWC_PRINT("Class\n"); break; ++ case 2: DWC_PRINT("Vendor\n"); break; ++ case 3: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bmRequestType Recipient = "); ++ switch (setup[0] & 0x1f) { ++ case 0: DWC_PRINT("Device\n"); break; ++ case 1: DWC_PRINT("Interface\n"); break; ++ case 2: DWC_PRINT("Endpoint\n"); break; ++ case 3: DWC_PRINT("Other\n"); break; ++ default: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); ++ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); ++ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); ++ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); ++ } ++} ++#endif ++ ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) { ++#if defined(DEBUG) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ DWC_PRINT("Frame remaining at SOF:\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->frrem_samples, hcd->frrem_accum, ++ (hcd->frrem_samples > 0) ? ++ hcd->frrem_accum/hcd->frrem_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_7_samples, hcd->core_if->hfnum_7_frrem_accum, ++ (hcd->core_if->hfnum_7_samples > 0) ? ++ hcd->core_if->hfnum_7_frrem_accum/hcd->core_if->hfnum_7_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_0_samples, hcd->core_if->hfnum_0_frrem_accum, ++ (hcd->core_if->hfnum_0_samples > 0) ? 
++ hcd->core_if->hfnum_0_frrem_accum/hcd->core_if->hfnum_0_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_other_samples, hcd->core_if->hfnum_other_frrem_accum, ++ (hcd->core_if->hfnum_other_samples > 0) ? ++ hcd->core_if->hfnum_other_frrem_accum/hcd->core_if->hfnum_other_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a, ++ (hcd->hfnum_7_samples_a > 0) ? ++ hcd->hfnum_7_frrem_accum_a/hcd->hfnum_7_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a, ++ (hcd->hfnum_0_samples_a > 0) ? ++ hcd->hfnum_0_frrem_accum_a/hcd->hfnum_0_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a, ++ (hcd->hfnum_other_samples_a > 0) ? ++ hcd->hfnum_other_frrem_accum_a/hcd->hfnum_other_samples_a : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b, ++ (hcd->hfnum_7_samples_b > 0) ? ++ hcd->hfnum_7_frrem_accum_b/hcd->hfnum_7_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b, ++ (hcd->hfnum_0_samples_b > 0) ? 
++ hcd->hfnum_0_frrem_accum_b/hcd->hfnum_0_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b, ++ (hcd->hfnum_other_samples_b > 0) ? ++ hcd->hfnum_other_frrem_accum_b/hcd->hfnum_other_samples_b : 0); ++#endif ++} ++ ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int num_channels; ++ int i; ++ gnptxsts_data_t np_tx_status; ++ hptxsts_data_t p_tx_status; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ DWC_PRINT("\n"); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("HCD State:\n"); ++ DWC_PRINT(" Num channels: %d\n", num_channels); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" Channel %d:\n", i); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" speed: %d\n", hc->speed); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" multi_count: %d\n", hc->multi_count); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); ++ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" do_split: %d\n", hc->do_split); ++ DWC_PRINT(" complete_split: %d\n", hc->complete_split); ++ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); ++ DWC_PRINT(" port_addr: %d\n", hc->port_addr); ++ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); ++ DWC_PRINT(" requests: %d\n", hc->requests); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ if (hc->xfer_started) { ++ hfnum_data_t hfnum; ++ 
hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk); ++ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); ++ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); ++ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); ++ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); ++ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); ++ } ++ if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) { ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ qtd = hc->qh->qtd_in_process; ++ urb = qtd->urb; ++ DWC_PRINT(" URB Info:\n"); ++ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); ++ if (urb) { ++ DWC_PRINT(" Dev: %d, EP: %d %s\n", ++ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? 
"IN" : "OUT"); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); ++ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); ++ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" actual_length: %d\n", urb->actual_length); ++ } ++ } ++ } ++ DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels); ++ DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels); ++ DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs); ++ np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts); ++ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); ++ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); ++ p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts); ++ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); ++ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); ++ dwc_otg_hcd_dump_frrem(hcd); ++ dwc_otg_dump_global_registers(hcd->core_if); ++ dwc_otg_dump_host_registers(hcd->core_if); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("\n"); ++#endif ++} ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_hcd.h +@@ -0,0 +1,663 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ ++ * $Revision: #45 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. 
++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++#ifndef __DWC_HCD_H__ ++#define __DWC_HCD_H__ ++ ++#include ++#include ++#include <../drivers/usb/core/hcd.h> ++ ++struct lm_device; ++struct dwc_otg_device; ++ ++#include "dwc_otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Host Contoller Driver (HCD). 
++ * ++ * The Host Controller Driver (HCD) is responsible for translating requests ++ * from the USB Driver into the appropriate actions on the DWC_otg controller. ++ * It isolates the USBD from the specifics of the controller by providing an ++ * API to the USBD. ++ */ ++ ++/** ++ * Phases for control transfers. ++ */ ++typedef enum dwc_otg_control_phase { ++ DWC_OTG_CONTROL_SETUP, ++ DWC_OTG_CONTROL_DATA, ++ DWC_OTG_CONTROL_STATUS ++} dwc_otg_control_phase_e; ++ ++/** Transaction types. */ ++typedef enum dwc_otg_transaction_type { ++ DWC_OTG_TRANSACTION_NONE, ++ DWC_OTG_TRANSACTION_PERIODIC, ++ DWC_OTG_TRANSACTION_NON_PERIODIC, ++ DWC_OTG_TRANSACTION_ALL ++} dwc_otg_transaction_type_e; ++ ++/** ++ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, ++ * interrupt, or isochronous transfer. A single QTD is created for each URB ++ * (of one of these types) submitted to the HCD. The transfer associated with ++ * a QTD may require one or multiple transactions. ++ * ++ * A QTD is linked to a Queue Head, which is entered in either the ++ * non-periodic or periodic schedule for execution. When a QTD is chosen for ++ * execution, some or all of its transactions may be executed. After ++ * execution, the state of the QTD is updated. The QTD may be retired if all ++ * its transactions are complete or if an error occurred. Otherwise, it ++ * remains in the schedule so more transactions can be executed later. ++ */ ++typedef struct dwc_otg_qtd { ++ /** ++ * Determines the PID of the next data packet for the data phase of ++ * control transfers. Ignored for other transfer types.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Current phase for control transfers (Setup, Data, or Status). */ ++ dwc_otg_control_phase_e control_phase; ++ ++ /** Keep track of the current split type ++ * for FS/LS endpoints on a HS Hub */ ++ uint8_t complete_split; ++ ++ /** How many bytes transferred during SSPLIT OUT */ ++ uint32_t ssplit_out_xfer_count; ++ ++ /** ++ * Holds the number of bus errors that have occurred for a transaction ++ * within this transfer. ++ */ ++ uint8_t error_count; ++ ++ /** ++ * Index of the next frame descriptor for an isochronous transfer. A ++ * frame descriptor describes the buffer position and length of the ++ * data to be transferred in the next scheduled (micro)frame of an ++ * isochronous transfer. It also holds status for that transaction. ++ * The frame index starts at 0. ++ */ ++ int isoc_frame_index; ++ ++ /** Position of the ISOC split on full/low speed */ ++ uint8_t isoc_split_pos; ++ ++ /** Position of the ISOC split in the buffer for the current frame */ ++ uint16_t isoc_split_offset; ++ ++ /** URB for this transfer */ ++ struct urb *urb; ++ ++ /** This list of QTDs */ ++ struct list_head qtd_list_entry; ++ ++} dwc_otg_qtd_t; ++ ++/** ++ * A Queue Head (QH) holds the static characteristics of an endpoint and ++ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may ++ * be entered in either the non-periodic or periodic schedule. ++ */ ++typedef struct dwc_otg_qh { ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - USB_ENDPOINT_XFER_CONTROL ++ * - USB_ENDPOINT_XFER_ISOC ++ * - USB_ENDPOINT_XFER_BULK ++ * - USB_ENDPOINT_XFER_INT ++ */ ++ uint8_t ep_type; ++ uint8_t ep_is_in; ++ ++ /** wMaxPacketSize Field of Endpoint Descriptor. */ ++ uint16_t maxp; ++ ++ /** ++ * Determines the PID of the next data packet for non-control ++ * transfers. Ignored for control transfers.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Ping state if 1. */ ++ uint8_t ping_state; ++ ++ /** ++ * List of QTDs for this QH. ++ */ ++ struct list_head qtd_list; ++ ++ /** Host channel currently processing transfers for this QH. */ ++ dwc_hc_t *channel; ++ ++ /** QTD currently assigned to a host channel for this QH. */ ++ dwc_otg_qtd_t *qtd_in_process; ++ ++ /** Full/low speed endpoint on high-speed hub requires split. */ ++ uint8_t do_split; ++ ++ /** @name Periodic schedule information */ ++ /** @{ */ ++ ++ /** Bandwidth in microseconds per (micro)frame. */ ++ uint8_t usecs; ++ ++ /** Interval between transfers in (micro)frames. */ ++ uint16_t interval; ++ ++ /** ++ * (micro)frame to initialize a periodic transfer. The transfer ++ * executes in the following (micro)frame. ++ */ ++ uint16_t sched_frame; ++ ++ /** (micro)frame at which last start split was initialized. */ ++ uint16_t start_split_frame; ++ ++ /** @} */ ++ ++ /** Entry for QH in either the periodic or non-periodic schedule. */ ++ struct list_head qh_list_entry; ++} dwc_otg_qh_t; ++ ++/** ++ * This structure holds the state of the HCD, including the non-periodic and ++ * periodic schedules. ++ */ ++typedef struct dwc_otg_hcd { ++ /** The DWC otg device pointer */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** DWC OTG Core Interface Layer */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Internal DWC HCD Flags */ ++ volatile union dwc_otg_hcd_internal_flags { ++ uint32_t d32; ++ struct { ++ unsigned port_connect_status_change : 1; ++ unsigned port_connect_status : 1; ++ unsigned port_reset_change : 1; ++ unsigned port_enable_change : 1; ++ unsigned port_suspend_change : 1; ++ unsigned port_over_current_change : 1; ++ unsigned reserved : 27; ++ } b; ++ } flags; ++ ++ /** ++ * Inactive items in the non-periodic schedule. This is a list of ++ * Queue Heads. 
Transfers associated with these Queue Heads are not ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_inactive; ++ ++ /** ++ * Active items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_active; ++ ++ /** ++ * Pointer to the next Queue Head to process in the active ++ * non-periodic schedule. ++ */ ++ struct list_head *non_periodic_qh_ptr; ++ ++ /** ++ * Inactive items in the periodic schedule. This is a list of QHs for ++ * periodic transfers that are _not_ scheduled for the next frame. ++ * Each QH in the list has an interval counter that determines when it ++ * needs to be scheduled for execution. This scheduling mechanism ++ * allows only a simple calculation for periodic bandwidth used (i.e. ++ * must assume that all periodic transfers may need to execute in the ++ * same frame). However, it greatly simplifies scheduling and should ++ * be sufficient for the vast majority of OTG hosts, which need to ++ * connect to a small number of peripherals at one time. ++ * ++ * Items move from this list to periodic_sched_ready when the QH ++ * interval counter is 0 at SOF. ++ */ ++ struct list_head periodic_sched_inactive; ++ ++ /** ++ * List of periodic QHs that are ready for execution in the next ++ * frame, but have not yet been assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_assigned as host ++ * channels become available during the current frame. ++ */ ++ struct list_head periodic_sched_ready; ++ ++ /** ++ * List of periodic QHs to be executed in the next frame that are ++ * assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_queued as the ++ * transactions for the QH are queued to the DWC_otg controller. 
++ */ ++ struct list_head periodic_sched_assigned; ++ ++ /** ++ * List of periodic QHs that have been queued for execution. ++ * ++ * Items move from this list to either periodic_sched_inactive or ++ * periodic_sched_ready when the channel associated with the transfer ++ * is released. If the interval for the QH is 1, the item moves to ++ * periodic_sched_ready because it must be rescheduled for the next ++ * frame. Otherwise, the item moves to periodic_sched_inactive. ++ */ ++ struct list_head periodic_sched_queued; ++ ++ /** ++ * Total bandwidth claimed so far for periodic transfers. This value ++ * is in microseconds per (micro)frame. The assumption is that all ++ * periodic transfers may occur in the same (micro)frame. ++ */ ++ uint16_t periodic_usecs; ++ ++ /** ++ * Frame number read from the core at SOF. The value ranges from 0 to ++ * DWC_HFNUM_MAX_FRNUM. ++ */ ++ uint16_t frame_number; ++ ++ /** ++ * Free host channels in the controller. This is a list of ++ * dwc_hc_t items. ++ */ ++ struct list_head free_hc_list; ++ ++ /** ++ * Number of host channels assigned to periodic transfers. Currently ++ * assuming that there is a dedicated host channel for each periodic ++ * transaction and at least one host channel available for ++ * non-periodic transactions. ++ */ ++ int periodic_channels; ++ ++ /** ++ * Number of host channels assigned to non-periodic transfers. ++ */ ++ int non_periodic_channels; ++ ++ /** ++ * Array of pointers to the host channel descriptors. Allows accessing ++ * a host channel descriptor given the host channel number. This is ++ * useful in interrupt handlers. ++ */ ++ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; ++ ++ /** ++ * Buffer to use for any data received during the status phase of a ++ * control transfer. Normally no data is transferred during the status ++ * phase. This buffer is used as a bit bucket. ++ */ ++ uint8_t *status_buf; ++ ++ /** ++ * DMA address for status_buf. 
++ */ ++ dma_addr_t status_buf_dma; ++#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 ++ ++ /** ++ * Structure to allow starting the HCD in a non-interrupt context ++ * during an OTG role change. ++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct start_work; ++#else ++ struct delayed_work start_work; ++#endif ++ ++ /** ++ * Connection timer. An OTG host must display a message if the device ++ * does not connect. Started when the VBus power is turned on via ++ * sysfs attribute "buspower". ++ */ ++ struct timer_list conn_timer; ++ ++ /* Tasket to do a reset */ ++ struct tasklet_struct *reset_tasklet; ++ ++ /* */ ++ spinlock_t lock; ++ ++#ifdef DEBUG ++ uint32_t frrem_samples; ++ uint64_t frrem_accum; ++ ++ uint32_t hfnum_7_samples_a; ++ uint64_t hfnum_7_frrem_accum_a; ++ uint32_t hfnum_0_samples_a; ++ uint64_t hfnum_0_frrem_accum_a; ++ uint32_t hfnum_other_samples_a; ++ uint64_t hfnum_other_frrem_accum_a; ++ ++ uint32_t hfnum_7_samples_b; ++ uint64_t hfnum_7_frrem_accum_b; ++ uint32_t hfnum_0_samples_b; ++ uint64_t hfnum_0_frrem_accum_b; ++ uint32_t hfnum_other_samples_b; ++ uint64_t hfnum_other_frrem_accum_b; ++#endif ++} dwc_otg_hcd_t; ++ ++/** Gets the dwc_otg_hcd from a struct usb_hcd */ ++static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) ++{ ++ return (dwc_otg_hcd_t *)(hcd->hcd_priv); ++} ++ ++/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. 
*/ ++static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); ++} ++ ++/** @name HCD Create/Destroy Functions */ ++/** @{ */ ++extern int dwc_otg_hcd_init(struct lm_device *lmdev); ++extern void dwc_otg_hcd_remove(struct lm_device *lmdev); ++/** @} */ ++ ++/** @name Linux HC Driver API Functions */ ++/** @{ */ ++ ++extern int dwc_otg_hcd_start(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_free(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ // struct usb_host_endpoint *ep, ++ struct urb *urb, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int mem_flags ++#else ++ gfp_t mem_flags ++#endif ++ ); ++extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_host_endpoint *ep, ++#endif ++ struct urb *urb, int status); ++extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep); ++extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ , struct pt_regs *regs ++#endif ++ ); ++extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, ++ char *buf); ++extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength); ++ ++/** @} */ ++ ++/** @name Transaction Execution Functions */ ++/** @{ */ ++extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd); ++extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type); ++extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb, ++ int status); ++/** @} */ ++ ++/** @name Interrupt Handler Functions */ ++/** @{ */ ++extern int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t 
*dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_disconnect_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num); ++extern int32_t dwc_otg_hcd_handle_session_req_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++/** @} */ ++ ++ ++/** @name Schedule Queue Functions */ ++/** @{ */ ++ ++/* Implemented in dwc_otg_hcd_queue.c */ ++extern dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t *hcd, struct urb *urb); ++extern void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb); ++extern void dwc_otg_hcd_qh_free(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern int dwc_otg_hcd_qh_add(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_csplit); ++ ++/** Remove and free a QH */ ++static inline void dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ dwc_otg_hcd_qh_free(hcd, qh); ++} ++ ++/** Allocates memory for a QH structure. ++ * @return Returns the memory allocate or NULL on error. 
*/ ++static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc(void) ++{ ++ return (dwc_otg_qh_t *) kmalloc(sizeof(dwc_otg_qh_t), GFP_KERNEL); ++} ++ ++extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create(struct urb *urb); ++extern void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t *qtd, struct urb *urb); ++extern int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); ++ ++/** Allocates memory for a QTD structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc(void) ++{ ++ return (dwc_otg_qtd_t *) kmalloc(sizeof(dwc_otg_qtd_t), GFP_KERNEL); ++} ++ ++/** Frees the memory for a QTD structure. QTD should already be removed from ++ * list. ++ * @param[in] qtd QTD to free.*/ ++static inline void dwc_otg_hcd_qtd_free(dwc_otg_qtd_t *qtd) ++{ ++ kfree(qtd); ++} ++ ++/** Removes a QTD from list. ++ * @param[in] hcd HCD instance. ++ * @param[in] qtd QTD to remove from list. */ ++static inline void dwc_otg_hcd_qtd_remove(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ list_del(&qtd->qtd_list_entry); ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** Remove and free a QTD */ ++static inline void dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ dwc_otg_hcd_qtd_remove(hcd, qtd); ++ dwc_otg_hcd_qtd_free(qtd); ++} ++ ++/** @} */ ++ ++ ++/** @name Internal Functions */ ++/** @{ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb); ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd); ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd); ++/** @} */ ++ ++/** Gets the usb_host_endpoint associated with an URB. 
*/ ++static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb) ++{ ++ struct usb_device *dev = urb->dev; ++ int ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (usb_pipein(urb->pipe)) ++ return dev->ep_in[ep_num]; ++ else ++ return dev->ep_out[ep_num]; ++} ++ ++/** ++ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is ++ * qualified with its direction (possible 32 endpoints per device). ++ */ ++#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ ++ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) ++ ++/** Gets the QH that contains the list_head */ ++#define dwc_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qh_t, qh_list_entry) ++ ++/** Gets the QTD that contains the list_head */ ++#define dwc_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qtd_t, qtd_list_entry) ++ ++/** Check if QH is non-periodic */ ++#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ ++ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) ++ ++/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ ++#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) ++ ++/** Packet size for any kind of endpoint descriptor */ ++#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) ++ ++/** ++ * Returns true if _frame1 is less than or equal to _frame2. The comparison is ++ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the ++ * frame number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_le(uint16_t frame1, uint16_t frame2) ++{ ++ return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <= ++ (DWC_HFNUM_MAX_FRNUM >> 1); ++} ++ ++/** ++ * Returns true if _frame1 is greater than _frame2. The comparison is done ++ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame ++ * number when the max frame number is reached. 
++ */ ++static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2) ++{ ++ return (frame1 != frame2) && ++ (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) < ++ (DWC_HFNUM_MAX_FRNUM >> 1)); ++} ++ ++/** ++ * Increments _frame by the amount specified by _inc. The addition is done ++ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. ++ */ ++static inline uint16_t dwc_frame_num_inc(uint16_t frame, uint16_t inc) ++{ ++ return (frame + inc) & DWC_HFNUM_MAX_FRNUM; ++} ++ ++static inline uint16_t dwc_full_frame_num(uint16_t frame) ++{ ++ return (frame & DWC_HFNUM_MAX_FRNUM) >> 3; ++} ++ ++static inline uint16_t dwc_micro_frame_num(uint16_t frame) ++{ ++ return frame & 0x7; ++} ++ ++#ifdef DEBUG ++/** ++ * Macro to sample the remaining PHY clocks left in the current frame. This ++ * may be used during debugging to determine the average time it takes to ++ * execute sections of code. There are two possible sample points, "a" and ++ * "b", so the _letter argument must be one of these values. ++ * ++ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For ++ * example, "cat /sys/devices/lm0/hcd_frrem". 
++ */ ++#define dwc_sample_frrem(_hcd, _qh, _letter) \ ++{ \ ++ hfnum_data_t hfnum; \ ++ dwc_otg_qtd_t *qtd; \ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ ++ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ ++ switch (hfnum.b.frnum & 0x7) { \ ++ case 7: \ ++ _hcd->hfnum_7_samples_##_letter++; \ ++ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ case 0: \ ++ _hcd->hfnum_0_samples_##_letter++; \ ++ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ default: \ ++ _hcd->hfnum_other_samples_##_letter++; \ ++ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ } \ ++ } \ ++} ++#else ++#define dwc_sample_frrem(_hcd, _qh, _letter) ++#endif ++#endif ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_hcd_intr.c +@@ -0,0 +1,1829 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ ++ * $Revision: #70 $ ++ * $Date: 2008/10/16 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. 
You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++/** @file ++ * This file contains the implementation of the HCD Interrupt handlers. ++ */ ++ ++/** This function handles interrupts for the HCD. 
*/ ++int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++#endif ++ ++ /* Check if HOST Mode */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ gintsts.d32 = dwc_otg_read_core_intr(core_if); ++ if (!gintsts.d32) { ++ return 0; ++ } ++ ++#ifdef DEBUG ++ /* Don't print debug message in the interrupt handler on SOF */ ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); ++#endif ++ if (gintsts.b.usbreset) { ++ DWC_PRINT("Usb Reset In Host Mode\n"); ++ } ++ ++ ++ if (gintsts.b.sofintr) { ++ retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.rxstsqlvl) { ++ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.nptxfempty) { ++ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.i2cintr) { ++ /** @todo Implement i2cintr handler. 
*/ ++ } ++ if (gintsts.b.portintr) { ++ retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.hcintr) { ++ retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.ptxfempty) { ++ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintmsk)); ++ } ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++#warning Compiling code to track missed SOFs ++#define FRAME_NUM_ARRAY_SIZE 1000 ++/** ++ * This function is for debug only. 
++ */ ++static inline void track_missed_sofs(uint16_t curr_frame_number) ++{ ++ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static int frame_num_idx = 0; ++ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; ++ static int dumped_frame_num_array = 0; ++ ++ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { ++ if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) { ++ frame_num_array[frame_num_idx] = curr_frame_number; ++ last_frame_num_array[frame_num_idx++] = last_frame_num; ++ } ++ } else if (!dumped_frame_num_array) { ++ int i; ++ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); ++ printk(KERN_EMERG USB_DWC "----- ----------\n"); ++ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { ++ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", ++ frame_num_array[i], last_frame_num_array[i]); ++ } ++ dumped_frame_num_array = 1; ++ } ++ last_frame_num = curr_frame_number; ++} ++#endif ++ ++/** ++ * Handles the start-of-frame interrupt in host mode. Non-periodic ++ * transactions may be queued to the DWC_otg controller for the current ++ * (micro)frame. Periodic transactions may be queued to the controller for the ++ * next (micro)frame. ++ */ ++int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd) ++{ ++ hfnum_data_t hfnum; ++ struct list_head *qh_entry; ++ dwc_otg_qh_t *qh; ++ dwc_otg_transaction_type_e tr_type; ++ gintsts_data_t gintsts = {.d32 = 0}; ++ ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); ++#endif ++ hcd->frame_number = hfnum.b.frnum; ++ ++#ifdef DEBUG ++ hcd->frrem_accum += hfnum.b.frrem; ++ hcd->frrem_samples++; ++#endif ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++ track_missed_sofs(hcd->frame_number); ++#endif ++ ++ /* Determine whether any periodic QHs should be executed. 
*/ ++ qh_entry = hcd->periodic_sched_inactive.next; ++ while (qh_entry != &hcd->periodic_sched_inactive) { ++ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); ++ qh_entry = qh_entry->next; ++ if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) { ++ /* ++ * Move QH to the ready list to be executed next ++ * (micro)frame. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready); ++ } ++ } ++ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at ++ * least one packet in the Rx FIFO. The packets are moved from the FIFO to ++ * memory if the DWC_otg controller is operating in Slave mode. */ ++int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ host_grxsts_data_t grxsts; ++ dwc_hc_t *hc = NULL; ++ ++ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); ++ ++ grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; ++ ++ /* Packet Status */ ++ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); ++ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); ++ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); ++ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer. */ ++ if (grxsts.b.bcnt > 0) { ++ dwc_otg_read_packet(dwc_otg_hcd->core_if, ++ hc->xfer_buff, ++ grxsts.b.bcnt); ++ ++ /* Update the HC fields for the next packet received. 
*/ ++ hc->xfer_count += grxsts.b.bcnt; ++ hc->xfer_buff += grxsts.b.bcnt; ++ } ++ ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: ++ case DWC_GRXSTS_PKTSTS_CH_HALTED: ++ /* Handled in interrupt, just ignore data */ ++ break; ++ default: ++ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); ++ break; ++ } ++ ++ return 1; ++} ++ ++/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More ++ * data packets may be written to the FIFO for OUT transfers. More requests ++ * may be written to the non-periodic request queue for IN transfers. This ++ * interrupt is enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_NON_PERIODIC); ++ return 1; ++} ++ ++/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data ++ * packets may be written to the FIFO for OUT transfers. More requests may be ++ * written to the periodic request queue for IN transfers. This interrupt is ++ * enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_PERIODIC); ++ return 1; ++} ++ ++/** There are multiple conditions that can cause a port interrupt. This function ++ * determines which interrupt conditions have occurred and handles them ++ * appropriately. 
*/ ++int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ hprt0_data_t hprt0; ++ hprt0_data_t hprt0_modify; ++ ++ hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ ++ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in ++ * GINTSTS */ ++ ++ hprt0_modify.b.prtena = 0; ++ hprt0_modify.b.prtconndet = 0; ++ hprt0_modify.b.prtenchng = 0; ++ hprt0_modify.b.prtovrcurrchng = 0; ++ ++ /* Port Connect Detected ++ * Set flag and clear if detected */ ++ if (hprt0.b.prtconndet) { ++ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " ++ "Port Connect Detected--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 1; ++ hprt0_modify.b.prtconndet = 1; ++ ++ /* B-Device has connected, Delete the connection timer. */ ++ del_timer( &dwc_otg_hcd->conn_timer ); ++ ++ /* The Hub driver asserts a reset when it sees port connect ++ * status change flag */ ++ retval |= 1; ++ } ++ ++ /* Port Enable Changed ++ * Clear if detected - Set internal flag if disabled */ ++ if (hprt0.b.prtenchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Enable Changed--\n", hprt0.d32); ++ hprt0_modify.b.prtenchng = 1; ++ if (hprt0.b.prtena == 1) { ++ int do_reset = 0; ++ dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params; ++ dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if; ++ ++ /* Check if we need to adjust the PHY clock speed for ++ * low power and adjust it */ ++ if (params->host_support_fs_ls_low_power) { ++ gusbcfg_data_t usbcfg; ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED || ++ hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) { ++ /* ++ * Low power ++ */ ++ hcfg_data_t hcfg; ++ if 
(usbcfg.b.phylpwrclksel == 0) { ++ /* Set PHY low power clock select for FS/LS devices */ ++ usbcfg.b.phylpwrclksel = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED && ++ params->host_ls_low_power_phy_clk == ++ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { ++ /* 6 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } else { ++ /* 48 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } ++ } else { ++ /* ++ * Not low power ++ */ ++ if (usbcfg.b.phylpwrclksel == 1) { ++ usbcfg.b.phylpwrclksel = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ } ++ ++ if (do_reset) { ++ tasklet_schedule(dwc_otg_hcd->reset_tasklet); ++ } ++ } ++ ++ if (!do_reset) { ++ /* Port has been enabled set the reset change flag */ ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++ } ++ } else { ++ dwc_otg_hcd->flags.b.port_enable_change = 1; ++ } ++ retval |= 1; ++ } ++ ++ /** Overcurrent Change Interrupt */ ++ if (hprt0.b.prtovrcurrchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Overcurrent Changed--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_over_current_change = 1; ++ hprt0_modify.b.prtovrcurrchng = 1; ++ retval |= 1; ++ } ++ ++ /* Clear Port Interrupts */ ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); ++ ++ return retval; ++} ++ ++/** This interrupt indicates that one or more host channels has a pending ++ * interrupt. 
There are multiple conditions that can cause each host channel ++ * interrupt. This function determines which conditions have occurred for each ++ * host channel interrupt and handles them appropriately. */ ++int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int i; ++ int retval = 0; ++ haint_data_t haint; ++ ++ /* Clear appropriate bits in HCINTn to clear the interrupt bit in ++ * GINTSTS */ ++ ++ haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if); ++ ++ for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) { ++ if (haint.b2.chint & (1 << i)) { ++ retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i); ++ } ++ } ++ ++ return retval; ++} ++ ++/* Macro used to clear one channel interrupt */ ++#define clear_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcint_data_t hcint_clear = {.d32 = 0}; \ ++ hcint_clear.b._intr_ = 1; \ ++ dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \ ++} while (0) ++ ++/* ++ * Macro used to disable one channel interrupt. Channel interrupts are ++ * disabled when the channel is halted or released by the interrupt handler. ++ * There is no need to handle further interrupts of that type until the ++ * channel is re-assigned. In fact, subsequent handling may cause crashes ++ * because the channel structures are cleaned up when the channel is released. ++ */ ++#define disable_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ ++ hcintmsk.b._intr_ = 1; \ ++ dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \ ++} while (0) ++ ++/** ++ * Gets the actual length of a transfer after the transfer halts. _halt_status ++ * holds the reason for the halt. ++ * ++ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE, ++ * *short_read is set to 1 upon return if less than the requested ++ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon ++ * return. short_read may also be NULL on entry, in which case it remains ++ * unchanged. 
++ */ ++static uint32_t get_actual_xfer_length(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status, ++ int *short_read) ++{ ++ hctsiz_data_t hctsiz; ++ uint32_t length; ++ ++ if (short_read != NULL) { ++ *short_read = 0; ++ } ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (halt_status == DWC_OTG_HC_XFER_COMPLETE) { ++ if (hc->ep_is_in) { ++ length = hc->xfer_len - hctsiz.b.xfersize; ++ if (short_read != NULL) { ++ *short_read = (hctsiz.b.xfersize != 0); ++ } ++ } else if (hc->qh->do_split) { ++ length = qtd->ssplit_out_xfer_count; ++ } else { ++ length = hc->xfer_len; ++ } ++ } else { ++ /* ++ * Must use the hctsiz.pktcnt field to determine how much data ++ * has been transferred. This field reflects the number of ++ * packets that have been transferred via the USB. This is ++ * always an integral number of packets if the transfer was ++ * halted before its normal completion. (Can't use the ++ * hctsiz.xfersize field because that reflects the number of ++ * bytes transferred via the AHB, not the USB). ++ */ ++ length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet; ++ } ++ ++ return length; ++} ++ ++/** ++ * Updates the state of the URB after a Transfer Complete interrupt on the ++ * host channel. Updates the actual_length field of the URB based on the ++ * number of bytes transferred via the host channel. Sets the URB status ++ * if the data transfer is finished. ++ * ++ * @return 1 if the data transfer specified by the URB is completely finished, ++ * 0 otherwise. 
++ */ ++static int update_urb_state_xfer_comp(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd) ++{ ++ int xfer_done = 0; ++ int short_read = 0; ++ ++ urb->actual_length += get_actual_xfer_length(hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE, ++ &short_read); ++ ++ if (short_read || urb->actual_length == urb->transfer_buffer_length) { ++ xfer_done = 1; ++ if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) { ++ urb->status = -EREMOTEIO; ++ } else { ++ urb->status = 0; ++ } ++ } ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", ++ short_read, xfer_done); ++ } ++#endif ++ ++ return xfer_done; ++} ++ ++/* ++ * Save the starting data toggle for the next transfer. The data toggle is ++ * saved in the QH for non-control transfers and it's saved in the QTD for ++ * control transfers. 
++ */ ++static void save_data_toggle(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { ++ dwc_otg_qh_t *qh = hc->qh; ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } else { ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } ++} ++ ++/** ++ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic ++ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are ++ * still linked to the QH, the QH is added to the end of the inactive ++ * non-periodic schedule. For periodic QHs, removes the QH from the periodic ++ * schedule if no more QTDs are linked to the QH. ++ */ ++static void deactivate_qh(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh, ++ int free_qtd) ++{ ++ int continue_split = 0; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ if (qtd->complete_split) { ++ continue_split = 1; ++ } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID || ++ qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) { ++ continue_split = 1; ++ } ++ ++ if (free_qtd) { ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ continue_split = 0; ++ } ++ ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split); ++} ++ ++/** ++ * Updates the state of an Isochronous URB when the transfer is stopped for ++ * any reason. The fields of the current entry in the frame descriptor array ++ * are set based on the transfer state and the input _halt_status. Completes ++ * the Isochronous URB if all the URB frames have been completed. 
++ * ++ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be ++ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. ++ */ ++static dwc_otg_halt_status_e ++update_isoc_urb_state(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ struct urb *urb = qtd->urb; ++ dwc_otg_halt_status_e ret_val = halt_status; ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_COMPLETE: ++ frame_desc->status = 0; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ break; ++ case DWC_OTG_HC_XFER_FRAME_OVERRUN: ++ urb->error_count++; ++ if (hc->ep_is_in) { ++ frame_desc->status = -ENOSR; ++ } else { ++ frame_desc->status = -ECOMM; ++ } ++ frame_desc->actual_length = 0; ++ break; ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ urb->error_count++; ++ frame_desc->status = -EOVERFLOW; ++ /* Don't need to update actual_length in this case. */ ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ urb->error_count++; ++ frame_desc->status = -EPROTO; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ default: ++ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, ++ halt_status); ++ BUG(); ++ break; ++ } ++ ++ if (++qtd->isoc_frame_index == urb->number_of_packets) { ++ /* ++ * urb->status is not used for isoc transfers. ++ * The individual frame_desc statuses are used instead. ++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, 0); ++ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ ret_val = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Releases a host channel for use by other transfers. Attempts to select and ++ * queue more transactions since at least one host channel is available. ++ * ++ * @param hcd The HCD state structure. 
++ * @param hc The host channel to release. ++ * @param qtd The QTD associated with the host channel. This QTD may be freed ++ * if the transfer is complete or an error has occurred. ++ * @param halt_status Reason the channel is being released. This status ++ * determines the actions taken by this function. ++ */ ++static void release_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ dwc_otg_transaction_type_e tr_type; ++ int free_qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", ++ __func__, hc->hc_num, halt_status); ++ ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_URB_COMPLETE: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_AHB_ERR: ++ case DWC_OTG_HC_XFER_STALL: ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ if (qtd->error_count >= 3) { ++ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); ++ free_qtd = 1; ++ qtd->urb->status = -EPROTO; ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO); ++ } else { ++ free_qtd = 0; ++ } ++ break; ++ case DWC_OTG_HC_XFER_URB_DEQUEUE: ++ /* ++ * The QTD has already been removed and the QH has been ++ * deactivated. Don't want to do anything except release the ++ * host channel and try to queue more transfers. ++ */ ++ goto cleanup; ++ case DWC_OTG_HC_XFER_NO_HALT_STATUS: ++ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num); ++ free_qtd = 0; ++ break; ++ default: ++ free_qtd = 0; ++ break; ++ } ++ ++ deactivate_qh(hcd, hc->qh, free_qtd); ++ ++ cleanup: ++ /* ++ * Release the host channel for use by other transfers. The cleanup ++ * function clears the channel interrupt enables and conditions, so ++ * there's no need to clear the Channel Halted interrupt separately. 
++ */ ++ dwc_otg_hc_cleanup(hcd->core_if, hc); ++ list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list); ++ ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hcd->non_periodic_channels--; ++ break; ++ ++ default: ++ /* ++ * Don't release reservations for periodic channels here. ++ * That's done when a periodic transfer is descheduled (i.e. ++ * when the QH is removed from the periodic schedule). ++ */ ++ break; ++ } ++ ++ /* Try to queue more transfers now that there's a free channel. */ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++} ++ ++/** ++ * Halts a host channel. If the channel cannot be halted immediately because ++ * the request queue is full, this function ensures that the FIFO empty ++ * interrupt for the appropriate queue is enabled so that the halt request can ++ * be queued when there is space in the request queue. ++ * ++ * This function may also be called in DMA mode. In that case, the channel is ++ * simply released since the core always halts the channel automatically in ++ * DMA mode. ++ */ ++static void halt_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ if (hcd->core_if->dma_enable) { ++ release_channel(hcd, hc, qtd, halt_status); ++ return; ++ } ++ ++ /* Slave mode processing... */ ++ dwc_otg_hc_halt(hcd->core_if, hc, halt_status); ++ ++ if (hc->halt_on_queue) { ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs; ++ global_regs = hcd->core_if->core_global_regs; ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ /* ++ * Make sure the Non-periodic Tx FIFO empty interrupt ++ * is enabled so that the non-periodic schedule will ++ * be processed. 
++ */ ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } else { ++ /* ++ * Move the QH from the periodic queued schedule to ++ * the periodic assigned schedule. This allows the ++ * halt to be queued when the periodic schedule is ++ * processed. ++ */ ++ list_move(&hc->qh->qh_list_entry, ++ &hcd->periodic_sched_assigned); ++ ++ /* ++ * Make sure the Periodic Tx FIFO Empty interrupt is ++ * enabled so that the periodic schedule will be ++ * processed. ++ */ ++ gintmsk.b.ptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } ++ } ++} ++ ++/** ++ * Performs common cleanup for non-periodic transfers after a Transfer ++ * Complete interrupt. This function should be called after any endpoint type ++ * specific handling is finished to release the host channel. ++ */ ++static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hcint_data_t hcint; ++ ++ qtd->error_count = 0; ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ if (hcint.b.nyet) { ++ /* ++ * Got a NYET on the last transaction of the transfer. This ++ * means that the endpoint should be in the PING state at the ++ * beginning of the next transfer. ++ */ ++ hc->qh->ping_state = 1; ++ clear_hc_int(hc_regs, nyet); ++ } ++ ++ /* ++ * Always halt and release the host channel to make it available for ++ * more transfers. There may still be more phases for a control ++ * transfer or more data packets for a bulk transfer at this point, ++ * but the host channel is still halted. A channel will be reassigned ++ * to the transfer when the non-periodic schedule is processed after ++ * the channel is released. This allows transactions to be queued ++ * properly via dwc_otg_hcd_queue_transactions, which also enables the ++ * Tx FIFO Empty interrupt if necessary. 
++ */ ++ if (hc->ep_is_in) { ++ /* ++ * IN transfers in Slave mode require an explicit disable to ++ * halt the channel. (In DMA mode, this call simply releases ++ * the channel.) ++ */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* ++ * The channel is automatically disabled by the core for OUT ++ * transfers in Slave mode. ++ */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Performs common cleanup for periodic transfers after a Transfer Complete ++ * interrupt. This function should be called after any endpoint type specific ++ * handling is finished to release the host channel. ++ */ ++static void complete_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hctsiz_data_t hctsiz; ++ qtd->error_count = 0; ++ ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) { ++ /* Core halts channel in these cases. */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* Flush any outstanding requests from the Tx queue. */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Handles a host channel Transfer Complete interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ int urb_xfer_done; ++ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transfer Complete--\n", hc->hc_num); ++ ++ /* ++ * Handle xfer complete on CSPLIT. ++ */ ++ if (hc->qh->do_split) { ++ qtd->complete_split = 0; ++ } ++ ++ /* Update the QTD and URB states. 
*/ ++ switch (pipe_type) { ++ case PIPE_CONTROL: ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ if (urb->transfer_buffer_length > 0) { ++ qtd->control_phase = DWC_OTG_CONTROL_DATA; ++ } else { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ } ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ case DWC_OTG_CONTROL_DATA: { ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); ++ } else { ++ save_data_toggle(hc, hc_regs, qtd); ++ } ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ } ++ case DWC_OTG_CONTROL_STATUS: ++ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); ++ if (urb->status == -EINPROGRESS) { ++ urb->status = 0; ++ } ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ break; ++ } ++ ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_BULK: ++ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_INTERRUPT: ++ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); ++ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ ++ /* ++ * Interrupt URB is done on the first transfer complete ++ * interrupt. 
++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_URB_COMPLETE); ++ break; ++ case PIPE_ISOCHRONOUS: ++ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); ++ if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE); ++ } ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xfercompl); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel STALL interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "STALL Received--\n", hc->hc_num); ++ ++ if (pipe_type == PIPE_CONTROL) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ } ++ ++ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ /* ++ * USB protocol requires resetting the data toggle for bulk ++ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) ++ * setup command is issued to the endpoint. Anticipate the ++ * CLEAR_FEATURE command since a STALL has occurred and reset ++ * the data toggle now. ++ */ ++ hc->qh->data_toggle = 0; ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL); ++ ++ disable_hc_int(hc_regs, stall); ++ ++ return 1; ++} ++ ++/* ++ * Updates the state of the URB when a transfer has been stopped due to an ++ * abnormal condition before the transfer completes. Modifies the ++ * actual_length field of the URB to reflect the number of bytes that have ++ * actually been transferred via the host channel. 
++ */ ++static void update_urb_state_xfer_intr(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ urb->actual_length += bytes_transferred; ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet); ++ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ } ++#endif ++} ++ ++/** ++ * Handles a host channel NAK interrupt. This handler may be called in either ++ * DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NAK Received--\n", hc->hc_num); ++ ++ /* ++ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and ++ * interrupt. Re-start the SSPLIT transfer. ++ */ ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ qtd->error_count = 0; ++ } ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ goto handle_nak_done; ++ } ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ if (hcd->core_if->dma_enable && hc->ep_is_in) { ++ /* ++ * NAK interrupts are enabled on bulk/control IN ++ * transfers in DMA mode for the sole purpose of ++ * resetting the error count after a transaction error ++ * occurs. 
The core will continue transferring data. ++ */ ++ qtd->error_count = 0; ++ goto handle_nak_done; ++ } ++ ++ /* ++ * NAK interrupts normally occur during OUT transfers in DMA ++ * or Slave mode. For IN transfers, more requests will be ++ * queued as request queue space is available. ++ */ ++ qtd->error_count = 0; ++ ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_NAK); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will ++ * start/continue. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_ISOCHRONOUS: ++ /* Should never get called for isochronous transfers. */ ++ BUG(); ++ break; ++ } ++ ++ handle_nak_done: ++ disable_hc_int(hc_regs, nak); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel ACK interrupt. This interrupt is enabled when ++ * performing the PING protocol in Slave mode, when errors occur during ++ * either Slave mode or DMA mode, and during Start Split transactions. ++ */ ++static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "ACK Received--\n", hc->hc_num); ++ ++ if (hc->do_split) { ++ /* ++ * Handle ACK on SSPLIT. ++ * ACK should not occur in CSPLIT. ++ */ ++ if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) { ++ qtd->ssplit_out_xfer_count = hc->xfer_len; ++ } ++ if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) { ++ /* Don't need complete for isochronous out transfers. 
*/ ++ qtd->complete_split = 1; ++ } ++ ++ /* ISOC OUT */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ switch (hc->xact_pos) { ++ case DWC_HCSPLIT_XACTPOS_ALL: ++ break; ++ case DWC_HCSPLIT_XACTPOS_END: ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ break; ++ case DWC_HCSPLIT_XACTPOS_BEGIN: ++ case DWC_HCSPLIT_XACTPOS_MID: ++ /* ++ * For BEGIN or MID, calculate the length for ++ * the next microframe to determine the correct ++ * SSPLIT token, either MID or END. ++ */ ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index]; ++ qtd->isoc_split_offset += 188; ++ ++ if ((frame_desc->length - qtd->isoc_split_offset) <= 188) { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; ++ } else { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; ++ } ++ ++ } ++ break; ++ } ++ } else { ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } else { ++ qtd->error_count = 0; ++ ++ if (hc->qh->ping_state) { ++ hc->qh->ping_state = 0; ++ /* ++ * Halt the channel so the transfer can be re-started ++ * from the appropriate point. This only happens in ++ * Slave mode. In DMA mode, the ping_state is cleared ++ * when the transfer is started because the core ++ * automatically executes the PING, then the transfer. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } ++ ++ /* ++ * If the ACK occurred when _not_ in the PING state, let the channel ++ * continue transferring data after clearing the error count. ++ */ ++ ++ disable_hc_int(hc_regs, ack); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel NYET interrupt. This interrupt should only occur on ++ * Bulk and Control OUT endpoints and for complete split transactions. If a ++ * NYET occurs at the same time as a Transfer Complete interrupt, it is ++ * handled in the xfercomp interrupt handler, not here. This handler may be ++ * called in either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NYET Received--\n", hc->hc_num); ++ ++ /* ++ * NYET on CSPLIT ++ * re-do the CSPLIT immediately on non-periodic ++ */ ++ if (hc->do_split && hc->complete_split) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (dwc_full_frame_num(frnum) != ++ dwc_full_frame_num(hc->qh->sched_frame)) { ++ /* ++ * No longer in the same full speed frame. ++ * Treat this as a transaction error. ++ */ ++#if 0 ++ /** @todo Fix system performance so this can ++ * be treated as an error. Right now complete ++ * splits cannot be scheduled precisely enough ++ * due to other system activity, so this error ++ * occurs regularly in Slave mode. ++ */ ++ qtd->error_count++; ++#endif ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ /** @todo add support for isoc release */ ++ goto handle_nyet_done; ++ } ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ goto handle_nyet_done; ++ } ++ ++ hc->qh->ping_state = 1; ++ qtd->error_count = 0; ++ ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd, ++ DWC_OTG_HC_XFER_NYET); ++ save_data_toggle(hc, hc_regs, qtd); ++ ++ /* ++ * Halt the channel and re-start the transfer so the PING ++ * protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ ++handle_nyet_done: ++ disable_hc_int(hc_regs, nyet); ++ return 1; ++} ++ ++/** ++ * Handles a host channel babble interrupt. This handler may be called in ++ * either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Babble Error--\n", hc->hc_num); ++ if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW); ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR); ++ } else { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_BABBLE_ERR); ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ disable_hc_int(hc_regs, bblerr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel AHB error interrupt. This handler is only called in ++ * DMA mode. ++ */ ++static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ struct urb *urb = qtd->urb; ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "AHB Error--\n", hc->hc_num); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num); ++ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); ++ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); ++ DWC_ERROR(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_ERROR(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_ERROR(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_ERROR(" Interval: %d\n", urb->interval); ++ ++ dwc_otg_hcd_complete_urb(hcd, urb, -EIO); ++ ++ /* ++ * Force a channel halt. Don't call halt_channel because that won't ++ * write to the HCCHARn register in DMA mode to force the halt. ++ */ ++ dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR); ++ ++ disable_hc_int(hc_regs, ahberr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel transaction error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transaction Error--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ qtd->error_count++; ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count++; ++ if (hc->do_split && hc->complete_split) { ++ qtd->complete_split = 0; ++ } ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_XACT_ERR); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xacterr); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel frame overrun interrupt. This handler may be called ++ * in either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Frame Overrun--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ break; ++ case PIPE_INTERRUPT: ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, frmovrun); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel data toggle error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Data Toggle Error--\n", hc->hc_num); ++ ++ if (hc->ep_is_in) { ++ qtd->error_count = 0; ++ } else { ++ DWC_ERROR("Data Toggle Error on OUT transfer," ++ "channel %d\n", hc->hc_num); ++ } ++ ++ disable_hc_int(hc_regs, datatglerr); ++ ++ return 1; ++} ++ ++#ifdef DEBUG ++/** ++ * This function is for debug only. It checks that a valid halt status is set ++ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is ++ * taken and a warning is issued. ++ * @return 1 if halt status is ok, 0 otherwise. ++ */ ++static inline int halt_status_ok(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hcsplt_data_t hcsplt; ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { ++ /* ++ * This code is here only as a check. This condition should ++ * never happen. 
Ignore the halt if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ DWC_WARN("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, " ++ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " ++ "hcint 0x%08x, hcintmsk 0x%08x, " ++ "hcsplt 0x%08x, qtd->complete_split %d\n", ++ __func__, hc->hc_num, hcchar.d32, hctsiz.d32, ++ hcint.d32, hcintmsk.d32, ++ hcsplt.d32, qtd->complete_split); ++ ++ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", ++ __func__, hc->hc_num); ++ DWC_WARN("\n"); ++ clear_hc_int(hc_regs, chhltd); ++ return 0; ++ } ++ ++ /* ++ * This code is here only as a check. hcchar.chdis should ++ * never be set when the halt interrupt occurs. Halt the ++ * channel again if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: hcchar.chdis set unexpectedly, " ++ "hcchar 0x%08x, trying to halt again\n", ++ __func__, hcchar.d32); ++ clear_hc_int(hc_regs, chhltd); ++ hc->halt_pending = 0; ++ halt_channel(hcd, hc, qtd, hc->halt_status); ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++/** ++ * Handles a host Channel Halted interrupt in DMA mode. This handler ++ * determines the reason the channel halted and proceeds accordingly. ++ */ ++static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ int out_nak_enh = 0; ++ ++ /* For core with OUT NAK enhancement, the flow for high- ++ * speed CONTROL/BULK OUT is handled a little differently. 
++ */ ++ if (hcd->core_if->snpsid >= 0x4F54271A) { ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in && ++ (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK)) { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement enabled\n"); ++ out_nak_enh = 1; ++ } else { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n"); ++ } ++ } else { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, no core support\n"); ++ } ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Just release the channel. A dequeue can happen on a ++ * transfer timeout. In the case of an AHB Error, the channel ++ * was forced to halt because there's no way to gracefully ++ * recover. ++ */ ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ return; ++ } ++ ++ /* Read the HCINTn register to determine the cause for the halt. */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ ++ if (hcint.b.xfercomp) { ++ /** @todo This is here because of a possible hardware bug. Spec ++ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT ++ * interrupt w/ACK bit set should occur, but I only see the ++ * XFERCOMP bit, even with it masked out. This is a workaround ++ * for that behavior. Should fix this when hardware is fixed. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } ++ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.stall) { ++ handle_hc_stall_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.xacterr) { ++ if (out_nak_enh) { ++ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) { ++ printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n"); ++ qtd->error_count = 0; ++ } else { ++ printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n"); ++ } ++ } ++ ++ /* ++ * Must handle xacterr before nak or ack. 
Could get a xacterr ++ * at the same time as either of these on a BULK/CONTROL OUT ++ * that started with a PING. The xacterr takes precedence. ++ */ ++ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd); ++ } else if (!out_nak_enh) { ++ if (hcint.b.nyet) { ++ /* ++ * Must handle nyet before nak or ack. Could get a nyet at the ++ * same time as either of those on a BULK/CONTROL OUT that ++ * started with a PING. The nyet takes precedence. ++ */ ++ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.bblerr) { ++ handle_hc_babble_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.frmovrun) { ++ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.nak && !hcintmsk.b.nak) { ++ /* ++ * If nak is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the nak is handled by ++ * the nak interrupt handler, not here. Handle nak here for ++ * BULK/CONTROL OUT transfers, which halt on a NAK to allow ++ * rewinding the buffer pointer. ++ */ ++ handle_hc_nak_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.ack && !hcintmsk.b.ack) { ++ /* ++ * If ack is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the ack is handled by ++ * the ack interrupt handler, not here. Handle ack here for ++ * split transfers. Start splits halt on ACK. ++ */ ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } else { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * A periodic transfer halted with no other channel ++ * interrupts set. Assume it was halted by the core ++ * because it could not be completed in its scheduled ++ * (micro)frame. 
++ */ ++#ifdef DEBUG ++ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", ++ __func__, hc->hc_num); ++#endif ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE); ++ } else { ++ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " ++ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n", ++ __func__, hc->hc_num, hcint.d32, ++ dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts)); ++ } ++ } ++ } else { ++ printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32); ++ } ++} ++ ++/** ++ * Handles a host channel Channel Halted interrupt. ++ * ++ * In slave mode, this handler is called only when the driver specifically ++ * requests a halt. This occurs during handling other host channel interrupts ++ * (e.g. nak, xacterr, stall, nyet, etc.). ++ * ++ * In DMA mode, this is the interrupt that occurs when the core has finished ++ * processing a transfer on a channel. Other host channel interrupts (except ++ * ahberr) are disabled in DMA mode. 
++ */ ++static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Channel Halted--\n", hc->hc_num); ++ ++ if (hcd->core_if->dma_enable) { ++ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd); ++ } else { ++#ifdef DEBUG ++ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) { ++ return 1; ++ } ++#endif ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ } ++ ++ return 1; ++} ++ ++/** Handles interrupt for a specific Host Channel */ ++int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num) ++{ ++ int retval = 0; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ dwc_hc_t *hc; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[num]; ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num]; ++ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", ++ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); ++ hcint.d32 = hcint.d32 & hcintmsk.d32; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ if (hcint.b.chhltd && hcint.d32 != 0x2) { ++ hcint.b.chhltd = 0; ++ } ++ } ++ ++ if (hcint.b.xfercomp) { ++ retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ /* ++ * If NYET occurred at same time as Xfer Complete, the NYET is ++ * handled by the Xfer Complete interrupt handler. Don't want ++ * to call the NYET interrupt handler in this case. 
++ */ ++ hcint.b.nyet = 0; ++ } ++ if (hcint.b.chhltd) { ++ retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ahberr) { ++ retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.stall) { ++ retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nak) { ++ retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ack) { ++ retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nyet) { ++ retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.xacterr) { ++ retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.bblerr) { ++ retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.frmovrun) { ++ retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.datatglerr) { ++ retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_hcd_queue.c +@@ -0,0 +1,716 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_queue.c $ ++ * $Revision: #33 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. 
You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the functions to manage Queue Heads and Queue ++ * Transfer Descriptors. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++/** ++ * This function allocates and initializes a QH. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. 
++ * ++ * @return Returns pointer to the newly allocated QH, or NULL on error. */ ++dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *hcd, struct urb *urb) ++{ ++ dwc_otg_qh_t *qh; ++ ++ /* Allocate memory */ ++ /** @todo add memflags argument */ ++ qh = dwc_otg_hcd_qh_alloc (); ++ if (qh == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qh_init (hcd, qh, urb); ++ return qh; ++} ++ ++/** Free each QTD in the QH's QTD-list then free the QH. QH should already be ++ * removed from a list. QTD list should already be empty if called from URB ++ * Dequeue. ++ * ++ * @param[in] hcd HCD instance. ++ * @param[in] qh The QH to free. ++ */ ++void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_otg_qtd_t *qtd; ++ struct list_head *pos; ++ //unsigned long flags; ++ ++ /* Free each QTD in the QTD list */ ++ ++#if CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ for (pos = qh->qtd_list.next; ++ pos != &qh->qtd_list; ++ pos = qh->qtd_list.next) ++ { ++ list_del (pos); ++ qtd = dwc_list_to_qtd (pos); ++ dwc_otg_hcd_qtd_free (qtd); ++ } ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ kfree (qh); ++ return; ++} ++ ++/** Initializes a QH structure. ++ * ++ * @param[in] hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] qh The QH to init. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. 
*/ ++#define SCHEDULE_SLOP 10 ++void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb) ++{ ++ char *speed, *type; ++ memset (qh, 0, sizeof (dwc_otg_qh_t)); ++ ++ /* Initialize QH */ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ qh->ep_type = USB_ENDPOINT_XFER_CONTROL; ++ break; ++ case PIPE_BULK: ++ qh->ep_type = USB_ENDPOINT_XFER_BULK; ++ break; ++ case PIPE_ISOCHRONOUS: ++ qh->ep_type = USB_ENDPOINT_XFER_ISOC; ++ break; ++ case PIPE_INTERRUPT: ++ qh->ep_type = USB_ENDPOINT_XFER_INT; ++ break; ++ } ++ ++ qh->ep_is_in = usb_pipein(urb->pipe) ? 1 : 0; ++ ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe))); ++ INIT_LIST_HEAD(&qh->qtd_list); ++ INIT_LIST_HEAD(&qh->qh_list_entry); ++ qh->channel = NULL; ++ ++ /* FS/LS Enpoint on HS Hub ++ * NOT virtual root hub */ ++ qh->do_split = 0; ++ if (((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL)) && ++ (urb->dev->tt) && (urb->dev->tt->hub) && (urb->dev->tt->hub->devnum != 1)) ++ { ++ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", ++ usb_pipeendpoint(urb->pipe), urb->dev->tt->hub->devnum, ++ urb->dev->ttport); ++ qh->do_split = 1; ++ } ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT || ++ qh->ep_type == USB_ENDPOINT_XFER_ISOC) { ++ /* Compute scheduling parameters once and save them. */ ++ hprt0_data_t hprt; ++ ++ /** @todo Account for split transfers in the bus time. */ ++ int bytecount = dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp); ++ qh->usecs = usb_calc_bus_time(urb->dev->speed, ++ usb_pipein(urb->pipe), ++ (qh->ep_type == USB_ENDPOINT_XFER_ISOC), ++ bytecount); ++ ++ /* Start in a slightly future (micro)frame. */ ++ qh->sched_frame = dwc_frame_num_inc(hcd->frame_number, ++ SCHEDULE_SLOP); ++ qh->interval = urb->interval; ++#if 0 ++ /* Increase interrupt polling rate for debugging. 
*/ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ qh->interval = 8; ++ } ++#endif ++ hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0); ++ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && ++ ((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL))) { ++ qh->interval *= 8; ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ ++ } ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", ++ urb->dev->devnum); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); ++ ++ switch(urb->dev->speed) { ++ case USB_SPEED_LOW: ++ speed = "low"; ++ break; ++ case USB_SPEED_FULL: ++ speed = "full"; ++ break; ++ case USB_SPEED_HIGH: ++ speed = "high"; ++ break; ++ default: ++ speed = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed); ++ ++ switch (qh->ep_type) { ++ case USB_ENDPOINT_XFER_ISOC: ++ type = "isochronous"; ++ break; ++ case USB_ENDPOINT_XFER_INT: ++ type = "interrupt"; ++ break; ++ case USB_ENDPOINT_XFER_CONTROL: ++ type = "control"; ++ break; ++ case USB_ENDPOINT_XFER_BULK: ++ type = "bulk"; ++ break; ++ default: ++ type = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type); ++ ++#ifdef DEBUG ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", ++ qh->usecs); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", ++ qh->interval); ++ } ++#endif ++ ++ return; ++} ++ ++/** ++ * Checks that a channel is available for a periodic transfer. ++ * ++ * @return 0 if successful, negative error code otherise. 
++ */ ++static int periodic_channel_available(dwc_otg_hcd_t *hcd) ++{ ++ /* ++ * Currently assuming that there is a dedicated host channnel for each ++ * periodic transaction plus at least one host channel for ++ * non-periodic transactions. ++ */ ++ int status; ++ int num_channels; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels) && ++ (hcd->periodic_channels < num_channels - 1)) { ++ status = 0; ++ } ++ else { ++ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", ++ __func__, num_channels, hcd->periodic_channels, ++ hcd->non_periodic_channels); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that there is sufficient bandwidth for the specified QH in the ++ * periodic schedule. For simplicity, this calculation assumes that all the ++ * transfers in the periodic schedule may occur in the same (micro)frame. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH containing periodic bandwidth required. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint16_t max_claimed_usecs; ++ ++ status = 0; ++ ++ if (hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) { ++ /* ++ * High speed mode. ++ * Max periodic usecs is 80% x 125 usec = 100 usec. ++ */ ++ max_claimed_usecs = 100 - qh->usecs; ++ } else { ++ /* ++ * Full speed mode. ++ * Max periodic usecs is 90% x 1000 usec = 900 usec. 
++ */ ++ max_claimed_usecs = 900 - qh->usecs; ++ } ++ ++ if (hcd->periodic_usecs > max_claimed_usecs) { ++ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n", ++ __func__, hcd->periodic_usecs, qh->usecs); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that the max transfer size allowed in a host channel is large enough ++ * to handle the maximum data transfer in a single (micro)frame for a periodic ++ * transfer. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for a periodic endpoint. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_max_xfer_size(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint32_t max_xfer_size; ++ uint32_t max_channel_xfer_size; ++ ++ status = 0; ++ ++ max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp); ++ max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size; ++ ++ if (max_xfer_size > max_channel_xfer_size) { ++ DWC_NOTICE("%s: Periodic xfer length %d > " ++ "max xfer length for channel %d\n", ++ __func__, max_xfer_size, max_channel_xfer_size); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Schedules an interrupt or isochronous transfer in the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. The QH should already contain the ++ * scheduling information. ++ * ++ * @return 0 if successful, negative error code otherwise. 
++ */ ++static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status = 0; ++ ++ status = periodic_channel_available(hcd); ++ if (status) { ++ DWC_NOTICE("%s: No host channel available for periodic " ++ "transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_periodic_bandwidth(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Insufficient periodic bandwidth for " ++ "periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_max_xfer_size(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Channel max transfer size too small " ++ "for periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive); ++ ++ /* Reserve the periodic channel. */ ++ hcd->periodic_channels++; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs += qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated += qh->usecs / qh->interval; ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++ ++ return status; ++} ++ ++/** ++ * This function adds a QH to either the non periodic or periodic schedule if ++ * it is not already in the schedule. If the QH is already in the schedule, no ++ * action is taken. ++ * ++ * @return 0 if successful, negative error code otherwise. 
++ */ ++int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ //unsigned long flags; ++ int status = 0; ++ ++#if CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ ++ if (!list_empty(&qh->qh_list_entry)) { ++ /* QH already in a schedule. */ ++ goto done; ++ } ++ ++ /* Add the new QH to the appropriate schedule */ ++ if (dwc_qh_is_non_per(qh)) { ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->non_periodic_sched_inactive); ++ } else { ++ status = schedule_periodic(hcd, qh); ++ } ++ ++ done: ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ return status; ++} ++ ++/** ++ * Removes an interrupt or isochronous transfer from the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. ++ */ ++static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ list_del_init(&qh->qh_list_entry); ++ ++ /* Release the periodic channel reservation. */ ++ hcd->periodic_channels--; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs -= qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
*/ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -= qh->usecs / qh->interval; ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++} ++ ++/** ++ * Removes a QH from either the non-periodic or periodic schedule. Memory is ++ * not freed. ++ * ++ * @param[in] hcd The HCD state structure. ++ * @param[in] qh QH to remove from schedule. */ ++void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ //unsigned long flags; ++ ++#if CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (list_empty(&qh->qh_list_entry)) { ++ /* QH is not in a schedule. */ ++ goto done; ++ } ++ ++ if (dwc_qh_is_non_per(qh)) { ++ if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ list_del_init(&qh->qh_list_entry); ++ } else { ++ deschedule_periodic(hcd, qh); ++ } ++ ++ done: ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++ return; ++} ++ ++/** ++ * Deactivates a QH. For non-periodic QHs, removes the QH from the active ++ * non-periodic schedule. The QH is added to the inactive non-periodic ++ * schedule if any QTDs are still attached to the QH. ++ * ++ * For periodic QHs, the QH is removed from the periodic queued schedule. 
If ++ * there are any QTDs still attached to the QH, the QH is added to either the ++ * periodic inactive schedule or the periodic ready schedule and its next ++ * scheduled frame is calculated. The QH is placed in the ready schedule if ++ * the scheduled frame has been reached already. Otherwise it's placed in the ++ * inactive schedule. If there are no QTDs attached to the QH, the QH is ++ * completely removed from the periodic schedule. ++ */ ++void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (dwc_qh_is_non_per(qh)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ if (!list_empty(&qh->qtd_list)) { ++ /* Add back to inactive non-periodic schedule. */ ++ dwc_otg_hcd_qh_add(hcd, qh); ++ } ++ } else { ++ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (qh->do_split) { ++ /* Schedule the next continuing periodic split transfer */ ++ if (sched_next_periodic_split) { ++ ++ qh->sched_frame = frame_number; ++ if (dwc_frame_num_le(frame_number, ++ dwc_frame_num_inc(qh->start_split_frame, 1))) { ++ /* ++ * Allow one frame to elapse after start ++ * split microframe before scheduling ++ * complete split, but DONT if we are ++ * doing the next start split in the ++ * same frame for an ISOC out. 
++ */ ++ if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1); ++ } ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame, ++ qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ } ++ ++ if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ } else { ++ /* ++ * Remove from periodic_sched_queued and move to ++ * appropriate queue. ++ */ ++ if (qh->sched_frame == frame_number) { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_ready); ++ } else { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_inactive); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** ++ * This function allocates and initializes a QTD. ++ * ++ * @param[in] urb The URB to create a QTD from. Each URB-QTD pair will end up ++ * pointing to each other so each pair should have a unique correlation. ++ * ++ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ ++dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb) ++{ ++ dwc_otg_qtd_t *qtd; ++ ++ qtd = dwc_otg_hcd_qtd_alloc (); ++ if (qtd == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qtd_init (qtd, urb); ++ return qtd; ++} ++ ++/** ++ * Initializes a QTD structure. ++ * ++ * @param[in] qtd The QTD to initialize. ++ * @param[in] urb The URB to use for initialization. */ ++void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb) ++{ ++ memset (qtd, 0, sizeof (dwc_otg_qtd_t)); ++ qtd->urb = urb; ++ if (usb_pipecontrol(urb->pipe)) { ++ /* ++ * The only time the QTD data toggle is used is on the data ++ * phase of control transfers. 
This phase always starts with ++ * DATA1. ++ */ ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ qtd->control_phase = DWC_OTG_CONTROL_SETUP; ++ } ++ ++ /* start split */ ++ qtd->complete_split = 0; ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ ++ /* Store the qtd ptr in the urb to reference what QTD. */ ++ urb->hcpriv = qtd; ++ return; ++} ++ ++/** ++ * This function adds a QTD to the QTD-list of a QH. It will find the correct ++ * QH to place the QTD into. If it does not find a QH, then it will create a ++ * new QH. If the QH to which the QTD is added is not currently scheduled, it ++ * is placed into the proper schedule based on its EP type. ++ * ++ * @param[in] qtd The QTD to add ++ * @param[in] dwc_otg_hcd The DWC HCD structure ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, ++ dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ struct usb_host_endpoint *ep; ++ dwc_otg_qh_t *qh; ++ unsigned long flags; ++ int retval = 0; ++ ++ struct urb *urb = qtd->urb; ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ /* ++ * Get the QH which holds the QTD-list to insert to. Create QH if it ++ * doesn't exist. 
++ */ ++ ep = dwc_urb_to_endpoint(urb); ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ if (qh == NULL) { ++ qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb); ++ if (qh == NULL) { ++ goto done; ++ } ++ ep->hcpriv = qh; ++ } ++ ++ retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh); ++ if (retval == 0) { ++ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); ++ } ++ ++ done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_pcd.c +@@ -0,0 +1,2542 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ ++ * $Revision: #70 $ ++ * $Date: 2008/10/14 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++ ++/** @file ++ * This file implements the Peripheral Controller Driver. ++ * ++ * The Peripheral Controller Driver (PCD) is responsible for ++ * translating requests from the Function Driver into the appropriate ++ * actions on the DWC_otg controller. It isolates the Function Driver ++ * from the specifics of the controller by providing an API to the ++ * Function Driver. ++ * ++ * The Peripheral Controller Driver for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. ++ * (Gadget Driver is the Linux terminology for a Function Driver.) ++ * ++ * The Linux Gadget API is defined in the header file ++ * . The USB EP operations API is ++ * defined in the structure usb_ep_ops and the USB ++ * Controller API is defined in the structure ++ * usb_gadget_ops. ++ * ++ * An important function of the PCD is managing interrupts generated ++ * by the DWC_otg controller. The implementation of the DWC_otg device ++ * mode interrupt service routines is in dwc_otg_pcd_intr.c. ++ * ++ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc). 
++ * @todo Does it work when the request size is greater than DEPTSIZ ++ * transfer size ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++//#include ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include ++#else ++# include ++#endif ++ ++//#include ++ ++ ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++ ++ ++ ++/** ++ * Static PCD pointer for use in usb_gadget_register_driver and ++ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init. ++ */ ++static dwc_otg_pcd_t *s_pcd = 0; ++ ++ ++/* Display the contents of the buffer */ ++extern void dump_msg(const u8 *buf, unsigned int length); ++ ++ ++/** ++ * This function completes a request. It call's the request call back. ++ */ ++void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req, ++ int status) ++{ ++ unsigned stopped = ep->stopped; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep); ++ list_del_init(&req->queue); ++ ++ if (req->req.status == -EINPROGRESS) { ++ req->req.status = status; ++ } else { ++ status = req->req.status; ++ } ++ ++ /* don't modify queue heads during completion callback */ ++ ep->stopped = 1; ++ SPIN_UNLOCK(&ep->pcd->lock); ++ req->req.complete(&ep->ep, &req->req); ++ SPIN_LOCK(&ep->pcd->lock); ++ ++ if (ep->pcd->request_pending > 0) { ++ --ep->pcd->request_pending; ++ } ++ ++ ep->stopped = stopped; ++} ++ ++/** ++ * This function terminates all the requsts in the EP request queue. ++ */ ++void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ ep->stopped = 1; ++ ++ /* called with irqs blocked?? 
*/ ++ while (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ dwc_otg_request_done(ep, req, -ESHUTDOWN); ++ } ++} ++ ++/* USB Endpoint Operations */ ++/* ++ * The following sections briefly describe the behavior of the Gadget ++ * API endpoint operations implemented in the DWC_otg driver ++ * software. Detailed descriptions of the generic behavior of each of ++ * these functions can be found in the Linux header file ++ * include/linux/usb_gadget.h. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper ++ * function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper ++ * functions. Within each section, the corresponding DWC_otg PCD ++ * function name is specified. ++ * ++ */ ++ ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t PerTxMsk = 1; ++ int i; ++ for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) ++ { ++ if((PerTxMsk & core_if->p_tx_msk) == 0) { ++ core_if->p_tx_msk |= PerTxMsk; ++ return i + 1; ++ } ++ PerTxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk; ++} ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t TxMsk = 1; ++ int i; ++ ++ for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) ++ { ++ if((TxMsk & core_if->tx_msk) == 0) { ++ core_if->tx_msk |= TxMsk; ++ return i + 1; ++ } ++ TxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases 
periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk; ++} ++ ++/** ++ * This function is called by the Gadget Driver for each EP to be ++ * configured for the current configuration (SET_CONFIGURATION). ++ * ++ * This function initializes the dwc_otg_ep_t data structure, and then ++ * calls dwc_otg_ep_activate. ++ */ ++static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep, ++ const struct usb_endpoint_descriptor *ep_desc) ++{ ++ dwc_otg_pcd_ep_t *ep = 0; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep_desc || ep->desc || ++ ep_desc->bDescriptorType != USB_DT_ENDPOINT) { ++ DWC_WARN("%s, bad ep or descriptor\n", __func__); ++ return -EINVAL; ++ } ++ if (ep == &ep->pcd->ep0) { ++ DWC_WARN("%s, bad ep(0)\n", __func__); ++ return -EINVAL; ++ } ++ ++ /* Check FIFO size? */ ++ if (!ep_desc->wMaxPacketSize) { ++ DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name); ++ return -ERANGE; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ ep->desc = ep_desc; ++ ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize); ++ ++ /* ++ * Activate the EP ++ */ ++ ep->stopped = 0; ++ ++ ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0; ++ ep->dwc_ep.maxpacket = ep->ep.maxpacket; ++ ++ ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if(ep->dwc_ep.is_in) { ++ if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) { ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) { ++ /* ++ * if ISOC EP then assign a Periodic Tx FIFO. 
++ */ ++ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if); ++ } ++ } else { ++ /* ++ * if Dedicated FIFOs mode is on then assign a Tx FIFO. ++ */ ++ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if); ++ ++ } ++ } ++ /* Set initial data PID. */ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) { ++ ep->dwc_ep.data_pid_start = 0; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n", ++ ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"), ++ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) { ++ ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return 0; ++} ++ ++/** ++ * This function is called when an EP is disabled due to disconnect or ++ * change in configuration. Any pending requests will terminate with a ++ * status of -ESHUTDOWN. ++ * ++ * This function modifies the dwc_otg_ep_t data structure for this EP, ++ * and then calls dwc_otg_ep_deactivate. ++ */ ++static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep); ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep->desc) { ++ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, ++ usb_ep ? 
ep->ep.name : NULL); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_otg_request_nuke(ep); ++ ++ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ ep->desc = 0; ++ ep->stopped = 1; ++ ++ if(ep->dwc_ep.is_in) { ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ } ++ ++ /* Free DMA Descriptors */ ++ pcd = ep->pcd; ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name); ++ return 0; ++} ++ ++ ++/** ++ * This function allocates a request object to use with the specified ++ * endpoint. ++ * ++ * @param ep The endpoint to be used with with the request ++ * @param gfp_flags the GFP_* flags to use. ++ */ ++static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags); ++ if (0 == ep) { ++ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n"); ++ return 0; ++ } ++ req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags); ++ if (0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "request allocation failed!\n"); ++ return 0; ++ } ++ memset(req, 0, sizeof(dwc_otg_pcd_request_t)); ++ req->req.dma = DMA_ADDR_INVALID; ++ INIT_LIST_HEAD(&req->queue); ++ return &req->req; ++} ++ ++/** ++ * This function frees a request object. 
++ * ++ * @param ep The endpoint associated with the request ++ * @param req The request being freed ++ */ ++static void dwc_otg_pcd_free_request(struct usb_ep *ep, ++ struct usb_request *req) ++{ ++ dwc_otg_pcd_request_t *request; ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req); ++ ++ if (0 == ep || 0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "Invalid ep or req argument!\n"); ++ return; ++ } ++ ++ request = container_of(req, dwc_otg_pcd_request_t, req); ++ kfree(request); ++} ++ ++#if 0 ++/** ++ * This function allocates an I/O buffer to be used for a transfer ++ * to/from the specified endpoint. ++ * ++ * @param usb_ep The endpoint to be used with with the request ++ * @param bytes The desired number of bytes for the buffer ++ * @param dma Pointer to the buffer's DMA address; must be valid ++ * @param gfp_flags the GFP_* flags to use. ++ * @return address of a new buffer or null is buffer could not be allocated. ++ */ ++static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes, ++ dma_addr_t *dma, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ void *buf; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes, ++ dma, gfp_flags); ++ ++ /* Check dword alignment */ ++ if ((bytes & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer size is not a multiple of" ++ "DWORD size (%d)",__func__, bytes); ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags); ++ } ++ else { ++ buf = kmalloc(bytes, gfp_flags); ++ } ++ ++ /* Check dword alignment */ ++ if (((int)buf & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer is not DWORD aligned (%p)", ++ __func__, buf); ++ } ++ ++ return buf; ++} ++ ++/** ++ * This function frees an I/O buffer that was allocated by alloc_buffer. 
++ * ++ * @param usb_ep the endpoint associated with the buffer ++ * @param buf address of the buffer ++ * @param dma The buffer's DMA address ++ * @param bytes The number of bytes of the buffer ++ */ ++static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf, ++ dma_addr_t dma, unsigned bytes) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, bytes, buf, dma); ++ } ++ else { ++ kfree(buf); ++ } ++} ++#endif ++ ++/** ++ * This function is used to submit an I/O Request to an EP. ++ * ++ * - When the request completes the request's completion callback ++ * is called to return the request to the driver. ++ * - An EP, except control EPs, may have multiple requests ++ * pending. ++ * - Once submitted the request cannot be examined or modified. ++ * - Each request is turned into one or more packets. ++ * - A BULK EP can queue any amount of data; the transfer is ++ * packetized. ++ * - Zero length Packets are specified with the request 'zero' ++ * flag. 
++ */ ++static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ int prevented = 0; ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags = 0; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", ++ __func__, usb_ep, usb_req, gfp_flags); ++ ++ req = container_of(usb_req, dwc_otg_pcd_request_t, req); ++ if (!usb_req || !usb_req->complete || !usb_req->buf || ++ !list_empty(&req->queue)) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ ++ if (!GET_CORE_IF(pcd)->core_params->opt) { ++ if (ep->dwc_ep.num != 0) { ++ DWC_ERROR("%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ } ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++#if defined(DEBUG) & defined(VERBOSE) ++ dump_msg(usb_req->buf, usb_req->length); ++#endif ++ ++ usb_req->status = -EINPROGRESS; ++ usb_req->actual = 0; ++ ++ /* ++ * For EP0 IN without premature status, zlp is required? ++ */ ++ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { ++ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name); ++ //_req->zero = 1; ++ } ++ ++ /* Start the transfer */ ++ if (list_empty(&ep->queue) && !ep->stopped) { ++ /* EP0 Transfer? 
*/ ++ if (ep->dwc_ep.num == 0) { ++ switch (pcd->ep0state) { ++ case EP0_IN_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_DATA_PHASE\n", ++ __func__); ++ break; ++ ++ case EP0_OUT_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_OUT_DATA_PHASE\n", ++ __func__); ++ if (pcd->request_config) { ++ /* Complete STATUS PHASE */ ++ ep->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ break; ++ ++ case EP0_IN_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_STATUS_PHASE\n", ++ __func__); ++ break; ++ ++ default: ++ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", ++ pcd->ep0state); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EL2HLT; ++ } ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_len = usb_req->length; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.xfer_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ ep_check_and_patch_dma_addr(ep); ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ else { ++ ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = usb_req->length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ ep_check_and_patch_dma_addr(ep); ++ 
dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ } ++ ++ if ((req != 0) || prevented) { ++ ++pcd->request_pending; ++ list_add_tail(&req->queue, &ep->queue); ++ if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { ++ /** @todo NGS Create a function for this. */ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ if(&GET_CORE_IF(pcd)->multiproc_int_enable) { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num], ++ 0, diepmsk.d32); ++ } else { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++/** ++ * This function cancels an I/O request from an EP. ++ */ ++static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req) ++{ ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) { ++ DWC_WARN("%s, bad argument\n", __func__); ++ return -EINVAL; ++ } ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name, ++ ep->dwc_ep.is_in ? 
"IN" : "OUT", ++ usb_req); ++ ++ /* make sure it's actually queued on this endpoint */ ++ list_for_each_entry(req, &ep->queue, queue) ++ { ++ if (&req->req == usb_req) { ++ break; ++ } ++ } ++ ++ if (&req->req != usb_req) { ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EINVAL; ++ } ++ ++ if (!list_empty(&req->queue)) { ++ dwc_otg_request_done(ep, req, -ECONNRESET); ++ } ++ else { ++ req = 0; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return req ? 0 : -EOPNOTSUPP; ++} ++ ++/** ++ * usb_ep_set_halt stalls an endpoint. ++ * ++ * usb_ep_clear_halt clears an endpoint halt and resets its data ++ * toggle. ++ * ++ * Both of these functions are implemented with the same underlying ++ * function. The behavior depends on the value argument. ++ * ++ * @param[in] usb_ep the Endpoint to halt or clear halt. ++ * @param[in] value ++ * - 0 means clear_halt. ++ * - 1 means set_halt, ++ * - 2 means clear stall lock flag. ++ * - 3 means set stall lock flag. ++ */ ++static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value) ++{ ++ int retval = 0; ++ unsigned long flags; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) || ++ ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ if (!list_empty(&ep->queue)) { ++ DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else if (value == 0) { ++ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ else if(value == 1) { ++ if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) { ++ dtxfsts_data_t txstatus; ++ fifosize_data_t txfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]); ++ 
txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts); ++ ++ if(txstatus.b.txfspcavail < txfifosize.b.depth) { ++ DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else if (value == 2) { ++ ep->dwc_ep.stall_clear_flag = 0; ++ } ++ else if (value == 3) { ++ ep->dwc_ep.stall_clear_flag = 1; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ return retval; ++} ++ ++/** ++ * This function allocates a DMA Descriptor chain for the Endpoint ++ * buffer to be used for a transfer to/from the specified endpoint. ++ */ ++dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count) ++{ ++ ++ return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL); ++} ++ ++LIST_HEAD(tofree_list); ++spinlock_t tofree_list_lock=SPIN_LOCK_UNLOCKED; ++ ++struct free_param { ++ struct list_head list; ++ ++ void* addr; ++ dma_addr_t dma_addr; ++ uint32_t size; ++}; ++void free_list_agent_fn(void *data){ ++ struct list_head free_list; ++ struct free_param *cur,*next; ++ ++ spin_lock(&tofree_list_lock); ++ list_add(&free_list,&tofree_list); ++ list_del_init(&tofree_list); ++ spin_unlock(&tofree_list_lock); ++ ++ list_for_each_entry_safe(cur,next,&free_list,list){ ++ if(cur==&free_list) break; ++ dma_free_coherent(NULL,cur->size,cur->addr,cur->dma_addr); ++ list_del(&cur->list); ++ kfree(cur); ++ } ++} ++DECLARE_WORK(free_list_agent,free_list_agent_fn); ++/** ++ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc. 
++ */ ++void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count) ++{ ++ if(irqs_disabled()){ ++ struct free_param* fp=kmalloc(sizeof(struct free_param),GFP_KERNEL); ++ fp->addr=desc_addr; ++ fp->dma_addr=dma_desc_addr; ++ fp->size=count*sizeof(dwc_otg_dma_desc_t); ++ ++ spin_lock(&tofree_list_lock); ++ list_add(&fp->list,&tofree_list); ++ spin_unlock(&tofree_list_lock); ++ ++ schedule_work(&free_list_agent); ++ return ; ++ } ++ dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr); ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ ++ dsts_data_t dsts = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ int i, j; ++ ++ if(dwc_ep->is_in) ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval; ++ else ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ ++ ++ /** Allocate descriptors for double buffering */ ++ dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2); ++ if(dwc_ep->desc_addr) { ++ DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__); ++ return; ++ } ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ uint32_t data_per_desc; ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[dwc_ep->num]; ++ int offset; ++ ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma)); ++ ++ /** Buffer 0 
descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_out.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ } ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = 0; ++ ++ /** Write dma_ad into DOEPDMA register */ ++ dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ ++ } ++ /** ISO IN EP */ ++ else { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[dwc_ep->num]; ++ unsigned int frmnumber; ++ fifosize_data_t txfifosize,rxfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts); ++ rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz); ++ ++ ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 
1 : 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ ++ ++ frmnumber = dwc_ep->next_frame; ++ ++ sts.b_iso_in.framenum = frmnumber; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ /** Buffer 0 descriptors setup */ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++dma_desc; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_in.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ ++ sts.b_iso_in.ioc = 0; ++ } ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = 1; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval; ++ ++ /** Write dma_ad into diepdma register */ ++ dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.usbactep = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++} ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ * ++ */ ++ ++void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ++ if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) { ++ return; ++ } else { ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ ++ ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval; ++ ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ ep->xfer_count = 0; ++ ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.mc = ep->pkt_per_frm; ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr); ++ ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ } ++} ++ ++ ++/** ++ * This function does the setup for a data transfer for 
an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ if(ep->is_in) { ++ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm; ++ } else { ++ ep->desc_cnt = ep->pkt_cnt; ++ } ++ dwc_otg_iso_ep_start_ddma_transfer(core_if, ep); ++ } else { ++ if(core_if->pti_enh_enable) { ++ dwc_otg_iso_ep_start_buf_transfer(core_if, ep); ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++ } ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ */ ++ ++void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in == 1) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } ++ else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ /* disable the ep */ ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ ++ if(core_if->dma_desc_enable && ++ ep->iso_desc_addr && ep->iso_dma_desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2); ++ } ++ ++ /* reset varibales */ ++ ep->dma_addr0 = 0; ++ ep->dma_addr1 = 0; ++ ep->xfer_buff0 = 0; ++ ep->xfer_buff1 = 0; ++ ep->data_per_frame = 0; ++ ep->data_pattern_frame = 0; ++ ep->sync_frame = 0; ++ ep->buf_proc_intrvl = 0; ++ ep->bInterval = 0; ++ ep->proc_buf_num = 0; ++ ep->pkt_per_frm = 0; ++ ep->pkt_per_frm = 0; ++ ep->desc_cnt = 0; ++ ep->iso_desc_addr = 0; ++ ep->iso_dma_desc_addr = 0; ++} ++ ++ ++/** ++ * This function is used to submit an ISOC Transfer Request to an EP. ++ * ++ * - Every time a sync period completes the request's completion callback ++ * is called to provide data to the gadget driver. ++ * - Once submitted the request cannot be modified. ++ * - Each request is turned into periodic data packets untill ISO ++ * Transfer is stopped.. 
++ */ ++static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags = 0; ++ int32_t frm_data; ++ dwc_otg_core_if_t *core_if; ++ dcfg_data_t dcfg; ++ dsts_data_t dsts; ++ ++ ++ if (!req || !req->process_buffer || !req->buf0 || !req->buf1) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ core_if = GET_CORE_IF(pcd); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req) { ++ DWC_WARN("%s, iso request in progress\n", __func__); ++ } ++ req->status = -EINPROGRESS; ++ ++ dwc_ep->dma_addr0 = req->dma0; ++ dwc_ep->dma_addr1 = req->dma1; ++ ++ dwc_ep->xfer_buff0 = req->buf0; ++ dwc_ep->xfer_buff1 = req->buf1; ++ ++ ep->iso_req = req; ++ ++ dwc_ep->data_per_frame = req->data_per_frame; ++ ++ /** @todo - pattern data support is to be implemented in the future */ ++ dwc_ep->data_pattern_frame = req->data_pattern_frame; ++ dwc_ep->sync_frame = req->sync_frame; ++ ++ dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl; ++ ++ dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1); ++ ++ dwc_ep->proc_buf_num = 0; ++ ++ dwc_ep->pkt_per_frm = 0; ++ frm_data = ep->dwc_ep.data_per_frame; ++ while(frm_data > 0) { ++ dwc_ep->pkt_per_frm++; ++ frm_data -= ep->dwc_ep.maxpacket; ++ } ++ ++ dsts.d32 = 
dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ if(req->flags & USB_REQ_ISO_ASAP) { ++ dwc_ep->next_frame = dsts.b.soffn + 1; ++ if(dwc_ep->bInterval != 1){ ++ dwc_ep->next_frame = dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval); ++ } ++ } else { ++ dwc_ep->next_frame = req->start_frame; ++ } ++ ++ ++ if(!core_if->pti_enh_enable) { ++ dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } else { ++ dwc_ep->pkt_cnt = ++ (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval) ++ - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket; ++ } ++ ++ if(core_if->dma_desc_enable) { ++ dwc_ep->desc_cnt = ++ dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } ++ ++ dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL); ++ if(!dwc_ep->pkt_info) { ++ return -ENOMEM; ++ } ++ if(core_if->pti_enh_enable) { ++ memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt); ++ } ++ ++ dwc_ep->cur_pkt = 0; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ dwc_otg_iso_ep_start_transfer(core_if, dwc_ep); ++ ++ return 0; ++} ++ ++/** ++ * This function stops ISO EP Periodic Data Transfer. 
++ */ ++static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep); ++ ++ kfree(dwc_ep->pkt_info); ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ if(ep->iso_req != req) { ++ return -EINVAL; ++ } ++ ++ req->status = -ECONNRESET; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ ++ ep->iso_req = 0; ++ ++ return 0; ++} ++ ++/** ++ * This function is used for perodical data exchnage between PCD and gadget drivers. 
++ * for Isochronous EPs ++ * ++ * - Every time a sync period completes this function is called to ++ * perform data exchange between PCD and gadget ++ */ ++void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req) ++{ ++ int i; ++ struct usb_gadget_iso_packet_descriptor *iso_packet; ++ dwc_ep_t *dwc_ep; ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req->status == -ECONNRESET) { ++ DWC_PRINT("Device has already disconnected\n"); ++ /*Device has been disconnected*/ ++ return; ++ } ++ ++ if(dwc_ep->proc_buf_num != 0) { ++ iso_packet = ep->iso_req->iso_packet_desc0; ++ } ++ ++ else { ++ iso_packet = ep->iso_req->iso_packet_desc1; ++ } ++ ++ /* Fill in ISOC packets descriptors & pass to gadget driver*/ ++ ++ for(i = 0; i < dwc_ep->pkt_cnt; ++i) { ++ iso_packet[i].status = dwc_ep->pkt_info[i].status; ++ iso_packet[i].offset = dwc_ep->pkt_info[i].offset; ++ iso_packet[i].actual_length = dwc_ep->pkt_info[i].length; ++ dwc_ep->pkt_info[i].status = 0; ++ dwc_ep->pkt_info[i].offset = 0; ++ dwc_ep->pkt_info[i].length = 0; ++ } ++ ++ /* Call callback function to process data buffer */ ++ ep->iso_req->status = 0;/* success */ ++ ++ SPIN_UNLOCK(&ep->pcd->lock); ++ ep->iso_req->process_buffer(&ep->ep, ep->iso_req); ++ SPIN_LOCK(&ep->pcd->lock); ++} ++ ++ ++static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++) ++{ ++ struct usb_iso_request *pReq = NULL; ++ uint32_t req_size; ++ ++ ++ req_size = sizeof(struct usb_iso_request); ++ req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor))); ++ ++ ++ pReq = kmalloc(req_size, gfp_flags); ++ if (!pReq) { ++ DWC_WARN("%s, can't allocate Iso Request\n", __func__); ++ return 0; ++ } ++ pReq->iso_packet_desc0 = (void*) (pReq + 1); ++ ++ pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets; ++ ++ return pReq; ++} ++ ++static void 
dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req) ++{ ++ kfree(req); ++} ++ ++static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .ep_ops = ++ { ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++ //.alloc_buffer = dwc_otg_pcd_alloc_buffer, ++ //.free_buffer = dwc_otg_pcd_free_buffer, ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ }, ++ .iso_ep_start = dwc_otg_pcd_iso_ep_start, ++ .iso_ep_stop = dwc_otg_pcd_iso_ep_stop, ++ .alloc_iso_request = dwc_otg_pcd_alloc_iso_request, ++ .free_iso_request = dwc_otg_pcd_free_iso_request, ++}; ++ ++#else ++ ++ ++static struct usb_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++// .alloc_buffer = dwc_otg_pcd_alloc_buffer, ++// .free_buffer = dwc_otg_pcd_free_buffer, ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ ++ ++}; ++ ++#endif /* DWC_EN_ISOC */ ++/* Gadget Operations */ ++/** ++ * The following gadget operations will be implemented in the DWC_otg ++ * PCD. Functions in the API that are not described below are not ++ * implemented. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_gadget_ops. The Gadget Driver calls the ++ * wrapper function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper functions ++ * (except for ioctl, which doesn't have a wrapper function). Within ++ * each section, the corresponding DWC_otg PCD function name is ++ * specified. 
++ * ++ */ ++ ++/** ++ *Gets the USB Frame number of the last SOF. ++ */ ++static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget) ++{ ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ dwc_otg_get_frame_number(GET_CORE_IF(pcd)); ++ } ++ ++ return 0; ++} ++ ++void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd) ++{ ++ uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ gotgctl_data_t val; ++ ++ val.d32 = dwc_read_reg32(addr); ++ if (val.b.sesreq) { ++ DWC_ERROR("Session Request Already active!\n"); ++ return; ++ } ++ ++ DWC_NOTICE("Session Request Initated\n"); ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.sesreq = 1; ++ dwc_write_reg32(addr, mem.d32); ++ ++ /* Start the SRP timer */ ++ dwc_otg_pcd_start_srp_timer(pcd); ++ return; ++} ++ ++void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set) ++{ ++ dctl_data_t dctl = {.d32=0}; ++ volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl); ++ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ if (pcd->remote_wakeup_enable) { ++ if (set) { ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(addr, 0, dctl.d32); ++ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n"); ++ mdelay(1); ++ dwc_modify_reg32(addr, dctl.d32, 0); ++ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n"); ++ } ++ else { ++ } ++ } ++ else { ++ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n"); ++ } ++ } ++ return; ++} ++ ++/** ++ * Initiates Session Request Protocol (SRP) to wakeup the host if no ++ * session is in progress. If a session is already in progress, but ++ * the device is suspended, remote wakeup signaling is started. 
++ * ++ */ ++static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget) ++{ ++ unsigned long flags; ++ dwc_otg_pcd_t *pcd; ++ dsts_data_t dsts; ++ gotgctl_data_t gotgctl; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ } ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ /* ++ * This function starts the Protocol if no session is in progress. If ++ * a session is already in progress, but the device is suspended, ++ * remote wakeup signaling is started. ++ */ ++ ++ /* Check if valid session */ ++ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl)); ++ if (gotgctl.b.bsesvld) { ++ /* Check if suspend state */ ++ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts)); ++ if (dsts.b.suspsts) { ++ dwc_otg_pcd_remote_wakeup(pcd, 1); ++ } ++ } ++ else { ++ dwc_otg_pcd_initiate_srp(pcd); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++static const struct usb_gadget_ops dwc_otg_pcd_ops = ++{ ++ .get_frame = dwc_otg_pcd_get_frame, ++ .wakeup = dwc_otg_pcd_wakeup, ++ // current versions must always be self-powered ++}; ++ ++/** ++ * This function updates the otg values in the gadget structure. ++ */ ++void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset) ++{ ++ ++ if (!pcd->gadget.is_otg) ++ return; ++ ++ if (reset) { ++ pcd->b_hnp_enable = 0; ++ pcd->a_hnp_support = 0; ++ pcd->a_alt_hnp_support = 0; ++ } ++ ++ pcd->gadget.b_hnp_enable = pcd->b_hnp_enable; ++ pcd->gadget.a_hnp_support = pcd->a_hnp_support; ++ pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support; ++} ++ ++/** ++ * This function is the top level PCD interrupt handler. 
++ */ ++static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *r ++#endif ++ ) ++{ ++ dwc_otg_pcd_t *pcd = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_pcd_handle_intr(pcd); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * PCD Callback function for initializing the PCD when switching to ++ * device mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_start_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ dwc_otg_core_dev_init(GET_CORE_IF(pcd)); ++ } ++ return 1; ++} ++ ++/** ++ * PCD Callback function for stopping the PCD when switching to Host ++ * mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_stop_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd); ++ ++ dwc_otg_pcd_stop(pcd); ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_suspend_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->suspend(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_resume_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->resume(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ /* Stop the SRP timeout timer. 
*/ ++ if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) || ++ (!GET_CORE_IF(pcd)->core_params->i2c_enable)) { ++ if (GET_CORE_IF(pcd)->srp_timer_started) { ++ GET_CORE_IF(pcd)->srp_timer_started = 0; ++ del_timer(&pcd->srp_timer); ++ } ++ } ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback structure for handling mode switching. ++ */ ++static dwc_otg_cil_callbacks_t pcd_callbacks = ++{ ++ .start = dwc_otg_pcd_start_cb, ++ .stop = dwc_otg_pcd_stop_cb, ++ .suspend = dwc_otg_pcd_suspend_cb, ++ .resume_wakeup = dwc_otg_pcd_resume_cb, ++ .p = 0, /* Set at registration */ ++}; ++ ++/** ++ * This function is called when the SRP timer expires. The SRP should ++ * complete within 6 seconds. ++ */ ++static void srp_timeout(unsigned long ptr) ++{ ++ gotgctl_data_t gotgctl; ++ dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr; ++ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl; ++ ++ gotgctl.d32 = dwc_read_reg32(addr); ++ ++ core_if->srp_timer_started = 0; ++ ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ if ((core_if->srp_success) && ++ (gotgctl.b.bsesvld)) { ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ gotgctl.d32, 0); ++ ++ core_if->srp_success = 0; ++ } ++ else { ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ } ++ else if (gotgctl.b.sesreq) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ else { ++ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32); ++ } ++} ++ ++/** ++ * Start the SRP timer to detect when the SRP does not complete within ++ * 6 seconds. 
++ * ++ * @param pcd the pcd structure. ++ */ ++void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd) ++{ ++ struct timer_list *srp_timer = &pcd->srp_timer; ++ GET_CORE_IF(pcd)->srp_timer_started = 1; ++ init_timer(srp_timer); ++ srp_timer->function = srp_timeout; ++ srp_timer->data = (unsigned long)GET_CORE_IF(pcd); ++ srp_timer->expires = jiffies + (HZ*6); ++ add_timer(srp_timer); ++} ++ ++/** ++ * Tasklet ++ * ++ */ ++extern void start_next_request(dwc_otg_pcd_ep_t *ep); ++ ++static void start_xfer_tasklet_func (unsigned long data) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data; ++ dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if; ++ ++ int i; ++ depctl_data_t diepctl; ++ ++ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n"); ++ ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl); ++ ++ if (pcd->ep0.queue_sof) { ++ pcd->ep0.queue_sof = 0; ++ start_next_request (&pcd->ep0); ++ // break; ++ } ++ ++ for (i=0; idev_if->num_in_eps; i++) ++ { ++ depctl_data_t diepctl; ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl); ++ ++ if (pcd->in_ep[i].queue_sof) { ++ pcd->in_ep[i].queue_sof = 0; ++ start_next_request (&pcd->in_ep[i]); ++ // break; ++ } ++ } ++ ++ return; ++} ++ ++ ++ ++ ++ ++ ++ ++static struct tasklet_struct start_xfer_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = start_xfer_tasklet_func, ++ .data = 0, ++}; ++/** ++ * This function initialized the pcd Dp structures to there default ++ * state. ++ * ++ * @param pcd the pcd structure. 
++ */ ++void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd) ++{ ++ static const char * names[] = ++ { ++ ++ "ep0", ++ "ep1in", ++ "ep2in", ++ "ep3in", ++ "ep4in", ++ "ep5in", ++ "ep6in", ++ "ep7in", ++ "ep8in", ++ "ep9in", ++ "ep10in", ++ "ep11in", ++ "ep12in", ++ "ep13in", ++ "ep14in", ++ "ep15in", ++ "ep1out", ++ "ep2out", ++ "ep3out", ++ "ep4out", ++ "ep5out", ++ "ep6out", ++ "ep7out", ++ "ep8out", ++ "ep9out", ++ "ep10out", ++ "ep11out", ++ "ep12out", ++ "ep13out", ++ "ep14out", ++ "ep15out" ++ ++ }; ++ ++ int i; ++ int in_ep_cntr, out_ep_cntr; ++ uint32_t hwcfg1; ++ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps; ++ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep_list); ++ pcd->gadget.ep0 = &pcd->ep0.ep; ++ pcd->gadget.speed = USB_SPEED_UNKNOWN; ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list); ++ ++ /** ++ * Initialize the EP0 structure. ++ */ ++ ep = &pcd->ep0; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.num = 0; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ ep->dwc_ep.aligned_buf=NULL; ++ ep->dwc_ep.aligned_buf_size=0; ++ ep->dwc_ep.aligned_dma_addr=0; ++ ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[0]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? 
++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ /** ++ * Initialize the EP structures. ++ */ ++ in_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3; ++ ++ for (i = 1; in_ep_cntr < num_in_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr]; ++ in_ep_cntr ++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 1; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? 
++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ //add only even number ep as in ++ if((i%2)==1) ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ out_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2; ++ ++ for (i = 1; out_ep_cntr < num_out_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr]; ++ out_ep_cntr++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 0; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[15 + i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? ++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ //add only odd number ep as out ++ if((i%2)==0) ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ /* remove ep0 from the list. There is a ep0 pointer.*/ ++ list_del_init (&pcd->ep0.ep.ep_list); ++ ++ pcd->ep0state = EP0_DISCONNECT; ++ pcd->ep0.ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++} ++ ++/** ++ * This function releases the Gadget device. ++ * required by device_unregister(). ++ * ++ * @todo Should this do something? Should it free the PCD? 
++ */ ++static void dwc_otg_pcd_gadget_release(struct device *dev) ++{ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev); ++} ++ ++ ++ ++/** ++ * This function initialized the PCD portion of the driver. ++ * ++ */ ++u8 dev_id[]="gadget"; ++int dwc_otg_pcd_init(struct lm_device *lmdev) ++{ ++ static char pcd_name[] = "dwc_otg_pcd"; ++ dwc_otg_pcd_t *pcd; ++ dwc_otg_core_if_t* core_if; ++ dwc_otg_dev_if_t* dev_if; ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lmdev); ++ int retval = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, lmdev); ++ /* ++ * Allocate PCD structure ++ */ ++ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL); ++ ++ if (pcd == 0) { ++ return -ENOMEM; ++ } ++ ++ memset(pcd, 0, sizeof(dwc_otg_pcd_t)); ++ spin_lock_init(&pcd->lock); ++ ++ otg_dev->pcd = pcd; ++ s_pcd = pcd; ++ pcd->gadget.name = pcd_name; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ++ strcpy(pcd->gadget.dev.bus_id, "gadget"); ++#else ++ pcd->gadget.dev.init_name = dev_id; ++#endif ++ pcd->otg_dev = lm_get_drvdata(lmdev); ++ ++ pcd->gadget.dev.parent = &lmdev->dev; ++ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release; ++ pcd->gadget.ops = &dwc_otg_pcd_ops; ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ DWC_PRINT("Dedicated Tx FIFOs mode\n"); ++ } ++ else { ++ DWC_PRINT("Shared Tx FIFO mode\n"); ++ } ++ ++ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget ++ * should not report as dual-speed capable. replace the following line ++ * with the block of code below it once the software is debugged for ++ * this. If is_dualspeed = 0 then the gadget driver should not report ++ * a device qualifier descriptor when queried. 
*/ ++ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) || ++ ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) && ++ (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) && ++ (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) { ++ pcd->gadget.is_dualspeed = 0; ++ } ++ else { ++ pcd->gadget.is_dualspeed = 1; ++ } ++ ++ if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ pcd->gadget.is_otg = 0; ++ } ++ else { ++ pcd->gadget.is_otg = 1; ++ } ++ ++ ++ pcd->driver = 0; ++ /* Register the gadget device */ ++printk("%s: 1\n",__func__); ++ retval = device_register(&pcd->gadget.dev); ++ if (retval != 0) { ++ kfree (pcd); ++printk("%s: 2\n",__func__); ++ return retval; ++ } ++ ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dwc_otg_core_dev_init(core_if); ++ } ++ ++ /* ++ * Initialize EP structures ++ */ ++ dwc_otg_pcd_reinit(pcd); ++ ++ /* ++ * Register the PCD Callbacks. 
++ */ ++ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks, ++ pcd); ++ /* ++ * Setup interupt handler ++ */ ++ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", lmdev->irq); ++ retval = request_irq(lmdev->irq, dwc_otg_pcd_irq, ++ IRQF_SHARED, pcd->gadget.name, pcd); ++ if (retval != 0) { ++ DWC_ERROR("request of irq%d failed\n", lmdev->irq); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -EBUSY; ++ } ++ ++ /* ++ * Initialize the DMA buffer for SETUP packets ++ */ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0); ++ if (pcd->setup_pkt == 0) { ++ free_irq(lmdev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0); ++ if (pcd->status_buf == 0) { ++ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ free_irq(lmdev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1); ++ dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1); ++ dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1); ++ dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1); ++ ++ if(dev_if->setup_desc_addr[0] == 0 ++ || dev_if->setup_desc_addr[1] == 0 ++ || dev_if->in_desc_addr == 0 ++ || dev_if->out_desc_addr == 0 ) { ++ ++ if(dev_if->out_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ if(dev_if->in_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ if(dev_if->setup_desc_addr[1]) ++ 
dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ if(dev_if->setup_desc_addr[0]) ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ ++ ++ dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->setup_pkt_dma_handle); ++ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ ++ free_irq(lmdev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ ++ return -ENOMEM; ++ } ++ } ++ } ++ else { ++ pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL); ++ if (pcd->setup_pkt == 0) { ++ free_irq(lmdev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL); ++ if (pcd->status_buf == 0) { ++ kfree(pcd->setup_pkt); ++ free_irq(lmdev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ } ++ ++ ++ /* Initialize tasklet */ ++ start_xfer_tasklet.data = (unsigned long)pcd; ++ pcd->start_xfer_tasklet = &start_xfer_tasklet; ++ ++ return 0; ++} ++ ++/** ++ * Cleanup the PCD. 
++ */ ++void dwc_otg_pcd_remove(struct lm_device *lmdev) ++{ ++ dwc_otg_device_t *otg_dev = lm_get_drvdata(lmdev); ++ dwc_otg_pcd_t *pcd = otg_dev->pcd; ++ dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, lmdev); ++ ++ /* ++ * Free the IRQ ++ */ ++ free_irq(lmdev->irq, pcd); ++ ++ /* start with the driver above us */ ++ if (pcd->driver) { ++ /* should have been done already by driver model core */ ++ DWC_WARN("driver '%s' is still registered\n", ++ pcd->driver->driver.name); ++ usb_gadget_unregister_driver(pcd->driver); ++ } ++ device_unregister(&pcd->gadget.dev); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle); ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ } ++ } ++ else { ++ kfree (pcd->setup_pkt); ++ kfree (pcd->status_buf); ++ } ++ ++ kfree(pcd); ++ otg_dev->pcd = 0; ++} ++ ++/** ++ * This function registers a gadget driver with the PCD. ++ * ++ * When a driver is successfully registered, it will receive control ++ * requests including set_configuration(), which enables non-control ++ * requests. then usb traffic follows until a disconnect is reported. ++ * then a host may connect again, or the driver might get unbound. 
++ * ++ * @param driver The driver being registered ++ */ ++int usb_gadget_register_driver(struct usb_gadget_driver *driver) ++{ ++ int retval; ++ ++ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n", driver->driver.name); ++ ++ if (!driver || driver->speed == USB_SPEED_UNKNOWN || ++ !driver->bind || ++ !driver->unbind || ++ !driver->disconnect || ++ !driver->setup) { ++ DWC_DEBUGPL(DBG_PCDV,"EINVAL\n"); ++ return -EINVAL; ++ } ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_PCDV,"ENODEV\n"); ++ return -ENODEV; ++ } ++ if (s_pcd->driver != 0) { ++ DWC_DEBUGPL(DBG_PCDV,"EBUSY (%p)\n", s_pcd->driver); ++ return -EBUSY; ++ } ++ ++ /* hook up the driver */ ++ s_pcd->driver = driver; ++ s_pcd->gadget.dev.driver = &driver->driver; ++ ++ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name); ++ retval = driver->bind(&s_pcd->gadget); ++ if (retval) { ++ DWC_ERROR("bind to driver %s --> error %d\n", ++ driver->driver.name, retval); ++ s_pcd->driver = 0; ++ s_pcd->gadget.dev.driver = 0; ++ return retval; ++ } ++ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++ ++EXPORT_SYMBOL(usb_gadget_register_driver); ++ ++/** ++ * This function unregisters a gadget driver ++ * ++ * @param driver The driver being unregistered ++ */ ++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) ++{ ++ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver); ++ ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__, ++ -ENODEV); ++ return -ENODEV; ++ } ++ if (driver == 0 || driver != s_pcd->driver) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__, ++ -EINVAL); ++ return -EINVAL; ++ } ++ ++ driver->unbind(&s_pcd->gadget); ++ s_pcd->driver = 0; ++ ++ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++EXPORT_SYMBOL(usb_gadget_unregister_driver); ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_pcd.h +@@ -0,0 
+1,297 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ ++ * $Revision: #36 $ ++ * $Date: 2008/09/26 $ ++ * $Change: 1103515 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#if !defined(__DWC_PCD_H__) ++#define __DWC_PCD_H__ ++ ++#include <linux/types.h> ++#include <linux/list.h> ++#include <linux/errno.h> ++#include <linux/device.h> ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include <linux/usb/ch9.h> ++# include <linux/usb/gadget.h> ++#else ++# include <linux/usb_ch9.h> ++# include <linux/usb_gadget.h> ++#endif ++ ++#include <linux/interrupt.h> ++#include <linux/dma-mapping.h> ++ ++struct lm_device; ++struct dwc_otg_device; ++ ++#include "dwc_otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Peripheral Controller Driver (PCD). ++ * ++ * The Peripheral Controller Driver (PCD) for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. For ++ * the Mass Storage Function driver the File-backed USB Storage Gadget ++ * (FBS) driver will be used. The FBS driver supports the ++ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only ++ * transports. ++ * ++ */ ++ ++/** Invalid DMA Address */ ++#define DMA_ADDR_INVALID (~(dma_addr_t)0) ++/** Maxpacket size for EP0 */ ++#define MAX_EP0_SIZE 64 ++/** Maxpacket size for any EP */ ++#define MAX_PACKET_SIZE 1024 ++ ++/** Max Transfer size for any EP */ ++#define MAX_TRANSFER_SIZE 65535 ++ ++/** Max DMA Descriptor count for any EP */ ++#define MAX_DMA_DESC_CNT 64 ++ ++/** ++ * Get the pointer to the core_if from the pcd pointer. ++ */ ++#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if) ++ ++/** ++ * States of EP0. ++ */ ++typedef enum ep0_state ++{ ++ EP0_DISCONNECT, /* no host */ ++ EP0_IDLE, ++ EP0_IN_DATA_PHASE, ++ EP0_OUT_DATA_PHASE, ++ EP0_IN_STATUS_PHASE, ++ EP0_OUT_STATUS_PHASE, ++ EP0_STALL, ++} ep0state_e; ++ ++/** Forward declaration.*/ ++struct dwc_otg_pcd; ++ ++/** DWC_otg iso request structure. ++ * ++ */ ++typedef struct usb_iso_request dwc_otg_pcd_iso_request_t; ++ ++/** PCD EP structure. ++ * This structure describes an EP, there is an array of EPs in the PCD ++ * structure.
++ */ ++typedef struct dwc_otg_pcd_ep ++{ ++ /** USB EP data */ ++ struct usb_ep ep; ++ /** USB EP Descriptor */ ++ const struct usb_endpoint_descriptor *desc; ++ ++ /** queue of dwc_otg_pcd_requests. */ ++ struct list_head queue; ++ unsigned stopped : 1; ++ unsigned disabling : 1; ++ unsigned dma : 1; ++ unsigned queue_sof : 1; ++ ++#ifdef DWC_EN_ISOC ++ /** DWC_otg Isochronous Transfer */ ++ struct usb_iso_request* iso_req; ++#endif //DWC_EN_ISOC ++ ++ /** DWC_otg ep data. */ ++ dwc_ep_t dwc_ep; ++ ++ /** Pointer to PCD */ ++ struct dwc_otg_pcd *pcd; ++}dwc_otg_pcd_ep_t; ++ ++ ++ ++/** DWC_otg PCD Structure. ++ * This structure encapsulates the data for the dwc_otg PCD. ++ */ ++typedef struct dwc_otg_pcd ++{ ++ /** USB gadget */ ++ struct usb_gadget gadget; ++ /** USB gadget driver pointer*/ ++ struct usb_gadget_driver *driver; ++ /** The DWC otg device pointer. */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** State of EP0 */ ++ ep0state_e ep0state; ++ /** EP0 Request is pending */ ++ unsigned ep0_pending : 1; ++ /** Indicates when SET CONFIGURATION Request is in process */ ++ unsigned request_config : 1; ++ /** The state of the Remote Wakeup Enable. */ ++ unsigned remote_wakeup_enable : 1; ++ /** The state of the B-Device HNP Enable. */ ++ unsigned b_hnp_enable : 1; ++ /** The state of A-Device HNP Support. */ ++ unsigned a_hnp_support : 1; ++ /** The state of the A-Device Alt HNP support. */ ++ unsigned a_alt_hnp_support : 1; ++ /** Count of pending Requests */ ++ unsigned request_pending; ++ ++ /** SETUP packet for EP0 ++ * This structure is allocated as a DMA buffer on PCD initialization ++ * with enough space for up to 3 setup packets. ++ */ ++ union ++ { ++ struct usb_ctrlrequest req; ++ uint32_t d32[2]; ++ } *setup_pkt; ++ ++ dma_addr_t setup_pkt_dma_handle; ++ ++ /** 2-byte dma buffer used to return status from GET_STATUS */ ++ uint16_t *status_buf; ++ dma_addr_t status_buf_dma_handle; ++ ++ /** EP0 */ ++ dwc_otg_pcd_ep_t ep0; ++ ++ /** Array of IN EPs. 
*/ ++ dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1]; ++ /** Array of OUT EPs. */ ++ dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1]; ++ /** number of valid EPs in the above array. */ ++// unsigned num_eps : 4; ++ spinlock_t lock; ++ /** Timer for SRP. If it expires before SRP is successful ++ * clear the SRP. */ ++ struct timer_list srp_timer; ++ ++ /** Tasklet to defer starting of TEST mode transmissions until ++ * Status Phase has been completed. ++ */ ++ struct tasklet_struct test_mode_tasklet; ++ ++ /** Tasklet to delay starting of xfer in DMA mode */ ++ struct tasklet_struct *start_xfer_tasklet; ++ ++ /** The test mode to enter when the tasklet is executed. */ ++ unsigned test_mode; ++ ++} dwc_otg_pcd_t; ++ ++ ++/** DWC_otg request structure. ++ * This structure is a list of requests. ++ */ ++typedef struct ++{ ++ struct usb_request req; /**< USB Request. */ ++ struct list_head queue; /**< queue of these requests. */ ++} dwc_otg_pcd_request_t; ++ ++ ++extern int dwc_otg_pcd_init(struct lm_device *lmdev); ++ ++//extern void dwc_otg_pcd_remove( struct dwc_otg_device *_otg_dev ); ++extern void dwc_otg_pcd_remove( struct lm_device *lmdev ); ++extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *pcd ); ++extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd ); ++ ++extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd); ++extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set); ++ ++extern void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req); ++extern void dwc_otg_request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *req, ++ int status); ++extern void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *_ep); ++extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *_pcd, ++ const unsigned reset); ++#ifndef VERBOSE ++#define VERIFY_PCD_DMA_ADDR(_addr_) BUG_ON(((_addr_)==DMA_ADDR_INVALID)||\ ++ ((_addr_)==0)||\ ++ ((_addr_)&0x3)) ++#else ++#define VERIFY_PCD_DMA_ADDR(_addr_) {\ ++ if(((_addr_)==DMA_ADDR_INVALID)||\ ++ 
((_addr_)==0)||\ ++ ((_addr_)&0x3)) {\ ++ printk("%s: Invalid DMA address "#_addr_"(%.8x)\n",__func__,_addr_);\ ++ BUG();\ ++ }\ ++ } ++#endif ++ ++ ++static inline void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ ++//void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ ++ dwc_ep_t *dwc_ep=&ep->dwc_ep; ++ ++DWC_DEBUGPL(DBG_PCDV,"%s: dwc_ep xfer_buf=%.8x, total_len=%d, dma_addr=%.8x\n",__func__,(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->dma_addr); ++ if (/*(core_if->dma_enable)&&*/(dwc_ep->dma_addr==DMA_ADDR_INVALID)) { ++ if((((u32)dwc_ep->xfer_buff)&0x3)==0){ ++ dwc_ep->dma_addr=dma_map_single(NULL,(void *)(dwc_ep->start_xfer_buff),(dwc_ep->total_len), DMA_TO_DEVICE); ++DWC_DEBUGPL(DBG_PCDV," got dma_addr=%.8x\n",dwc_ep->dma_addr); ++ }else{ ++DWC_DEBUGPL(DBG_PCDV," buf not aligned, use aligned_buf instead. xfer_buf=%.8x, total_len=%d, aligned_buf_size=%d\n",(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->aligned_buf_size); ++ if(dwc_ep->aligned_buf_size<dwc_ep->total_len){ ++ if(dwc_ep->aligned_buf){ ++//printk(" free buff dwc_ep aligned_buf_size=%d, aligned_buf(%.8x), aligned_dma_addr(%.8x));\n",dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); ++ //dma_free_coherent(NULL,dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); ++ kfree(dwc_ep->aligned_buf); ++ } ++ dwc_ep->aligned_buf_size=((1<<20)>(dwc_ep->total_len<<1))?(dwc_ep->total_len<<1):(1<<20); ++ //dwc_ep->aligned_buf = dma_alloc_coherent (NULL, dwc_ep->aligned_buf_size, &dwc_ep->aligned_dma_addr, GFP_KERNEL|GFP_DMA); ++ dwc_ep->aligned_buf=kmalloc(dwc_ep->aligned_buf_size,GFP_KERNEL|GFP_DMA|GFP_ATOMIC); ++ dwc_ep->aligned_dma_addr=dma_map_single(NULL,(void *)(dwc_ep->aligned_buf),(dwc_ep->aligned_buf_size),DMA_FROM_DEVICE); ++ if(!dwc_ep->aligned_buf){ ++ DWC_ERROR("Cannot alloc required buffer!!\n"); ++ BUG(); ++ } ++DWC_DEBUGPL(DBG_PCDV," dwc_ep allocated aligned buf=%.8x, dma_addr=%.8x, size=%d(0x%x)\n", (u32)dwc_ep->aligned_buf,
dwc_ep->aligned_dma_addr, dwc_ep->aligned_buf_size, dwc_ep->aligned_buf_size); ++ } ++ dwc_ep->dma_addr=dwc_ep->aligned_dma_addr; ++ if(dwc_ep->is_in) { ++ memcpy(dwc_ep->aligned_buf,dwc_ep->xfer_buff,dwc_ep->total_len); ++ dma_sync_single_for_device(NULL,dwc_ep->dma_addr,dwc_ep->total_len,DMA_TO_DEVICE); ++ } ++ } ++ } ++} ++ ++#endif ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_pcd_intr.c +@@ -0,0 +1,3708 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ ++ * $Revision: #83 $ ++ * $Date: 2008/10/14 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#include <linux/interrupt.h> ++#include <linux/delay.h> ++#include <linux/version.h> ++#include <linux/pci.h> ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++ ++ ++#define DEBUG_EP0 ++ ++ ++/* request functions defined in "dwc_otg_pcd.c" */ ++ ++/** @file ++ * This file contains the implementation of the PCD Interrupt handlers. ++ * ++ * The PCD handles the device interrupts. Many conditions can cause a ++ * device interrupt. When an interrupt occurs, the device interrupt ++ * service routine determines the cause of the interrupt and ++ * dispatches handling to the appropriate function. These interrupt ++ * handling functions are described below. ++ * All interrupt registers are processed from LSB to MSB. ++ */ ++ ++ ++/** ++ * This function prints the ep0 state for debug purposes.
++ */ ++static inline void print_ep0_state(dwc_otg_pcd_t *pcd) ++{ ++#ifdef DEBUG ++ char str[40]; ++ ++ switch (pcd->ep0state) { ++ case EP0_DISCONNECT: ++ strcpy(str, "EP0_DISCONNECT"); ++ break; ++ case EP0_IDLE: ++ strcpy(str, "EP0_IDLE"); ++ break; ++ case EP0_IN_DATA_PHASE: ++ strcpy(str, "EP0_IN_DATA_PHASE"); ++ break; ++ case EP0_OUT_DATA_PHASE: ++ strcpy(str, "EP0_OUT_DATA_PHASE"); ++ break; ++ case EP0_IN_STATUS_PHASE: ++ strcpy(str,"EP0_IN_STATUS_PHASE"); ++ break; ++ case EP0_OUT_STATUS_PHASE: ++ strcpy(str,"EP0_OUT_STATUS_PHASE"); ++ break; ++ case EP0_STALL: ++ strcpy(str,"EP0_STALL"); ++ break; ++ default: ++ strcpy(str,"EP0_INVALID"); ++ } ++ ++ DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); ++#endif ++} ++ ++/** ++ * This function returns pointer to in ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_in_eps; ++i) ++ { ++ if(pcd->in_ep[i].dwc_ep.num == ep_num) ++ return &pcd->in_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This function returns pointer to out ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_out_eps; ++i) ++ { ++ if(pcd->out_ep[i].dwc_ep.num == ep_num) ++ return &pcd->out_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This functions gets a pointer to an EP from the wIndex address ++ * value of the control request. 
++ */ ++static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ ++ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) ++ return &pcd->ep0; ++ list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list) ++ { ++ u8 bEndpointAddress; ++ ++ if (!ep->desc) ++ continue; ++ ++ bEndpointAddress = ep->desc->bEndpointAddress; ++ if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)) ++ == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))) ++ return ep; ++ } ++ return NULL; ++} ++ ++/** ++ * This function checks the EP request queue, if the queue is not ++ * empty the next request is started. ++ */ ++void start_next_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req = 0; ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, ++ dwc_otg_pcd_request_t, queue); ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = req->req.dma; ++ ep->dwc_ep.start_xfer_buff = req->req.buf; ++ ep->dwc_ep.xfer_buff = req->req.buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = req->req.length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(req->req.zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ep_check_and_patch_dma_addr(ep); ++ dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ } ++} ++ ++/** ++ * This function handles the SOF Interrupts. At this time the SOF ++ * Interrupt is disabled. 
++ */ ++int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_PCD, "SOF\n"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function handles the Rx Status Queue Level Interrupt, which ++ * indicates that there is a least one packet in the Rx FIFO. The ++ * packets are moved from the FIFO to memory, where they will be ++ * processed when the Endpoint Interrupt Register indicates Transfer ++ * Complete or SETUP Phase Done. ++ * ++ * Repeat the following until the Rx Status Queue is empty: ++ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet ++ * info ++ * -# If Receive FIFO is empty then skip to step Clear the interrupt ++ * and exit ++ * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the ++ * SETUP data to the buffer ++ * -# If OUT Data Packet call dwc_otg_read_packet to copy the data ++ * to the destination buffer ++ */ ++int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t gintmask = {.d32=0}; ++ device_grxsts_data_t status; ++ dwc_otg_pcd_ep_t *ep; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" }; ++#endif ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); ++ /* Disable the Rx Status Queue Level interrupt */ ++ gintmask.b.rxstsqlvl= 1; ++ dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); ++ ++ /* Get the Status from the top of the FIFO */ ++ status.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ ++ DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " ++ "pktsts:%x Frame:%d(0x%0x)\n", ++ status.b.epnum, status.b.bcnt, ++ dpid_str[status.b.dpid], ++ status.b.pktsts, status.b.fn, 
status.b.fn); ++ /* Get pointer to EP structure */ ++ ep = get_out_ep(pcd, status.b.epnum); ++ ++ switch (status.b.pktsts) { ++ case DWC_DSTS_GOUT_NAK: ++ DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); ++ break; ++ case DWC_STS_DATA_UPDT: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); ++ if (status.b.bcnt && ep->dwc_ep.xfer_buff) { ++ /** @todo NGS Check for buffer overflow? */ ++ dwc_otg_read_packet(core_if, ++ ep->dwc_ep.xfer_buff, ++ status.b.bcnt); ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ ep->dwc_ep.xfer_buff += status.b.bcnt; ++ } ++ break; ++ case DWC_STS_XFER_COMP: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); ++ break; ++ case DWC_DSTS_SETUP_COMP: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); ++#endif ++ break; ++case DWC_DSTS_SETUP_UPDT: ++ dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", ++ pcd->setup_pkt->req.bRequestType, ++ pcd->setup_pkt->req.bRequest, ++ pcd->setup_pkt->req.wValue, ++ pcd->setup_pkt->req.wIndex, ++ pcd->setup_pkt->req.wLength); ++#endif ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ break; ++ default: ++ DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", ++ status.b.pktsts); ++ break; ++ } ++ ++ /* Enable the Rx Status Queue Level interrupt */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32); ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); ++ return 1; ++} ++/** ++ * This function examines the Device IN Token Learning Queue to ++ * determine the EP number of the last IN token received. This ++ * implementation is for the Mass Storage device where there are only ++ * 2 IN EPs (Control-IN and BULK-IN). ++ * ++ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there ++ * are 8 EP Numbers in each of the other possible DTKNQ Registers. 
++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * ++ */ ++static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_device_global_regs_t *dev_global_regs = ++ core_if->dev_if->dev_global_regs; ++ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; ++ /* Number of Token Queue Registers */ ++ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; ++ dtknq1_data_t dtknqr1; ++ uint32_t in_tkn_epnums[4]; ++ int ndx = 0; ++ int i = 0; ++ volatile uint32_t *addr = &dev_global_regs->dtknqr1; ++ int epnum = 0; ++ ++ //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); ++ ++ ++ /* Read the DTKNQ Registers */ ++ for (i = 0; i < DTKNQ_REG_CNT; i++) ++ { ++ in_tkn_epnums[ i ] = dwc_read_reg32(addr); ++ DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1, ++ in_tkn_epnums[i]); ++ if (addr == &dev_global_regs->dvbusdis) { ++ addr = &dev_global_regs->dtknqr3_dthrctl; ++ } ++ else { ++ ++addr; ++ } ++ ++ } ++ ++ /* Copy the DTKNQR1 data to the bit field. 
*/ ++ dtknqr1.d32 = in_tkn_epnums[0]; ++ /* Get the EP numbers */ ++ in_tkn_epnums[0] = dtknqr1.b.epnums0_5; ++ ndx = dtknqr1.b.intknwptr - 1; ++ ++ //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); ++ if (ndx == -1) { ++ /** @todo Find a simpler way to calculate the max ++ * queue position.*/ ++ int cnt = TOKEN_Q_DEPTH; ++ if (TOKEN_Q_DEPTH <= 6) { ++ cnt = TOKEN_Q_DEPTH - 1; ++ } ++ else if (TOKEN_Q_DEPTH <= 14) { ++ cnt = TOKEN_Q_DEPTH - 7; ++ } ++ else if (TOKEN_Q_DEPTH <= 22) { ++ cnt = TOKEN_Q_DEPTH - 15; ++ } ++ else { ++ cnt = TOKEN_Q_DEPTH - 23; ++ } ++ epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF; ++ } ++ else { ++ if (ndx <= 5) { ++ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 13) { ++ ndx -= 6; ++ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 21) { ++ ndx -= 14; ++ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 29) { ++ ndx -= 22; ++ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; ++ } ++ } ++ //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); ++ return epnum; ++} ++ ++/** ++ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. ++ * The active request is checked for the next packet to be loaded into ++ * the non-periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ gnptxsts_data_t txstatus = {.d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ int epnum = 0; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ /* Get the epnum from the IN Token Learning Queue. 
*/ ++ epnum = get_ep_of_last_in_token(core_if); ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ dwords = (len + 3)/4; ++ ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32); ++ ++ while (txstatus.b.nptxqspcavail > 0 && ++ txstatus.b.nptxfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.nptxfempty = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function is called when dedicated Tx FIFO Empty interrupt occurs. ++ * The active request is checked for the next packet to be loaded into ++ * apropriate Tx FIFO. 
++ */ ++static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t* dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len && ++ ep->dwc_ep.xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function is called when the Device is disconnected. It stops ++ * any active requests and informs the Gadget driver of the ++ * disconnect. 
++ */ ++void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd) ++{ ++ int i, num_in_eps, num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); ++ /* don't disconnect drivers more than once */ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); ++ return; ++ } ++ pcd->ep0state = EP0_DISCONNECT; ++ ++ /* Reset the OTG state. */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Disable the NP Tx Fifo Empty Interrupt. */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Flush the FIFOs */ ++ /**@todo NGS Flush Periodic FIFOs */ ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); ++ dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); ++ ++ /* prevent new request submissions, kill any outstanding requests */ ++ ep = &pcd->ep0; ++ dwc_otg_request_nuke(ep); ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_in_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_out_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ ++ /* report disconnect; the driver is already quiesced */ ++ if (pcd->driver && pcd->driver->disconnect) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->disconnect(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++} ++ ++/** ++ * This interrupt indicates that ... 
++ */ ++int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); ++ intr_mask.b.i2cintr = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.i2cintr = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++ ++/** ++ * This interrupt indicates that ... ++ */ ++int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++#if defined(VERBOSE) ++ DWC_PRINT("Early Suspend Detected\n"); ++#endif ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.erlysuspend = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This function configures EPO to receive SETUP packets. ++ * ++ * @todo NGS: Update the comments from the HW FS. ++ * ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param pcd Programming view of the PCD. 
++ */ ++static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ depctl_data_t doepctl = { .d32 = 0 }; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__, ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++#endif ++ ++ doeptsize0.b.supcnt = 3; ++ doeptsize0.b.pktcnt = 1; ++ doeptsize0.b.xfersize = 8*3; ++ ++ ++ if (core_if->dma_enable) { ++ if (!core_if->dma_desc_enable) { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ ++ /** @todo dma needs to handle multiple setup packets (up to 3) */ ++ VERIFY_PCD_DMA_ADDR(pcd->setup_pkt_dma_handle); ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, ++ pcd->setup_pkt_dma_handle); ++ } else { ++ dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1; ++ dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index]; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; ++ dma_desc->buf = pcd->setup_pkt_dma_handle; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); ++ } ++ ++ } else { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ } ++ ++ /** DOEPCTL0 Register write */ ++ doepctl.b.epena = 1; ++ doepctl.b.cnak = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ 
dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++} ++ ++ ++/** ++ * This interrupt occurs when a USB Reset is detected. When the USB ++ * Reset Interrupt occurs the device state is set to DEFAULT and the ++ * EP0 state is set to IDLE. ++ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) ++ * -# Unmask the following interrupt bits ++ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) ++ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) ++ * - DOEPMSK.SETUP = 1 ++ * - DOEPMSK.XferCompl = 1 ++ * - DIEPMSK.XferCompl = 1 ++ * - DIEPMSK.TimeOut = 1 ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * At this point, all the required initialization, except for enabling ++ * the control 0 OUT endpoint is done, for receiving SETUP packets. 
++ */ ++int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ depctl_data_t doepctl = { .d32 = 0}; ++ ++ daint_data_t daintmsk = { .d32 = 0}; ++ doepmsk_data_t doepmsk = { .d32 = 0}; ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ ++ dcfg_data_t dcfg = { .d32=0 }; ++ grstctl_t resetctl = { .d32=0 }; ++ dctl_data_t dctl = {.d32=0}; ++ int i = 0; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("USB RESET\n"); ++#ifdef DWC_EN_ISOC ++ for(i = 1;i < 16; ++i) ++ { ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ep = get_in_ep(pcd,i); ++ if(ep != 0){ ++ dwc_ep = &ep->dwc_ep; ++ dwc_ep->next_frame = 0xffffffff; ++ } ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ /* reset the HNP settings */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ /* Set NAK for all OUT EPs */ ++ doepctl.b.snak = 1; ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, ++ doepctl.d32); ++ } ++ ++ /* Flush the NP Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); ++ /* Flush the Learning Queue */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ if(core_if->multiproc_int_enable) { ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ ++ if(core_if->dma_enable) { ++ doepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ 
diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++/* ++ if(core_if->dma_enable) { ++ diepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32); ++ } else{ ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ doepmsk.b.nak = 1; ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++ ++// diepmsk.b.nak = 1; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); ++ } ++ ++ /* Reset Device Address */ ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.devaddr = 0; ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* setup EP0 to receive SETUP packets */ ++ ep0_out_start(core_if, pcd); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbreset = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * Get the device speed from the device status register and convert it ++ * to USB speed constant. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++static int get_device_speed(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ enum usb_device_speed speed = USB_SPEED_UNKNOWN; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ speed = USB_SPEED_HIGH; ++ break; ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ speed = USB_SPEED_FULL; ++ break; ++ ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ speed = USB_SPEED_LOW; ++ break; ++ } ++ ++ return speed; ++} ++ ++/** ++ * Read the device status register and set the device speed in the ++ * data structure. ++ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. ++ */ ++int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ gintsts_data_t gintsts; ++ gusbcfg_data_t gusbcfg; ++ dwc_otg_core_global_regs_t *global_regs = ++ GET_CORE_IF(pcd)->core_global_regs; ++ uint8_t utmi16b, utmi8b; ++// DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); ++ DWC_PRINT("SPEED ENUM\n"); ++ ++ if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) { ++ utmi16b = 6; ++ utmi8b = 9; ++ } else { ++ utmi16b = 4; ++ utmi8b = 8; ++ } ++ dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ else if (pcd->ep0state == EP0_STALL) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ ++ pcd->ep0state = EP0_IDLE; ++ ++ ep0->stopped = 0; ++ ++ pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); ++ ++ /* Set USB turnaround time based on device speed and PHY interface. 
*/ ++ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ if (pcd->gadget.speed == USB_SPEED_HIGH) { ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { ++ /* UTMI+ OR ULPI interface */ ++ if (gusbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ } ++ } ++ } ++ else { ++ /* Full or low speed */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.enumdone = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that the ISO OUT Packet was dropped due to ++ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs ++ * read all the data from the Rx FIFO. 
++ */ ++int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "ISOC Out Dropped"); ++ ++ intr_mask.b.isooutdrop = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ ++ gintsts.d32 = 0; ++ gintsts.b.isooutdrop = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates the end of the portion of the micro-frame ++ * for periodic transactions. If there is a periodic transaction for ++ * the next frame, load the packets into the EP periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP"); ++ ++ intr_mask.b.eopframe = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.eopframe = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that EP of the packet on the top of the ++ * non-periodic Tx FIFO does not match EP of the IN Token received. ++ * ++ * The "Device IN Token Queue" Registers are read to determine the ++ * order the IN Tokens have been received. The non-periodic Tx FIFO ++ * is flushed, so it can be reloaded in the order seen in the IN Token ++ * Queue. 
++ */ ++int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.epmismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This funcion stalls EP0. ++ */ ++static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; ++ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ++ ctrl->bRequestType, ctrl->bRequest, err_val); ++ ++ ep0->dwc_ep.is_in = 1; ++ dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); ++ pcd->ep0.stopped = 1; ++ pcd->ep0state = EP0_IDLE; ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This functions delegates the setup command to the gadget driver. ++ */ ++static inline void do_gadget_setup(dwc_otg_pcd_t *pcd, ++ struct usb_ctrlrequest * ctrl) ++{ ++ int ret = 0; ++ if (pcd->driver && pcd->driver->setup) { ++ SPIN_UNLOCK(&pcd->lock); ++ ret = pcd->driver->setup(&pcd->gadget, ctrl); ++ SPIN_LOCK(&pcd->lock); ++ if (ret < 0) { ++ ep0_do_stall(pcd, ret); ++ } ++ ++ /** @todo This is a g_file_storage gadget driver specific ++ * workaround: a DELAYED_STATUS result from the fsg_setup ++ * routine will result in the gadget queueing a EP0 IN status ++ * phase for a two-stage control transfer. Exactly the same as ++ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class ++ * specific request. Need a generic way to know when the gadget ++ * driver will queue the status phase. Can we assume when we ++ * call the gadget driver setup() function that it will always ++ * queue and require the following flag? Need to look into ++ * this. 
++ */ ++ ++ if (ret == 256 + 999) { ++ pcd->request_config = 1; ++ } ++ } ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the IN status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ return; ++ } ++ ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ ++ /* Prepare for more SETUP Packets */ ++ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 1; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++// if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the OUT status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); ++ return; ++ } ++ pcd->ep0state = EP0_OUT_STATUS_PHASE; ++ ++ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 0; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++ if(GET_CORE_IF(pcd)->dma_enable == 0) { ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++ } ++} ++ ++/** ++ * Clear the EP halt (STALL) and if pending requests start the ++ * transfer. 
++ */ ++static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep) ++{ ++ if(ep->dwc_ep.stall_clear_flag == 0) ++ dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); ++ ++ /* Reactive the EP */ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ if (ep->stopped) { ++ ep->stopped = 0; ++ /* If there is a request in the EP queue start it */ ++ ++ /** @todo FIXME: this causes an EP mismatch in DMA mode. ++ * epmismatch not yet implemented. */ ++ ++ /* ++ * Above fixme is solved by implmenting a tasklet to call the ++ * start_next_request(), outside of interrupt context at some ++ * time after the current time, after a clear-halt setup packet. ++ * Still need to implement ep mismatch in the future if a gadget ++ * ever uses more than one endpoint at once ++ */ ++ ep->queue_sof = 1; ++ tasklet_schedule (pcd->start_xfer_tasklet); ++ } ++ /* Start Control Status Phase */ ++ do_setup_in_status_phase(pcd); ++} ++ ++/** ++ * This function is called when the SET_FEATURE TEST_MODE Setup packet ++ * is sent from the host. The Device Control register is written with ++ * the Test Mode bits set to the specified Test Mode. This is done as ++ * a tasklet so that the "Status" phase of the control transfer ++ * completes before transmitting the TEST packets. ++ * ++ * @todo This has not been tested since the tasklet struct was put ++ * into the PCD struct! 
++ * ++ */ ++static void do_test_mode(unsigned long data) ++{ ++ dctl_data_t dctl; ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data; ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ int test_mode = pcd->test_mode; ++ ++ ++// DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); ++ ++ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); ++ switch (test_mode) { ++ case 1: // TEST_J ++ dctl.b.tstctl = 1; ++ break; ++ ++ case 2: // TEST_K ++ dctl.b.tstctl = 2; ++ break; ++ ++ case 3: // TEST_SE0_NAK ++ dctl.b.tstctl = 3; ++ break; ++ ++ case 4: // TEST_PACKET ++ dctl.b.tstctl = 4; ++ break; ++ ++ case 5: // TEST_FORCE_ENABLE ++ dctl.b.tstctl = 5; ++ break; ++ } ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); ++} ++ ++/** ++ * This function process the GET_STATUS Setup Commands. ++ */ ++static inline void do_get_status(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ uint16_t *status = pcd->status_buf; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ *status = 0x1; /* Self powered */ ++ *status |= pcd->remote_wakeup_enable << 1; ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ *status = 0; ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0 || ctrl.wLength > 2) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ /** @todo check for EP stall */ ++ *status = ep->stopped; ++ break; ++ } ++ pcd->ep0_pending = 1; ++ ep0->dwc_ep.start_xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; ++ ep0->dwc_ep.xfer_len = 2; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; 
++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++} ++/** ++ * This function process the SET_FEATURE Setup Commands. ++ */ ++static inline void do_set_feature(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ int32_t otg_cap_param = core_if->core_params->otg_cap; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param); ++ ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 1; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /* Setup the Test Mode tasklet to do the Test ++ * Packet generation after the SETUP Status ++ * phase has completed. */ ++ ++ /** @todo This has not been tested since the ++ * tasklet struct was put into the PCD ++ * struct! */ ++ pcd->test_mode_tasklet.next = 0; ++ pcd->test_mode_tasklet.state = 0; ++ atomic_set(&pcd->test_mode_tasklet.count, 0); ++ pcd->test_mode_tasklet.func = do_test_mode; ++ pcd->test_mode_tasklet.data = (unsigned long)pcd; ++ pcd->test_mode = ctrl.wIndex >> 8; ++ tasklet_schedule(&pcd->test_mode_tasklet); ++ break; ++ ++ case USB_DEVICE_B_HNP_ENABLE: ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); ++ ++ /* dev may initiate HNP */ ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->b_hnp_enable = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); ++ /**@todo Is the gotgctl.devhnpen cleared ++ * by a USB Reset? 
*/ ++ gotgctl.b.devhnpen = 1; ++ gotgctl.b.hnpreq = 1; ++ dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_HNP_SUPPORT: ++ /* RH port supports HNP */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_ALT_HNP_SUPPORT: ++ /* other RH port does */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_alt_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ if (ctrl.wValue == USB_ENDPOINT_HALT) { ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ } ++} ++ ++/** ++ * This function process the CLEAR_FEATURE Setup Commands. ++ */ ++static inline void do_clear_feature(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 0; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /** @todo Add CLEAR_FEATURE for TEST modes. 
*/ ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ++ pcd_clear_halt(pcd, ep); ++ ++ break; ++ } ++} ++ ++/** ++ * This function process the SET_ADDRESS Setup Commands. ++ */ ++static inline void do_set_address(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ ++ if (ctrl.bRequestType == USB_RECIP_DEVICE) { ++ dcfg_data_t dcfg = {.d32=0}; ++ ++#ifdef DEBUG_EP0 ++// DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); ++#endif ++ dcfg.b.devaddr = ctrl.wValue; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); ++ do_setup_in_status_phase(pcd); ++ } ++} ++ ++/** ++ * This function processes SETUP commands. In Linux, the USB Command ++ * processing is done in two places - the first being the PCD and the ++ * second in the Gadget Driver (for example, the File-Backed Storage ++ * Gadget Driver). ++ * ++ *
Parameter NameMeaning
otg_capSpecifies the OTG capabilities. The driver will automatically detect the ++ value for this parameter if none is specified. ++ - 0: HNP and SRP capable (default, if available) ++ - 1: SRP Only capable ++ - 2: No HNP/SRP capable ++
dma_enableSpecifies whether to use slave or DMA mode for accessing the data FIFOs. ++ The driver will automatically detect the value for this parameter if none is ++ specified. ++ - 0: Slave ++ - 1: DMA (default, if available) ++
dma_burst_sizeThe DMA Burst size (applicable only for External DMA Mode). ++ - Values: 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++
speedSpecifies the maximum speed of operation in host and device mode. The ++ actual speed depends on the speed of the attached device and the value of ++ phy_type. ++ - 0: High Speed (default) ++ - 1: Full Speed ++
host_support_fs_ls_low_powerSpecifies whether low power mode is supported when attached to a Full ++ Speed or Low Speed device in host mode. ++ - 0: Don't support low power mode (default) ++ - 1: Support low power mode ++
host_ls_low_power_phy_clkSpecifies the PHY clock rate in low power mode when connected to a Low ++ Speed device in host mode. This parameter is applicable only if ++ HOST_SUPPORT_FS_LS_LOW_POWER is enabled. ++ - 0: 48 MHz (default) ++ - 1: 6 MHz ++
enable_dynamic_fifo Specifies whether FIFOs may be resized by the driver software. ++ - 0: Use cC FIFO size parameters ++ - 1: Allow dynamic FIFO sizing (default) ++
data_fifo_sizeTotal number of 4-byte words in the data FIFO memory. This memory ++ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs. ++ - Values: 32 to 32768 (default 8192) ++ ++ Note: The total FIFO memory depth in the FPGA configuration is 8192. ++
dev_rx_fifo_sizeNumber of 4-byte words in the Rx FIFO in device mode when dynamic ++ FIFO sizing is enabled. ++ - Values: 16 to 32768 (default 1064) ++
dev_nperio_tx_fifo_sizeNumber of 4-byte words in the non-periodic Tx FIFO in device mode when ++ dynamic FIFO sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++
dev_perio_tx_fifo_size_n (n = 1 to 15)Number of 4-byte words in each of the periodic Tx FIFOs in device mode ++ when dynamic FIFO sizing is enabled. ++ - Values: 4 to 768 (default 256) ++
host_rx_fifo_sizeNumber of 4-byte words in the Rx FIFO in host mode when dynamic FIFO ++ sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++
host_nperio_tx_fifo_sizeNumber of 4-byte words in the non-periodic Tx FIFO in host mode when ++ dynamic FIFO sizing is enabled in the core. ++ - Values: 16 to 32768 (default 1024) ++
host_perio_tx_fifo_sizeNumber of 4-byte words in the host periodic Tx FIFO when dynamic FIFO ++ sizing is enabled. ++ - Values: 16 to 32768 (default 1024) ++
max_transfer_sizeThe maximum transfer size supported in bytes. ++ - Values: 2047 to 65,535 (default 65,535) ++
max_packet_countThe maximum number of packets in a transfer. ++ - Values: 15 to 511 (default 511) ++
host_channelsThe number of host channel registers to use. ++ - Values: 1 to 16 (default 12) ++ ++ Note: The FPGA configuration supports a maximum of 12 host channels. ++
dev_endpointsThe number of endpoints in addition to EP0 available for device mode ++ operations. ++ - Values: 1 to 15 (default 6 IN and OUT) ++ ++ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in ++ addition to EP0. ++
phy_typeSpecifies the type of PHY interface to use. By default, the driver will ++ automatically detect the phy_type. ++ - 0: Full Speed ++ - 1: UTMI+ (default, if available) ++ - 2: ULPI ++
phy_utmi_widthSpecifies the UTMI+ Data Width. This parameter is applicable for a ++ phy_type of UTMI+. Also, this parameter is applicable only if the ++ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the ++ core has been configured to work at either data path width. ++ - Values: 8 or 16 bits (default 16) ++
phy_ulpi_ddrSpecifies whether the ULPI operates at double or single data rate. This ++ parameter is only applicable if phy_type is ULPI. ++ - 0: single data rate ULPI interface with 8 bit wide data bus (default) ++ - 1: double data rate ULPI interface with 4 bit wide data bus ++
i2c_enableSpecifies whether to use the I2C interface for full speed PHY. This ++ parameter is only applicable if PHY_TYPE is FS. ++ - 0: Disabled (default) ++ - 1: Enabled ++
otg_en_multiple_tx_fifoSpecifies whether dedicated tx fifos are enabled for non periodic IN EPs. ++ The driver will automatically detect the value for this parameter if none is ++ specified. ++ - 0: Disabled ++ - 1: Enabled (default, if available) ++
dev_tx_fifo_size_n (n = 1 to 15)Number of 4-byte words in each of the Tx FIFOs in device mode ++ when dynamic FIFO sizing is enabled. ++ - Values: 4 to 768 (default 256) ++
++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ * ++ *
Command Driver Description
GET_STATUS PCD Command is processed as ++ * defined in chapter 9 of the USB 2.0 Specification chapter 9 ++ *
CLEAR_FEATURE PCD The Device and Endpoint ++ * requests are the ENDPOINT_HALT feature is processed, all others the ++ * interface requests are ignored.
SET_FEATURE PCD The Device and Endpoint ++ * requests are processed by the PCD. Interface requests are passed ++ * to the Gadget Driver.
SET_ADDRESS PCD Program the DCFG reg, ++ * with device address received
GET_DESCRIPTOR Gadget Driver Return the ++ * requested descriptor
SET_DESCRIPTOR Gadget Driver Optional - ++ * not implemented by any of the existing Gadget Drivers.
SET_CONFIGURATION Gadget Driver Disable ++ * all EPs and enable EPs for new configuration.
GET_CONFIGURATION Gadget Driver Return ++ * the current configuration
SET_INTERFACE Gadget Driver Disable all ++ * EPs and enable EPs for new configuration.
GET_INTERFACE Gadget Driver Return the ++ * current interface.
SYNC_FRAME PCD Display debug ++ * message.
++ * ++ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are ++ * processed by pcd_setup. Calling the Function Driver's setup function from ++ * pcd_setup processes the gadget SETUP commands. ++ */ ++static inline void pcd_setup(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); ++ ++ /** @todo handle > 1 setup packet , assert error for now */ ++ ++ if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) { ++ DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n"); ++ } ++ ++ /* Clean up the request queue */ ++ dwc_otg_request_nuke(ep0); ++ ep0->stopped = 0; ++ ++ if (ctrl.bRequestType & USB_DIR_IN) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_DATA_PHASE; ++ } ++ else { ++ ep0->dwc_ep.is_in = 0; ++ pcd->ep0state = EP0_OUT_DATA_PHASE; ++ } ++ ++ if(ctrl.wLength == 0) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ ++ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { ++ /* handle non-standard (class/vendor) requests in the gadget driver */ ++ do_gadget_setup(pcd, &ctrl); ++ return; ++ } ++ ++ /** @todo NGS: Handle bad setup packet? 
*/ ++ ++/////////////////////////////////////////// ++//// --- Standard Request handling --- //// ++ ++ switch (ctrl.bRequest) { ++ case USB_REQ_GET_STATUS: ++ do_get_status(pcd); ++ break; ++ ++ case USB_REQ_CLEAR_FEATURE: ++ do_clear_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_FEATURE: ++ do_set_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_ADDRESS: ++ do_set_address(pcd); ++ break; ++ ++ case USB_REQ_SET_INTERFACE: ++ case USB_REQ_SET_CONFIGURATION: ++// _pcd->request_config = 1; /* Configuration changed */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_REQ_SYNCH_FRAME: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ default: ++ /* Call the Gadget Driver's setup functions */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ } ++} ++ ++/** ++ * This function completes the ep0 control transfer. ++ */ ++static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++#ifdef DEBUG_EP0 ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++#endif ++ deptsiz0_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req; ++ int is_last = 0; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name); ++ ++ if (pcd->ep0_pending && list_empty(&ep->queue)) { ++ if (ep->dwc_ep.is_in) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); ++#endif ++ do_setup_out_status_phase(pcd); ++ } ++ else { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); ++#endif ++ do_setup_in_status_phase(pcd); ++ } ++ pcd->ep0_pending = 0; ++ return 1; ++ } ++ ++ if (list_empty(&ep->queue)) { ++ return 0; ++ } ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue); ++ ++ ++ if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) { ++ is_last 
= 1; ++ } ++ else if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ if(core_if->dma_desc_enable != 0) ++ desc_sts.d32 = readl(dev_if->in_desc_addr); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++#endif ++ ++ if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) || ++ ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? */ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ do_setup_out_status_phase(pcd); ++ } ++ } ++ else { ++ /* ep0-OUT */ ++#ifdef DEBUG_EP0 ++ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? 
*/ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ if(core_if->dma_desc_enable == 0) ++ do_setup_in_status_phase(pcd); ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ dwc_otg_request_done(ep, req, 0); ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ return 1; ++ } ++ return 0; ++} ++ ++inline void aligned_buf_patch_on_buf_dma_oep_completion(dwc_otg_pcd_ep_t *ep, uint32_t byte_count) ++{ ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ if(byte_count && dwc_ep->aligned_buf && ++ dwc_ep->dma_addr>=dwc_ep->aligned_dma_addr && ++ dwc_ep->dma_addr<=(dwc_ep->aligned_dma_addr+dwc_ep->aligned_buf_size))\ ++ { ++ //aligned buf used, apply complete patch ++ u32 offset=(dwc_ep->dma_addr-dwc_ep->aligned_dma_addr); ++ memcpy(dwc_ep->start_xfer_buff+offset, dwc_ep->aligned_buf+offset, byte_count); ++ ++ } ++} ++ ++/** ++ * This function completes the request for the EP. If there are ++ * additional requests for the EP in the queue they will be started. 
++ */ ++static void complete_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++ deptsiz_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req = 0; ++ dwc_otg_dma_desc_t* dma_desc; ++ uint32_t byte_count = 0; ++ int is_last = 0; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name, ++ (ep->dwc_ep.is_in?"IN":"OUT")); ++ ++ /* Get any pending requests */ ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ if (!req) { ++ printk("complete_ep 0x%p, req = NULL!\n", ep); ++ return; ++ } ++ } ++ else { ++ printk("complete_ep 0x%p, ep->queue empty!\n", ep); ++ return; ++ } ++ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending); ++ ++ if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ ++ if (core_if->dma_enable) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); ++ if(core_if->dma_desc_enable == 0) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++DWC_DEBUGPL(DBG_PCDV,"byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x)\n", byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count ); ++ ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! 
ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } else { ++ ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ if(byte_count == 0) { ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len; ++ is_last = 1; ++ } else { ++ DWC_WARN("Incomplete transfer\n"); ++ } ++ } ++ } else { ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, 
&ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } ++ } else { ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++ desc_sts.d32 = 0; ++ if(core_if->dma_enable) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_FROM_DEVICE); ++ if(core_if->dma_desc_enable) { ++ DWC_WARN("\n\n%s: we need a cache invalidation here!!\n\n",__func__); ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len ++ - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ // ++ ++ is_last = 1; ++ } else { ++ deptsiz.d32 = 0; ++ deptsiz.d32 = 
dwc_read_reg32(&out_ep_regs->doeptsiz); ++ ++ byte_count = (ep->dwc_ep.xfer_len - ++ ep->dwc_ep.xfer_count - deptsiz.b.xfersize); ++ ++// dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ ++DWC_DEBUGPL(DBG_PCDV,"ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ //todo: invalidate cache & aligned buf patch on completion ++ dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ aligned_buf_patch_on_buf_dma_oep_completion(ep,byte_count); ++ ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. 
For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ } else { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. 
++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ ++#ifdef DEBUG ++ ++ DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n", ++ &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len, ++ ep->dwc_ep.xfer_count, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ ++ dwc_otg_request_done(ep, req, 0); ++ ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ++ /* If there is a request in the queue start it.*/ ++ start_next_request(ep); ++ } ++} ++ ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function BNA interrupt for Isochronous EPs ++ * ++ */ ++static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ volatile uint32_t *addr; ++ depctl_data_t depctl = {.d32 = 0}; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ dwc_otg_dma_desc_t *dma_desc; ++ int i; ++ ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num); ++ ++ if(dwc_ep->is_in) { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_in.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ else { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_out.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ ++ if(dwc_ep->is_in == 0){ ++ addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else{ ++ addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ depctl.b.epena = 1; ++ dwc_modify_reg32(addr,depctl.d32,depctl.d32); ++} ++ ++/** ++ * This function sets latest iso packet information(non-PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ dma_addr_t dma_addr; ++ uint32_t offset; ++ ++ if(ep->proc_buf_num) ++ dma_addr = ep->dma_addr1; ++ else ++ dma_addr = ep->dma_addr0; ++ ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ offset = ep->data_per_frame; ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3))); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = 0; ++ } else { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = -ENODATA; ++ } ++ ep->cur_pkt_addr += offset; ++ ep->cur_pkt_dma_addr += offset; ++ ep->cur_pkt++; ++} ++ ++/** ++ * This function sets latest iso packet information(DDMA mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ dwc_otg_dma_desc_t* dma_desc; ++ desc_sts_data_t sts = {.d32 = 0}; ++ iso_pkt_info_t *iso_packet; ++ uint32_t data_per_desc; ++ uint32_t offset; ++ int i, j; ++ ++ iso_packet = dwc_ep->pkt_info; ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ offset = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ iso_packet ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ iso_packet++; ++ dma_desc++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ } ++ else /** 
ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso packet descriptor */ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ ++ } ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ ++ dma_desc ++; ++ iso_packet++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ while(sts.b_iso_in.bs == BS_DMA_BUSY) { ++ sts.d32 = readl(&dma_desc->status); ++ } ++ ++ /* Write status in iso packet descriptor ??? do be done with ERROR codes*/ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ } ++} ++ ++/** ++ * This function reinitialize DMA Descriptors for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. 
++ * ++ */ ++static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ int i, j; ++ dwc_otg_dma_desc_t* dma_desc; ++ dma_addr_t dma_ad; ++ volatile uint32_t *addr; ++ desc_sts_data_t sts = { .d32 =0 }; ++ uint32_t data_per_desc; ++ ++ if(dwc_ep->is_in == 0) { ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else { ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ ++ ++ if(dwc_ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr1; ++ } ++ ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ dma_desc ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dma_desc++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = dwc_ep->proc_buf_num; ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ } ++ else /** ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ sts.b_iso_in.framenum = dwc_ep->next_frame; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ dma_desc ++; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = dwc_ep->proc_buf_num; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1; ++ } ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++} ++ ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * in case Iso out packet was dropped ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ * @param dwc_ep The EP for wihich transfer complete was asserted ++ * ++ */ ++static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ uint32_t dma_addr; ++ uint32_t drp_pkt; ++ uint32_t drp_pkt_cnt; ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ int i; ++ ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz); ++ ++ drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt; ++ drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm); ++ ++ /* Setting dropped packets status */ ++ for(i = 0; i < drp_pkt_cnt; ++i) { ++ dwc_ep->pkt_info[drp_pkt].status = -ENODATA; ++ drp_pkt ++; ++ deptsiz.b.pktcnt--; ++ } ++ ++ ++ if(deptsiz.b.pktcnt > 0) { ++ deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket; ++ } else { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 0; ++ } ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32); ++ ++ if(deptsiz.b.pktcnt > 0) { ++ if(dwc_ep->proc_buf_num) { ++ dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize; ++ } else { ++ dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;; ++ } ++ ++ VERIFY_PCD_DMA_ADDR(dma_addr); ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr); ++ ++ /** Re-enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl, ++ depctl.d32,depctl.d32); ++ return 0; ++ } else { ++ return 1; ++ } ++} ++ ++/** ++ * This function sets iso packets information(PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ * ++ */ ++static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ int i, j; ++ dma_addr_t dma_ad; ++ iso_pkt_info_t *packet_info = ep->pkt_info; ++ uint32_t offset; ++ uint32_t frame_data; ++ deptsiz_data_t deptsiz; ++ ++ if(ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = ep->dma_addr1; ++ } ++ ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ offset = 0; ++ for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) ++ { ++ frame_data = ep->data_per_frame; ++ for(j = 0; j < ep->pkt_per_frm; ++j) { ++ ++ /* Packet status - is not set as initially ++ * it is set to 0 and if packet was sent ++ successfully, status field will remain 0*/ ++ ++ ++ /* Bytes has been transfered */ ++ packet_info->length = (ep->maxpacket < frame_data) ? ++ ep->maxpacket : frame_data; ++ ++ /* Received packet offset */ ++ packet_info->offset = offset; ++ offset += packet_info->length; ++ frame_data -= packet_info->length; ++ ++ packet_info ++; ++ } ++ } ++ return 1; ++ } else { ++ /* This is a workaround for in case of Transfer Complete with ++ * PktDrpSts interrupts merging - in this case Transfer complete ++ * interrupt for Isoc Out Endpoint is asserted without PktDrpSts ++ * set and with DOEPTSIZ register non zero. Investigations showed, ++ * that this happens when Out packet is dropped, but because of ++ * interrupts merging during first interrupt handling PktDrpSts ++ * bit is cleared and for next merged interrupts it is not reset. ++ * In this case SW hadles the interrupt as if PktDrpSts bit is set. 
++ */ ++ if(ep->is_in) { ++ return 1; ++ } else { ++ return handle_iso_out_pkt_dropped(core_if, ep); ++ } ++ } ++} ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * ++ * @param ep The EP for which transfer complete was asserted ++ * ++ */ ++static void complete_iso_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ uint8_t is_last = 0; ++ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ set_ddma_iso_pkts_info(core_if, dwc_ep); ++ reinit_ddma_iso_xfer(core_if, dwc_ep); ++ is_last = 1; ++ } else { ++ if(core_if->pti_enh_enable) { ++ if(set_iso_pkts_info(core_if, dwc_ep)) { ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep); ++ is_last = 1; ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ if(is_last) ++ dwc_otg_iso_buffer_done(ep, ep->iso_req); ++} ++ ++#endif //DWC_EN_ISOC ++ ++ ++/** ++ * This function handles EP0 Control transfers. 
++ *
++ * The state of the control transfers are tracked in
++ * ep0state.
++ */
++static void handle_ep0(dwc_otg_pcd_t *pcd)
++{
++	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
++	dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
++	desc_sts_data_t desc_sts;
++	deptsiz0_data_t deptsiz;
++	uint32_t byte_count;
++
++#ifdef DEBUG_EP0
++	DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
++	print_ep0_state(pcd);
++#endif
++
++	switch (pcd->ep0state) {
++	case EP0_DISCONNECT:
++		break;
++
++	case EP0_IDLE:
++		pcd->request_config = 0;
++
++		pcd_setup(pcd);
++		break;
++
++	case EP0_IN_DATA_PHASE:
++#ifdef DEBUG_EP0
++		DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
++			    ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"),
++			    ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
++#endif
++
++		if (core_if->dma_enable != 0) {
++			/*
++			 * For EP0 we can only program 1 packet at a time so we
++			 * need to do the make calculations after each complete.
++			 * Call write_packet to make the calculations, as in
++			 * slave mode, and use those values to determine if we
++			 * can complete.
++ */ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz); ++ byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->in_desc_addr); ++ byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes; ++ } ++ ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ case EP0_OUT_DATA_PHASE: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", ++ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), ++ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); ++#endif ++ if (core_if->dma_enable != 0) { ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz); ++ byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ dma_sync_single_for_device(NULL,ep0->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ aligned_buf_patch_on_buf_dma_oep_completion(ep0,byte_count); ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->out_desc_addr); ++ byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes; ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ // ++ ++ } ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), 
&ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ ++ ++ case EP0_IN_STATUS_PHASE: ++ case EP0_OUT_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); ++ ep0_complete_request(ep0); ++ pcd->ep0state = EP0_IDLE; ++ ep0->stopped = 1; ++ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ ++ ++ /* Prepare for more SETUP Packets */ ++ if(core_if->dma_enable) { ++ ep0_out_start(core_if, pcd); ++ } ++ break; ++ ++ case EP0_STALL: ++ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); ++ break; ++ } ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++} ++ ++ ++/** ++ * Restart transfer ++ */ ++static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if; ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++#ifdef DWC_EN_ISOC ++ if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) { ++ return; ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x" ++ " stopped=%d\n", ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ ep->stopped); ++ /* ++ * If xfersize is 0 and pktcnt in not 0, resend the last packet. 
++ */ ++ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 && ++ ep->dwc_ep.start_xfer_buff != 0) { ++ if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) { ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ else { ++ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket; ++ /* convert packet size to dwords. */ ++ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ ep->stopped = 0; ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x " ++ "xfer_len=%0x stopped=%d\n", ++ ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ ep->stopped ++ ); ++ if (epnum == 0) { ++ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep); ++ } ++ else { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } ++ } ++} ++ ++ ++/** ++ * handle the IN EP disable interrupt. ++ */ ++static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ return; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum, ++ dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl)); ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++ ++ if (ep->stopped) { ++ /* Flush the Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ /* Clear the Global IN NP NAK */ ++ dctl.d32 = 0; ++ dctl.b.cgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ 
dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ } ++ else { ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n"); ++ } ++} ++ ++/** ++ * Handler for the IN EP timeout handshake interrupt. ++ */ ++static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ ++#ifdef DEBUG ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ uint32_t num = 0; ++#endif ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ /* Disable the NP Tx Fifo Empty Interrrupt */ ++ if (!core_if->dma_enable) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ /** @todo NGS Check EP type. ++ * Implement for Periodic EPs */ ++ /* ++ * Non-periodic EP ++ */ ++ /* Enable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ 0, intr_mask.d32); ++ ++ /* Set Global IN NAK */ ++ dctl.b.sgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, dctl.d32); ++ ++ ep->stopped = 1; ++ ++#ifdef DEBUG ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz); ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++#endif ++ ++#ifdef DISABLE_PERIODIC_EP ++ /* ++ * Set the NAK bit for this EP to ++ * start the disable process. ++ */ ++ diepctl.d32 = 0; ++ diepctl.b.snak = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32); ++ ep->disabling = 1; ++ ep->stopped = 1; ++#endif ++} ++ ++/** ++ * Handler for the IN EP NAK interrupt. 
++ */ ++static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ diepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP Babble interrupt. ++ */ ++static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.babble = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NAK interrupt. ++ */ ++static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NYET interrupt. 
++ */ ++static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nyet = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that an IN EP has a pending Interrupt. ++ * The sequence for handling the IN EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each IN EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DIEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Time-out Handshake" log error ++ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx ++ * FIFO. 
++ * -# If "IN Token EP Mismatch" (disable, this is handled by EP ++ * Mismatch Interrupt) ++ */ ++static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ diepint_data_t diepint = {.d32=0}; \ ++ diepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \ ++ diepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ diepint_data_t diepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t depctl = {.d32=0}; ++ uint32_t ep_intr; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if); ++ ++ /* Service the Device IN interrupts for each endpoint */ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ uint32_t empty_msk; ++ /* Get EP pointer */ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ ++ DWC_DEBUGPL(DBG_PCDV, ++ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n", ++ epnum, ++ empty_msk, ++ depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++ ++ diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep); ++ ++ DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32); ++ /* Transfer complete */ ++ if (diepint.b.xfercompl) { ++ /* Disable the NP Tx FIFO Empty ++ * Interrrupt */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ else { ++ /* Disable the Tx FIFO Empty 
Interrupt for this EP */ ++ uint32_t fifoemptymsk = 0x1 << dwc_ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ fifoemptymsk, 0); ++ } ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,xfercompl); ++ ++ /* Complete the transfer */ ++ if (epnum == 0) { ++ handle_ep0(pcd); ++ } ++#ifdef DWC_EN_ISOC ++ else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if(!ep->stopped) ++ complete_iso_ep(ep); ++ } ++#endif //DWC_EN_ISOC ++ else { ++ ++ complete_ep(ep); ++ } ++ } ++ /* Endpoint disable */ ++ if (diepint.b.epdisabled) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum); ++ handle_in_ep_disable_intr(pcd, epnum); ++ ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (diepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum); ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* TimeOUT Handshake (non-ISOC IN EPs) */ ++ if (diepint.b.timeout) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum); ++ handle_in_ep_timeout_intr(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,timeout); ++ } ++ /** IN Token received with TxF Empty */ ++ if (diepint.b.intktxfemp) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n", ++ epnum); ++ if (!ep->stopped && epnum != 0) { ++ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum], ++ diepmsk.d32, 0); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0); ++ } ++ start_next_request(ep); ++ } ++ else if(core_if->dma_desc_enable && epnum == 0 && ++ pcd->ep0state == EP0_OUT_STATUS_PHASE) { ++ // EP0 IN set STALL ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ ++ /* set the disable and stall bits */ ++ if (depctl.b.epena) { ++ depctl.b.epdis 
= 1; ++ } ++ depctl.b.stall = 1; ++ dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp); ++ } ++ /** IN Token Received with EP mismatch */ ++ if (diepint.b.intknepmis) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum); ++ CLEAR_IN_EP_INTR(core_if,epnum,intknepmis); ++ } ++ /** IN Endpoint NAK Effective */ ++ if (diepint.b.inepnakeff) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum); ++ /* Periodic EP */ ++ if (ep->disabling) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff); ++ ++ } ++ ++ /** IN EP Tx FIFO Empty Intr */ ++ if (diepint.b.emptyintr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum); ++ write_empty_tx_fifo(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,emptyintr); ++ ++ } ++ ++ /** IN EP BNA Intr */ ++ if (diepint.b.bna) { ++ CLEAR_IN_EP_INTR(core_if,epnum,bna); ++ if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP */ ++ if(!dctl.b.gcontbna) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ /* NAK Interrutp */ ++ if (diepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum); ++ handle_in_ep_nak_intr(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,nak); ++ } ++ } ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++#undef 
CLEAR_IN_EP_INTR ++} ++ ++/** ++ * This interrupt indicates that an OUT EP has a pending Interrupt. ++ * The sequence for handling the OUT EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each OUT EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DOEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Setup Phase Done" process Setup Packet (See Standard USB ++ * Command Processing) ++ */ ++static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ doepint_data_t doepint = {.d32=0}; \ ++ doepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \ ++ doepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t ep_intr; ++ doepint_data_t doepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t doepctl = {.d32=0}; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if); ++ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ /* Get EP pointer */ ++ ep = get_out_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++#endif ++ doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep); ++ ++ /* Transfer complete */ ++ if (doepint.b.xfercompl) { ++ ++ if (epnum == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ if(core_if->dma_desc_enable == 0 || 
pcd->ep0state != EP0_IDLE) ++ handle_ep0(pcd); ++#ifdef DWC_EN_ISOC ++ } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if (doepint.b.pktdrpsts == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_iso_ep(ep); ++ } else { ++ ++ doepint_data_t doepint = {.d32=0}; ++ doepint.b.xfercompl = 1; ++ doepint.b.pktdrpsts = 1; ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint, ++ doepint.d32); ++ if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) { ++ complete_iso_ep(ep); ++ } ++ } ++#endif //DWC_EN_ISOC ++ } else { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_ep(ep); ++ } ++ ++ } ++ ++ /* Endpoint disable */ ++ if (doepint.b.epdisabled) { ++ ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (doepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum); ++ DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma); ++ CLEAR_OUT_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* Setup Phase Done (contorl EPs) */ ++ if (doepint.b.setup) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n", ++ epnum); ++#endif ++ CLEAR_OUT_EP_INTR(core_if,epnum,setup); ++ ++ handle_ep0(pcd); ++ } ++ ++ /** OUT EP BNA Intr */ ++ if (doepint.b.bna) { ++ CLEAR_OUT_EP_INTR(core_if,epnum,bna); ++ if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP*/ ++ if(!dctl.b.gcontbna) { ++ doepctl.d32 = 0; ++ doepctl.b.snak = 1; ++ doepctl.b.epdis 
= 1; ++ dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ if (doepint.b.stsphsercvd) { ++ CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd); ++ if(core_if->dma_desc_enable) { ++ do_setup_in_status_phase(pcd); ++ } ++ } ++ /* Babble Interrutp */ ++ if (doepint.b.babble) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum); ++ handle_out_ep_babble_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,babble); ++ } ++ /* NAK Interrutp */ ++ if (doepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum); ++ handle_out_ep_nak_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nak); ++ } ++ /* NYET Interrutp */ ++ if (doepint.b.nyet) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum); ++ handle_out_ep_nyet_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nyet); ++ } ++ } ++ ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++ ++#undef CLEAR_OUT_EP_INTR ++} ++ ++ ++/** ++ * Incomplete ISO IN Transfer Interrupt. ++ * This interrupt indicates one of the following conditions occurred ++ * while transmitting an ISOC transaction. ++ * - Corrupted IN Token for ISOC EP. ++ * - Packet not complete in FIFO. 
++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Disable EP; when "Endpoint Disabled" interrupt is received ++ * Flush FIFO ++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++ ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_in_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(dwc_ep->active && ++ dwc_ep->type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++ ++#else ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "IN ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoin = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++#endif //DWC_EN_ISOC ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoin = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * 
Incomplete ISO OUT Transfer Interrupt. ++ * ++ * This interrupt indicates that the core has dropped an ISO OUT ++ * packet. The following conditions can be the cause: ++ * - FIFO Full, the entire packet would not fit in the FIFO. ++ * - CRC Error ++ * - Corrupted Token ++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Read any data from the FIFO ++ * -# Disable EP. when "Endpoint Disabled" interrupt is received ++ * re-enable EP. ++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd) ++{ ++ /* @todo implement ISR */ ++ gintsts_data_t gintsts; ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_out_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(pcd->out_ep[i].dwc_ep.active && ++ pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++#else ++ /** @todo implement ISR */ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT 
Handler not implemented for %s\n", ++ "OUT ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoout = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++#endif // DWC_EN_ISOC ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoout = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function handles the Global IN NAK Effective interrupt. ++ * ++ */ ++int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ depctl_data_t diepctl = { .d32 = 0}; ++ depctl_data_t diepctl_rd = { .d32 = 0}; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n"); ++ ++ /* Disable all active IN EPs */ ++ diepctl.b.epdis = 1; ++ diepctl.b.snak = 1; ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (diepctl_rd.b.epena) { ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, ++ diepctl.d32); ++ } ++ } ++ /* Disable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.ginnakeff = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * OUT NAK Effective. 
++ *
++ */
++int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd)
++{
++	gintmsk_data_t intr_mask = { .d32 = 0};
++	gintsts_data_t gintsts;
++
++	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
++		"Global IN NAK Effective\n");
++	/* Disable the Global OUT NAK Effective Interrupt */
++	intr_mask.b.goutnakeff = 1;
++	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
++		intr_mask.d32, 0);
++
++	/* Clear interrupt */
++	gintsts.d32 = 0;
++	gintsts.b.goutnakeff = 1;
++	dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
++		gintsts.d32);
++
++	return 1;
++}
++
++
++/**
++ * PCD interrupt handler.
++ *
++ * The PCD handles the device interrupts. Many conditions can cause a
++ * device interrupt. When an interrupt occurs, the device interrupt
++ * service routine determines the cause of the interrupt and
++ * dispatches handling to the appropriate function. These interrupt
++ * handling functions are described below.
++ *
++ * All interrupt registers are processed from LSB to MSB.
++ * ++ */ ++int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++#ifdef VERBOSE ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++#endif ++ gintsts_data_t gintr_status; ++ int32_t retval = 0; ++ ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ SPIN_LOCK(&pcd->lock); ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ gintr_status.d32 = dwc_otg_read_core_intr(core_if); ++ ++/* ++ if (!gintr_status.d32) { ++ SPIN_UNLOCK(&pcd->lock); ++ return 0; ++ } ++*/ ++ DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", ++ __func__, gintr_status.d32); ++ ++ if (gintr_status.b.sofintr) { ++ retval |= dwc_otg_pcd_handle_sof_intr(pcd); ++ } ++ if (gintr_status.b.rxstsqlvl) { ++ retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd); ++ } ++ if (gintr_status.b.nptxfempty) { ++ retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd); ++ } ++ if (gintr_status.b.ginnakeff) { ++ retval |= dwc_otg_pcd_handle_in_nak_effective(pcd); ++ } ++ if (gintr_status.b.goutnakeff) { ++ retval |= dwc_otg_pcd_handle_out_nak_effective(pcd); ++ } ++ if (gintr_status.b.i2cintr) { ++ retval |= dwc_otg_pcd_handle_i2c_intr(pcd); ++ } ++ if (gintr_status.b.erlysuspend) { ++ retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd); ++ } ++ if (gintr_status.b.usbreset) { ++ retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd); ++ } ++ if (gintr_status.b.enumdone) { ++ retval |= dwc_otg_pcd_handle_enum_done_intr(pcd); ++ } ++ if (gintr_status.b.isooutdrop) { ++ retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd); ++ } ++ if (gintr_status.b.eopframe) { ++ retval |= 
dwc_otg_pcd_handle_end_periodic_frame_intr(pcd); ++ } ++ if (gintr_status.b.epmismatch) { ++ retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if); ++ } ++ if (gintr_status.b.inepint) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.outepintr) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.incomplisoin) { ++ retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd); ++ } ++ if (gintr_status.b.incomplisoout) { ++ retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd); ++ } ++ ++ /* In MPI mode De vice Endpoints intterrupts are asserted ++ * without setting outepintr and inepint bits set, so these ++ * Interrupt handlers are called without checking these bit-fields ++ */ ++ if(core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintsts)); ++#endif ++ SPIN_UNLOCK(&pcd->lock); ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_plat.h +@@ -0,0 +1,268 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/platform/dwc_otg_plat.h $ ++ * $Revision: #23 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064915 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. 
You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_PLAT_H__) ++#define __DWC_OTG_PLAT_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define cns3xxx_ioremap(addr, size) ioremap(addr, size) ++#define cns3xxx_iounmap(addr) iounmap(addr) ++/* Changed all readl and writel to __raw_readl, __raw_writel */ ++ ++/** ++ * @file ++ * ++ * This file contains the Platform Specific constants, interfaces ++ * (functions and macros) for Linux. ++ * ++ */ ++//#if !defined(__LINUX_ARM_ARCH__) ++//#error "The contents of this file is Linux specific!!!" ++//#endif ++ ++/** ++ * Reads the content of a register. 
++ * ++ * @param reg address of register to read. ++ * @return contents of the register. ++ * ++ ++ * Usage:
++ * uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl); ++ */ ++static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *reg) ++{ ++ return __raw_readl(reg); ++ // return readl(reg); ++}; ++ ++/** ++ * Writes a register with a 32 bit value. ++ * ++ * @param reg address of register to read. ++ * @param value to write to _reg. ++ * ++ * Usage:
++ * dwc_write_reg32(&dev_regs->dctl, 0); ++ */ ++static __inline__ void dwc_write_reg32( volatile uint32_t *reg, const uint32_t value) ++{ ++ // writel( value, reg ); ++ __raw_writel(value, reg); ++ ++}; ++ ++/** ++ * This function modifies bit values in a register. Using the ++ * algorithm: (reg_contents & ~clear_mask) | set_mask. ++ * ++ * @param reg address of register to read. ++ * @param clear_mask bit mask to be cleared. ++ * @param set_mask bit mask to be set. ++ * ++ * Usage:
++ * // Clear the SOF Interrupt Mask bit and
++ * // set the OTG Interrupt mask bit, leaving all others as they were. ++ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);
++ */ ++static __inline__ ++ void dwc_modify_reg32( volatile uint32_t *reg, const uint32_t clear_mask, const uint32_t set_mask) ++{ ++ // writel( (readl(reg) & ~clear_mask) | set_mask, reg ); ++ __raw_writel( (__raw_readl(reg) & ~clear_mask) | set_mask, reg ); ++}; ++ ++ ++/** ++ * Wrapper for the OS micro-second delay function. ++ * @param[in] usecs Microseconds of delay ++ */ ++static __inline__ void UDELAY( const uint32_t usecs ) ++{ ++ udelay( usecs ); ++} ++ ++/** ++ * Wrapper for the OS milli-second delay function. ++ * @param[in] msecs milliseconds of delay ++ */ ++static __inline__ void MDELAY( const uint32_t msecs ) ++{ ++ mdelay( msecs ); ++} ++ ++/** ++ * Wrapper for the Linux spin_lock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_LOCK( spinlock_t *lock ) ++{ ++ spin_lock(lock); ++} ++ ++/** ++ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_UNLOCK( spinlock_t *lock ) ++{ ++ spin_unlock(lock); ++} ++ ++/** ++ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_LOCK_IRQSAVE( l, f ) spin_lock_irqsave(l,f); ++ ++/** ++ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_UNLOCK_IRQRESTORE( l,f ) spin_unlock_irqrestore(l,f); ++ ++/* ++ * Debugging support vanishes in non-debug builds. ++ */ ++ ++ ++/** ++ * The Debug Level bit-mask variable. ++ */ ++extern uint32_t g_dbg_lvl; ++/** ++ * Set the Debug Level variable. 
++ */ ++static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new ) ++{ ++ uint32_t old = g_dbg_lvl; ++ g_dbg_lvl = new; ++ return old; ++} ++ ++/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ ++#define DBG_CIL (0x2) ++/** When debug level has the DBG_CILV bit set, display CIL Verbose debug ++ * messages */ ++#define DBG_CILV (0x20) ++/** When debug level has the DBG_PCD bit set, display PCD (Device) debug ++ * messages */ ++#define DBG_PCD (0x4) ++/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug ++ * messages */ ++#define DBG_PCDV (0x40) ++/** When debug level has the DBG_HCD bit set, display Host debug messages */ ++#define DBG_HCD (0x8) ++/** When debug level has the DBG_HCDV bit set, display Verbose Host debug ++ * messages */ ++#define DBG_HCDV (0x80) ++/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host ++ * mode. */ ++#define DBG_HCD_URB (0x800) ++ ++/** When debug level has any bit set, display debug messages */ ++#define DBG_ANY (0xFF) ++ ++/** All debug messages off */ ++#define DBG_OFF 0 ++ ++/** Prefix string for DWC_DEBUG print macros. */ ++#define USB_DWC "DWC_otg: " ++ ++/** ++ * Print a debug message when the Global debug level variable contains ++ * the bit defined in lvl. ++ * ++ * @param[in] lvl - Debug level, use one of the DBG_ constants above. ++ * @param[in] x - like printf ++ * ++ * Example:

++ * ++ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr); ++ * ++ *
++ * results in:
++ * ++ * usb-DWC_otg: dwc_otg_cil_init(ca867000) ++ * ++ */ ++#ifdef DEBUG ++ ++# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) ++# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) ++ ++# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) ++ ++#else ++ ++# define DWC_DEBUGPL(lvl, x...) do{}while(0) ++# define DWC_DEBUGP(x...) ++ ++# define CHK_DEBUG_LEVEL(level) (0) ++ ++#endif /*DEBUG*/ ++ ++/** ++ * Print an Error message. ++ */ ++#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) ++/** ++ * Print a Warning message. ++ */ ++#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) ++/** ++ * Print a notice (normal but significant message). ++ */ ++#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) ++/** ++ * Basic message printing. ++ */ ++#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x ) ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/host/otg/dwc_otg_regs.h +@@ -0,0 +1,2075 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ ++ * $Revision: #72 $ ++ * $Date: 2008/09/19 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_REGS_H__ ++#define __DWC_OTG_REGS_H__ ++ ++/** ++ * @file ++ * ++ * This file contains the data structures for accessing the DWC_otg core registers. ++ * ++ * The application interfaces with the HS OTG core by reading from and ++ * writing to the Control and Status Register (CSR) space through the ++ * AHB Slave interface. These registers are 32 bits wide, and the ++ * addresses are 32-bit-block aligned. ++ * CSRs are classified as follows: ++ * - Core Global Registers ++ * - Device Mode Registers ++ * - Device Global Registers ++ * - Device Endpoint Specific Registers ++ * - Host Mode Registers ++ * - Host Global Registers ++ * - Host Port CSRs ++ * - Host Channel Specific Registers ++ * ++ * Only the Core Global registers can be accessed in both Device and ++ * Host modes. When the HS OTG core is operating in one mode, either ++ * Device or Host, the application must not access registers from the ++ * other mode. 
When the core switches from one mode to another, the ++ * registers in the new mode of operation must be reprogrammed as they ++ * would be after a power-on reset. ++ */ ++ ++/** Maximum number of Periodic FIFOs */ ++#define MAX_PERIO_FIFOS 15 ++/** Maximum number of Transmit FIFOs */ ++#define MAX_TX_FIFOS 15 ++ ++/** Maximum number of Endpoints/HostChannels */ ++#define MAX_EPS_CHANNELS 16 ++ ++/****************************************************************************/ ++/** DWC_otg Core registers . ++ * The dwc_otg_core_global_regs structure defines the size ++ * and relative field offsets for the Core Global registers. ++ */ ++typedef struct dwc_otg_core_global_regs ++{ ++ /** OTG Control and Status Register. Offset: 000h */ ++ volatile uint32_t gotgctl; ++ /** OTG Interrupt Register. Offset: 004h */ ++ volatile uint32_t gotgint; ++ /**Core AHB Configuration Register. Offset: 008h */ ++ volatile uint32_t gahbcfg; ++ ++#define DWC_GLBINTRMASK 0x0001 ++#define DWC_DMAENABLE 0x0020 ++#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 ++#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 ++#define DWC_PTXEMPTYLVL_EMPTY 0x0100 ++#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 ++ ++ /**Core USB Configuration Register. Offset: 00Ch */ ++ volatile uint32_t gusbcfg; ++ /**Core Reset Register. Offset: 010h */ ++ volatile uint32_t grstctl; ++ /**Core Interrupt Register. Offset: 014h */ ++ volatile uint32_t gintsts; ++ /**Core Interrupt Mask Register. Offset: 018h */ ++ volatile uint32_t gintmsk; ++ /**Receive Status Queue Read Register (Read Only). Offset: 01Ch */ ++ volatile uint32_t grxstsr; ++ /**Receive Status Queue Read & POP Register (Read Only). Offset: 020h*/ ++ volatile uint32_t grxstsp; ++ /**Receive FIFO Size Register. Offset: 024h */ ++ volatile uint32_t grxfsiz; ++ /**Non Periodic Transmit FIFO Size Register. Offset: 028h */ ++ volatile uint32_t gnptxfsiz; ++ /**Non Periodic Transmit FIFO/Queue Status Register (Read ++ * Only). 
Offset: 02Ch */ ++ volatile uint32_t gnptxsts; ++ /**I2C Access Register. Offset: 030h */ ++ volatile uint32_t gi2cctl; ++ /**PHY Vendor Control Register. Offset: 034h */ ++ volatile uint32_t gpvndctl; ++ /**General Purpose Input/Output Register. Offset: 038h */ ++ volatile uint32_t ggpio; ++ /**User ID Register. Offset: 03Ch */ ++ volatile uint32_t guid; ++ /**Synopsys ID Register (Read Only). Offset: 040h */ ++ volatile uint32_t gsnpsid; ++ /**User HW Config1 Register (Read Only). Offset: 044h */ ++ volatile uint32_t ghwcfg1; ++ /**User HW Config2 Register (Read Only). Offset: 048h */ ++ volatile uint32_t ghwcfg2; ++#define DWC_SLAVE_ONLY_ARCH 0 ++#define DWC_EXT_DMA_ARCH 1 ++#define DWC_INT_DMA_ARCH 2 ++ ++#define DWC_MODE_HNP_SRP_CAPABLE 0 ++#define DWC_MODE_SRP_ONLY_CAPABLE 1 ++#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 ++#define DWC_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ /**User HW Config3 Register (Read Only). Offset: 04Ch */ ++ volatile uint32_t ghwcfg3; ++ /**User HW Config4 Register (Read Only). Offset: 050h*/ ++ volatile uint32_t ghwcfg4; ++ /** Reserved Offset: 054h-0FFh */ ++ volatile uint32_t reserved[43]; ++ /** Host Periodic Transmit FIFO Size Register. Offset: 100h */ ++ volatile uint32_t hptxfsiz; ++ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, ++ otherwise Device Transmit FIFO#n Register. ++ * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15). */ ++ volatile uint32_t dptxfsiz_dieptxf[15]; ++} dwc_otg_core_global_regs_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Control ++ * and Status Register (GOTGCTL). Set the bits using the bit ++ * fields then write the d32 value to the register. 
++ */ ++typedef union gotgctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned sesreqscs : 1; ++ unsigned sesreq : 1; ++ unsigned reserved2_7 : 6; ++ unsigned hstnegscs : 1; ++ unsigned hnpreq : 1; ++ unsigned hstsethnpen : 1; ++ unsigned devhnpen : 1; ++ unsigned reserved12_15 : 4; ++ unsigned conidsts : 1; ++ unsigned reserved17 : 1; ++ unsigned asesvld : 1; ++ unsigned bsesvld : 1; ++ unsigned currmod : 1; ++ unsigned reserved21_31 : 11; ++ } b; ++} gotgctl_data_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Interrupt Register ++ * (GOTGINT). Set/clear the bits using the bit fields then write the d32 ++ * value to the register. ++ */ ++typedef union gotgint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Current Mode */ ++ unsigned reserved0_1 : 2; ++ ++ /** Session End Detected */ ++ unsigned sesenddet : 1; ++ ++ unsigned reserved3_7 : 5; ++ ++ /** Session Request Success Status Change */ ++ unsigned sesreqsucstschng : 1; ++ /** Host Negotiation Success Status Change */ ++ unsigned hstnegsucstschng : 1; ++ ++ unsigned reserver10_16 : 7; ++ ++ /** Host Negotiation Detected */ ++ unsigned hstnegdet : 1; ++ /** A-Device Timeout Change */ ++ unsigned adevtoutchng : 1; ++ /** Debounce Done */ ++ unsigned debdone : 1; ++ ++ unsigned reserved31_20 : 12; ++ ++ } b; ++} gotgint_data_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core AHB Configuration ++ * Register (GAHBCFG). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. 
++ */ ++typedef union gahbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned glblintrmsk : 1; ++#define DWC_GAHBCFG_GLBINT_ENABLE 1 ++ ++ unsigned hburstlen : 4; ++#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 ++ ++ unsigned dmaenable : 1; ++#define DWC_GAHBCFG_DMAENABLE 1 ++ unsigned reserved : 1; ++ unsigned nptxfemplvl_txfemplvl : 1; ++ unsigned ptxfemplvl : 1; ++#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 ++#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 ++ unsigned reserved9_31 : 23; ++ } b; ++} gahbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core USB Configuration ++ * Register (GUSBCFG). Set the bits using the bit fields then write ++ * the d32 value to the register. ++ */ ++typedef union gusbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned toutcal : 3; ++ unsigned phyif : 1; ++ unsigned ulpi_utmi_sel : 1; ++ unsigned fsintf : 1; ++ unsigned physel : 1; ++ unsigned ddrsel : 1; ++ unsigned srpcap : 1; ++ unsigned hnpcap : 1; ++ unsigned usbtrdtim : 4; ++ unsigned nptxfrwnden : 1; ++ unsigned phylpwrclksel : 1; ++ unsigned otgutmifssel : 1; ++ unsigned ulpi_fsls : 1; ++ unsigned ulpi_auto_res : 1; ++ unsigned ulpi_clk_sus_m : 1; ++ unsigned ulpi_ext_vbus_drv : 1; ++ unsigned ulpi_int_vbus_indicator : 1; ++ unsigned term_sel_dl_pulse : 1; ++ unsigned reserved23_27 : 5; ++ unsigned tx_end_delay : 1; ++ unsigned reserved29_31 : 3; ++ } b; ++} gusbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core Reset Register ++ * (GRSTCTL). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. 
++ */ ++typedef union grstctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Core Soft Reset (CSftRst) (Device and Host) ++ * ++ * The application can flush the control logic in the ++ * entire core using this bit. This bit resets the ++ * pipelines in the AHB Clock domain as well as the ++ * PHY Clock domain. ++ * ++ * The state machines are reset to an IDLE state, the ++ * control bits in the CSRs are cleared, all the ++ * transmit FIFOs and the receive FIFO are flushed. ++ * ++ * The status mask bits that control the generation of ++ * the interrupt, are cleared, to clear the ++ * interrupt. The interrupt status bits are not ++ * cleared, so the application can get the status of ++ * any events that occurred in the core after it has ++ * set this bit. ++ * ++ * Any transactions on the AHB are terminated as soon ++ * as possible following the protocol. Any ++ * transactions on the USB are terminated immediately. ++ * ++ * The configuration settings in the CSRs are ++ * unchanged, so the software doesn't have to ++ * reprogram these registers (Device ++ * Configuration/Host Configuration/Core System ++ * Configuration/Core PHY Configuration). ++ * ++ * The application can write to this bit, any time it ++ * wants to reset the core. This is a self clearing ++ * bit and the core clears this bit after all the ++ * necessary logic is reset in the core, which may ++ * take several clocks, depending on the current state ++ * of the core. ++ */ ++ unsigned csftrst : 1; ++ /** Hclk Soft Reset ++ * ++ * The application uses this bit to reset the control logic in ++ * the AHB clock domain. Only AHB clock domain pipelines are ++ * reset. ++ */ ++ unsigned hsftrst : 1; ++ /** Host Frame Counter Reset (Host Only)
++ * ++ * The application can reset the (micro)frame number ++ * counter inside the core, using this bit. When the ++ * (micro)frame counter is reset, the subsequent SOF ++ * sent out by the core, will have a (micro)frame ++ * number of 0. ++ */ ++ unsigned hstfrm : 1; ++ /** In Token Sequence Learning Queue Flush ++ * (INTknQFlsh) (Device Only) ++ */ ++ unsigned intknqflsh : 1; ++ /** RxFIFO Flush (RxFFlsh) (Device and Host) ++ * ++ * The application can flush the entire Receive FIFO ++ * using this bit.

The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.

The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is reading from the RxFIFO nor the MAC ++ * is writing the data in to the FIFO.

The ++ * application should wait until the bit is cleared ++ * before performing any other operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned rxfflsh : 1; ++ /** TxFIFO Flush (TxFFlsh) (Device and Host). ++ * ++ * This bit is used to selectively flush a single or ++ * all transmit FIFOs. The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.

The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is writing into the TxFIFO nor the MAC ++ * is reading the data out of the FIFO.

The ++ * application should wait until the core clears this ++ * bit, before performing any operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned txfflsh : 1; ++ ++ /** TxFIFO Number (TxFNum) (Device and Host). ++ * ++ * This is the FIFO number which needs to be flushed, ++ * using the TxFIFO Flush bit. This field should not ++ * be changed until the TxFIFO Flush bit is cleared by ++ * the core. ++ * - 0x0 : Non Periodic TxFIFO Flush ++ * - 0x1 : Periodic TxFIFO #1 Flush in device mode ++ * or Periodic TxFIFO in host mode ++ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. ++ * - ... ++ * - 0xF : Periodic TxFIFO #15 Flush in device mode ++ * - 0x10: Flush all the Transmit NonPeriodic and ++ * Transmit Periodic FIFOs in the core ++ */ ++ unsigned txfnum : 5; ++ /** Reserved */ ++ unsigned reserved11_29 : 19; ++ /** DMA Request Signal. Indicated DMA request is in ++ * probress. Used for debug purpose. */ ++ unsigned dmareq : 1; ++ /** AHB Master Idle. Indicates the AHB Master State ++ * Machine is in IDLE condition. */ ++ unsigned ahbidle : 1; ++ } b; ++} grstctl_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core Interrupt Mask ++ * Register (GINTMSK). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. 
++ */ ++typedef union gintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned reserved0 : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned reserved16 : 1; ++ unsigned epmismatch : 1; ++ unsigned inepintr : 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintmsk_data_t; ++/** ++ * This union represents the bit fields of the Core Interrupt Register ++ * (GINTSTS). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. 
++ */ ++typedef union gintsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++#define DWC_SOF_INTR_MASK 0x0008 ++ /** register bits */ ++ struct ++ { ++#define DWC_HOST_MODE 1 ++ unsigned curmode : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned intokenrx : 1; ++ unsigned epmismatch : 1; ++ unsigned inepint: 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. 
++ */ ++typedef union device_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned epnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++ ++#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet ++#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete ++ ++#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK ++#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete ++#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet ++ unsigned pktsts : 4; ++ unsigned fn : 4; ++ unsigned reserved : 7; ++ } b; ++} device_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. ++ */ ++typedef union host_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned chnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++ ++ unsigned pktsts : 4; ++#define DWC_GRXSTS_PKTSTS_IN 0x2 ++#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 ++#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 ++#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 ++ ++ unsigned reserved : 11; ++ } b; ++} host_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, ++ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then ++ * read out the bits using the bit elements. ++ */ ++typedef union fifosize_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned startaddr : 16; ++ unsigned depth : 16; ++ } b; ++} fifosize_data_t; ++ ++/** ++ * This union represents the bit fields in the Non-Periodic Transmit ++ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. 
++ */ ++typedef union gnptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned nptxfspcavail : 16; ++ unsigned nptxqspcavail : 8; ++ /** Top of the Non-Periodic Transmit Request Queue ++ * - bit 24 - Terminate (Last entry for the selected ++ * channel/EP) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - IN/OUT ++ * - 2'b01 - Zero Length OUT ++ * - 2'b10 - PING/Complete Split ++ * - 2'b11 - Channel Halt ++ * - bits 30:27 - Channel/EP Number ++ */ ++ unsigned nptxqtop_terminate : 1; ++ unsigned nptxqtop_token : 2; ++ unsigned nptxqtop_chnep : 4; ++ unsigned reserved : 1; ++ } b; ++} gnptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Transmit ++ * FIFO Status Register (DTXFSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. ++ */ ++typedef union dtxfsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned txfspcavail : 16; ++ unsigned reserved : 16; ++ } b; ++} dtxfsts_data_t; ++ ++/** ++ * This union represents the bit fields in the I2C Control Register ++ * (I2CCTL). Read the register into the d32 element then read out the ++ * bits using the bit elements. ++ */ ++typedef union gi2cctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned rwdata : 8; ++ unsigned regaddr : 8; ++ unsigned addr : 7; ++ unsigned i2cen : 1; ++ unsigned ack : 1; ++ unsigned i2csuspctl : 1; ++ unsigned i2cdevaddr : 2; ++ unsigned reserved : 2; ++ unsigned rw : 1; ++ unsigned bsydne : 1; ++ } b; ++} gi2cctl_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config1 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. 
++ */ ++typedef union hwcfg1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ep_dir0 : 2; ++ unsigned ep_dir1 : 2; ++ unsigned ep_dir2 : 2; ++ unsigned ep_dir3 : 2; ++ unsigned ep_dir4 : 2; ++ unsigned ep_dir5 : 2; ++ unsigned ep_dir6 : 2; ++ unsigned ep_dir7 : 2; ++ unsigned ep_dir8 : 2; ++ unsigned ep_dir9 : 2; ++ unsigned ep_dir10 : 2; ++ unsigned ep_dir11 : 2; ++ unsigned ep_dir12 : 2; ++ unsigned ep_dir13 : 2; ++ unsigned ep_dir14 : 2; ++ unsigned ep_dir15 : 2; ++ } b; ++} hwcfg1_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config2 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg2_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG2 */ ++ unsigned op_mode : 3; ++#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 ++#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 ++#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ unsigned architecture : 2; ++ unsigned point2point : 1; ++ unsigned hs_phy_type : 2; ++#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 ++#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 ++ ++ unsigned fs_phy_type : 2; ++ unsigned num_dev_ep : 4; ++ unsigned num_host_chan : 4; ++ unsigned perio_ep_supported : 1; ++ unsigned dynamic_fifo : 1; ++ unsigned multi_proc_int : 1; ++ unsigned reserved21 : 1; ++ unsigned nonperio_tx_q_depth : 2; ++ unsigned host_perio_tx_q_depth : 2; ++ unsigned dev_token_q_depth : 5; ++ unsigned reserved31 : 1; ++ } b; ++} hwcfg2_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config3 ++ * Register. 
Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg3_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG3 */ ++ unsigned xfer_size_cntr_width : 4; ++ unsigned packet_size_cntr_width : 3; ++ unsigned otg_func : 1; ++ unsigned i2c : 1; ++ unsigned vendor_ctrl_if : 1; ++ unsigned optional_features : 1; ++ unsigned synch_reset_type : 1; ++ unsigned ahb_phy_clock_synch : 1; ++ unsigned reserved15_13 : 3; ++ unsigned dfifo_depth : 16; ++ } b; ++} hwcfg3_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config4 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg4_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned num_dev_perio_in_ep : 4; ++ unsigned power_optimiz : 1; ++ unsigned min_ahb_freq : 9; ++ unsigned utmi_phy_data_width : 2; ++ unsigned num_dev_mode_ctrl_ep : 4; ++ unsigned iddig_filt_en : 1; ++ unsigned vbus_valid_filt_en : 1; ++ unsigned a_valid_filt_en : 1; ++ unsigned b_valid_filt_en : 1; ++ unsigned session_end_filt_en : 1; ++ unsigned ded_fifo_en : 1; ++ unsigned num_in_eps : 4; ++ unsigned desc_dma : 1; ++ unsigned desc_dma_dyn : 1; ++ } b; ++} hwcfg4_data_t; ++ ++//////////////////////////////////////////// ++// Device Registers ++/** ++ * Device Global Registers. Offsets 800h-BFFh ++ * ++ * The following structures define the size and relative field offsets ++ * for the Device Mode Registers. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_global_regs ++{ ++ /** Device Configuration Register. Offset 800h */ ++ volatile uint32_t dcfg; ++ /** Device Control Register. Offset: 804h */ ++ volatile uint32_t dctl; ++ /** Device Status Register (Read Only). 
Offset: 808h */ ++ volatile uint32_t dsts; ++ /** Reserved. Offset: 80Ch */ ++ uint32_t unused; ++ /** Device IN Endpoint Common Interrupt Mask ++ * Register. Offset: 810h */ ++ volatile uint32_t diepmsk; ++ /** Device OUT Endpoint Common Interrupt Mask ++ * Register. Offset: 814h */ ++ volatile uint32_t doepmsk; ++ /** Device All Endpoints Interrupt Register. Offset: 818h */ ++ volatile uint32_t daint; ++ /** Device All Endpoints Interrupt Mask Register. Offset: ++ * 81Ch */ ++ volatile uint32_t daintmsk; ++ /** Device IN Token Queue Read Register-1 (Read Only). ++ * Offset: 820h */ ++ volatile uint32_t dtknqr1; ++ /** Device IN Token Queue Read Register-2 (Read Only). ++ * Offset: 824h */ ++ volatile uint32_t dtknqr2; ++ /** Device VBUS discharge Register. Offset: 828h */ ++ volatile uint32_t dvbusdis; ++ /** Device VBUS Pulse Register. Offset: 82Ch */ ++ volatile uint32_t dvbuspulse; ++ /** Device IN Token Queue Read Register-3 (Read Only). / ++ * Device Thresholding control register (Read/Write) ++ * Offset: 830h */ ++ volatile uint32_t dtknqr3_dthrctl; ++ /** Device IN Token Queue Read Register-4 (Read Only). / ++ * Device IN EPs empty Inr. Mask Register (Read/Write) ++ * Offset: 834h */ ++ volatile uint32_t dtknqr4_fifoemptymsk; ++ /** Device Each Endpoint Interrupt Register (Read Only). / ++ * Offset: 838h */ ++ volatile uint32_t deachint; ++ /** Device Each Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 83Ch */ ++ volatile uint32_t deachintmsk; ++ /** Device Each In Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 840h */ ++ volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS]; ++ /** Device Each Out Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 880h */ ++ volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS]; ++} dwc_otg_device_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device Configuration ++ * Register. 
Read the register into the d32 member then ++ * set/clear the bits using the bit elements. Write the ++ * d32 member to the dcfg register. ++ */ ++typedef union dcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Device Speed */ ++ unsigned devspd : 2; ++ /** Non Zero Length Status OUT Handshake */ ++ unsigned nzstsouthshk : 1; ++#define DWC_DCFG_SEND_STALL 1 ++ ++ unsigned reserved3 : 1; ++ /** Device Addresses */ ++ unsigned devaddr : 7; ++ /** Periodic Frame Interval */ ++ unsigned perfrint : 2; ++#define DWC_DCFG_FRAME_INTERVAL_80 0 ++#define DWC_DCFG_FRAME_INTERVAL_85 1 ++#define DWC_DCFG_FRAME_INTERVAL_90 2 ++#define DWC_DCFG_FRAME_INTERVAL_95 3 ++ ++ unsigned reserved13_17 : 5; ++ /** In Endpoint Mis-match count */ ++ unsigned epmscnt : 5; ++ /** Enable Descriptor DMA in Device mode */ ++ unsigned descdma : 1; ++ } b; ++} dcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
++ */ ++typedef union dctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Remote Wakeup */ ++ unsigned rmtwkupsig : 1; ++ /** Soft Disconnect */ ++ unsigned sftdiscon : 1; ++ /** Global Non-Periodic IN NAK Status */ ++ unsigned gnpinnaksts : 1; ++ /** Global OUT NAK Status */ ++ unsigned goutnaksts : 1; ++ /** Test Control */ ++ unsigned tstctl : 3; ++ /** Set Global Non-Periodic IN NAK */ ++ unsigned sgnpinnak : 1; ++ /** Clear Global Non-Periodic IN NAK */ ++ unsigned cgnpinnak : 1; ++ /** Set Global OUT NAK */ ++ unsigned sgoutnak : 1; ++ /** Clear Global OUT NAK */ ++ unsigned cgoutnak : 1; ++ ++ /** Power-On Programming Done */ ++ unsigned pwronprgdone : 1; ++ /** Global Continue on BNA */ ++ unsigned gcontbna : 1; ++ /** Global Multi Count */ ++ unsigned gmc : 2; ++ /** Ignore Frame Number for ISOC EPs */ ++ unsigned ifrmnum : 1; ++ /** NAK on Babble */ ++ unsigned nakonbble : 1; ++ ++ unsigned reserved16_31 : 16; ++ } b; ++} dctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Status ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union dsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Suspend Status */ ++ unsigned suspsts : 1; ++ /** Enumerated Speed */ ++ unsigned enumspd : 2; ++#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0 ++#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1 ++#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2 ++#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3 ++ /** Erratic Error */ ++ unsigned errticerr : 1; ++ unsigned reserved4_7: 4; ++ /** Frame or Microframe Number of the received SOF */ ++ unsigned soffn : 14; ++ unsigned reserved22_31 : 10; ++ } b; ++} dsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device IN EP Interrupt ++ * Register and the Device IN EP Common Mask Register. 
++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. ++ */ ++typedef union diepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete mask */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable mask */ ++ unsigned epdisabled : 1; ++ /** AHB Error mask */ ++ unsigned ahberr : 1; ++ /** TimeOUT Handshake mask (non-ISOC EPs) */ ++ unsigned timeout : 1; ++ /** IN Token received with TxF Empty mask */ ++ unsigned intktxfemp : 1; ++ /** IN Token Received with EP mismatch mask */ ++ unsigned intknepmis : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned inepnakeff : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned emptyintr : 1; ++ ++ unsigned txfifoundrn : 1; ++ ++ /** BNA Interrupt mask */ ++ unsigned bna : 1; ++ ++ unsigned reserved10_12 : 3; ++ /** BNA Interrupt mask */ ++ unsigned nak : 1; ++ ++ unsigned reserved14_31 : 18; ++ } b; ++} diepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union diepint_data diepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP Interrupt ++ * Registerand Device OUT EP Common Interrupt Mask Register. ++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union doepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable */ ++ unsigned epdisabled : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** Setup Phase Done (contorl EPs) */ ++ unsigned setup : 1; ++ /** OUT Token Received when Endpoint Disabled */ ++ unsigned outtknepdis : 1; ++ ++ unsigned stsphsercvd : 1; ++ /** Back-to-Back SETUP Packets Received */ ++ unsigned back2backsetup : 1; ++ ++ unsigned reserved7 : 1; ++ /** OUT packet Error */ ++ unsigned outpkterr : 1; ++ /** BNA Interrupt */ ++ unsigned bna : 1; ++ ++ unsigned reserved10 : 1; ++ /** Packet Drop Status */ ++ unsigned pktdrpsts : 1; ++ /** Babble Interrupt */ ++ unsigned babble : 1; ++ /** NAK Interrupt */ ++ unsigned nak : 1; ++ /** NYET Interrupt */ ++ unsigned nyet : 1; ++ ++ unsigned reserved15_31 : 17; ++ } b; ++} doepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union doepint_data doepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device All EP Interrupt ++ * and Mask Registers. ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union daint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned in : 16; ++ /** OUT Endpoint bits */ ++ unsigned out : 16; ++ } ep; ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned inep0 : 1; ++ unsigned inep1 : 1; ++ unsigned inep2 : 1; ++ unsigned inep3 : 1; ++ unsigned inep4 : 1; ++ unsigned inep5 : 1; ++ unsigned inep6 : 1; ++ unsigned inep7 : 1; ++ unsigned inep8 : 1; ++ unsigned inep9 : 1; ++ unsigned inep10 : 1; ++ unsigned inep11 : 1; ++ unsigned inep12 : 1; ++ unsigned inep13 : 1; ++ unsigned inep14 : 1; ++ unsigned inep15 : 1; ++ /** OUT Endpoint bits */ ++ unsigned outep0 : 1; ++ unsigned outep1 : 1; ++ unsigned outep2 : 1; ++ unsigned outep3 : 1; ++ unsigned outep4 : 1; ++ unsigned outep5 : 1; ++ unsigned outep6 : 1; ++ unsigned outep7 : 1; ++ unsigned outep8 : 1; ++ unsigned outep9 : 1; ++ unsigned outep10 : 1; ++ unsigned outep11 : 1; ++ unsigned outep12 : 1; ++ unsigned outep13 : 1; ++ unsigned outep14 : 1; ++ unsigned outep15 : 1; ++ } b; ++} daint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN Token Queue ++ * Read Registers. ++ * - Read the register into the d32 member. ++ * - READ-ONLY Register ++ */ ++typedef union dtknq1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** In Token Queue Write Pointer */ ++ unsigned intknwptr : 5; ++ /** Reserved */ ++ unsigned reserved05_06 : 2; ++ /** write pointer has wrapped. */ ++ unsigned wrap_bit : 1; ++ /** EP Numbers of IN Tokens 0 ... 4 */ ++ unsigned epnums0_5 : 24; ++ }b; ++} dtknq1_data_t; ++ ++/** ++ * This union represents Threshold control Register ++ * - Read and write the register into the d32 member. ++ * - READ-WRITABLE Register ++ */ ++typedef union dthrctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** non ISO Tx Thr. 
Enable */ ++ unsigned non_iso_thr_en : 1; ++ /** ISO Tx Thr. Enable */ ++ unsigned iso_thr_en : 1; ++ /** Tx Thr. Length */ ++ unsigned tx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved11_15 : 5; ++ /** Rx Thr. Enable */ ++ unsigned rx_thr_en : 1; ++ /** Rx Thr. Length */ ++ unsigned rx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved26_31 : 6; ++ }b; ++} dthrctl_data_t; ++ ++ ++/** ++ * Device Logical IN Endpoint-Specific Registers. Offsets ++ * 900h-AFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_in_ep_regs ++{ ++ /** Device IN Endpoint Control Register. Offset:900h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t diepctl; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 04h */ ++ uint32_t reserved04; ++ /** Device IN Endpoint Interrupt Register. Offset:900h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t diepint; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device IN Endpoint Transfer Size ++ * Register. Offset:900h + (ep_num * 20h) + 10h */ ++ volatile uint32_t dieptsiz; ++ /** Device IN Endpoint DMA Address Register. Offset:900h + ++ * (ep_num * 20h) + 14h */ ++ volatile uint32_t diepdma; ++ /** Device IN Endpoint Transmit FIFO Status Register. Offset:900h + ++ * (ep_num * 20h) + 18h */ ++ volatile uint32_t dtxfsts; ++ /** Device IN Endpoint DMA Buffer Register. Offset:900h + ++ * (ep_num * 20h) + 1Ch */ ++ volatile uint32_t diepdmab; ++} dwc_otg_dev_in_ep_regs_t; ++ ++/** ++ * Device Logical OUT Endpoint-Specific Registers. Offsets: ++ * B00h-CFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. 
++ */ ++typedef struct dwc_otg_dev_out_ep_regs ++{ ++ /** Device OUT Endpoint Control Register. Offset:B00h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t doepctl; ++ /** Device OUT Endpoint Frame number Register. Offset: ++ * B00h + (ep_num * 20h) + 04h */ ++ volatile uint32_t doepfn; ++ /** Device OUT Endpoint Interrupt Register. Offset:B00h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t doepint; ++ /** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device OUT Endpoint Transfer Size Register. Offset: ++ * B00h + (ep_num * 20h) + 10h */ ++ volatile uint32_t doeptsiz; ++ /** Device OUT Endpoint DMA Address Register. Offset:B00h ++ * + (ep_num * 20h) + 14h */ ++ volatile uint32_t doepdma; ++ /** Reserved. Offset:B00h + * (ep_num * 20h) + 1Ch */ ++ uint32_t unused; ++ /** Device OUT Endpoint DMA Buffer Register. Offset:B00h ++ * + (ep_num * 20h) + 1Ch */ ++ uint32_t doepdmab; ++} dwc_otg_dev_out_ep_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union depctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Maximum Packet Size ++ * IN/OUT EPn ++ * IN/OUT EP0 - 2 bits ++ * 2'b00: 64 Bytes ++ * 2'b01: 32 ++ * 2'b10: 16 ++ * 2'b11: 8 */ ++ unsigned mps : 11; ++#define DWC_DEP0CTL_MPS_64 0 ++#define DWC_DEP0CTL_MPS_32 1 ++#define DWC_DEP0CTL_MPS_16 2 ++#define DWC_DEP0CTL_MPS_8 3 ++ ++ /** Next Endpoint ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned nextep : 4; ++ ++ /** USB Active Endpoint */ ++ unsigned usbactep : 1; ++ ++ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) ++ * This field contains the PID of the packet going to ++ * be received or transmitted on this endpoint. 
The ++ * application should program the PID of the first ++ * packet going to be received or transmitted on this ++ * endpoint , after the endpoint is ++ * activated. Application use the SetD1PID and ++ * SetD0PID fields of this register to program either ++ * D0 or D1 PID. ++ * ++ * The encoding for this field is ++ * - 0: D0 ++ * - 1: D1 ++ */ ++ unsigned dpid : 1; ++ ++ /** NAK Status */ ++ unsigned naksts : 1; ++ ++ /** Endpoint Type ++ * 2'b00: Control ++ * 2'b01: Isochronous ++ * 2'b10: Bulk ++ * 2'b11: Interrupt */ ++ unsigned eptype : 2; ++ ++ /** Snoop Mode ++ * OUT EPn/OUT EP0 ++ * IN EPn/IN EP0 - reserved */ ++ unsigned snp : 1; ++ ++ /** Stall Handshake */ ++ unsigned stall : 1; ++ ++ /** Tx Fifo Number ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned txfnum : 4; ++ ++ /** Clear NAK */ ++ unsigned cnak : 1; ++ /** Set NAK */ ++ unsigned snak : 1; ++ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA0. Set Even ++ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to even (micro) ++ * frame. ++ */ ++ unsigned setd0pid : 1; ++ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA1 Set Odd ++ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to odd (micro) frame. ++ */ ++ unsigned setd1pid : 1; ++ ++ /** Endpoint Disable */ ++ unsigned epdis : 1; ++ /** Endpoint Enable */ ++ unsigned epena : 1; ++ } b; ++} depctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
++ */ ++typedef union deptsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 19; ++ /** Packet Count */ ++ unsigned pktcnt : 10; ++ /** Multi Count - Periodic IN endpoints */ ++ unsigned mc : 2; ++ unsigned reserved : 1; ++ } b; ++} deptsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP 0 Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union deptsiz0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 7; ++ /** Reserved */ ++ unsigned reserved7_18 : 12; ++ /** Packet Count */ ++ unsigned pktcnt : 1; ++ /** Reserved */ ++ unsigned reserved20_28 : 9; ++ /**Setup Packet Count (DOEPTSIZ0 Only) */ ++ unsigned supcnt : 2; ++ unsigned reserved31; ++ } b; ++} deptsiz0_data_t; ++ ++ ++///////////////////////////////////////////////// ++// DMA Descriptor Specific Structures ++// ++ ++/** Buffer status definitions */ ++ ++#define BS_HOST_READY 0x0 ++#define BS_DMA_BUSY 0x1 ++#define BS_DMA_DONE 0x2 ++#define BS_HOST_BUSY 0x3 ++ ++/** Receive/Transmit status definitions */ ++ ++#define RTS_SUCCESS 0x0 ++#define RTS_BUFFLUSH 0x1 ++#define RTS_RESERVED 0x2 ++#define RTS_BUFERR 0x3 ++ ++ ++/** ++ * This union represents the bit fields in the DMA Descriptor ++ * status quadlet. Read the quadlet into the d32 member then ++ * set/clear the bits using the bit, b_iso_out and ++ * b_iso_in elements. 
++ */ ++typedef union desc_sts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned bytes : 16; ++ ++ unsigned reserved16_22 : 7; ++ /** Multiple Transfer - only for OUT EPs */ ++ unsigned mtrf : 1; ++ /** Setup Packet received - only for OUT EPs */ ++ unsigned sr : 1; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned sts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b; ++ ++#ifdef DWC_EN_ISOC ++ /** iso out quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned rxbytes : 11; ++ ++ unsigned reserved11 : 1; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Received ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned rxsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_out; ++ ++ /** iso in quadlet bits */ ++ struct { ++ /** Transmited number of bytes */ ++ unsigned txbytes : 12; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Transmited ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Transmit Status */ ++ unsigned txsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_in; ++#endif //DWC_EN_ISOC ++} desc_sts_data_t; ++ ++/** ++ * DMA Descriptor structure ++ * ++ * DMA Descriptor structure contains two quadlets: ++ * Status quadlet and Data buffer pointer. 
++ */ ++typedef struct dwc_otg_dma_desc ++{ ++ /** DMA Descriptor status quadlet */ ++ desc_sts_data_t status; ++ /** DMA Descriptor data buffer pointer */ ++ dma_addr_t buf; ++} dwc_otg_dma_desc_t; ++ ++/** ++ * The dwc_otg_dev_if structure contains information needed to manage ++ * the DWC_otg controller acting in device mode. It represents the ++ * programming view of the device-specific aspects of the controller. ++ */ ++typedef struct dwc_otg_dev_if ++{ ++ /** Pointer to device Global registers. ++ * Device Global Registers starting at offset 800h ++ */ ++ dwc_otg_device_global_regs_t *dev_global_regs; ++#define DWC_DEV_GLOBAL_REG_OFFSET 0x800 ++ ++ /** ++ * Device Logical IN Endpoint-Specific Registers 900h-AFCh ++ */ ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_IN_EP_REG_OFFSET 0x900 ++#define DWC_EP_REG_OFFSET 0x20 ++ ++ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */ ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00 ++ ++ /* Device configuration information*/ ++ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */ ++ uint8_t num_in_eps; /**< Number # of Tx EP range: 0-15 exept ep0 */ ++ uint8_t num_out_eps; /**< Number # of Rx EP range: 0-15 exept ep 0*/ ++ ++ /** Size of periodic FIFOs (Bytes) */ ++ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++ ++ /** Size of Tx FIFOs (Bytes) */ ++ uint16_t tx_fifo_size[MAX_TX_FIFOS]; ++ ++ /** Thresholding enable flags and length varaiables **/ ++ uint16_t rx_thr_en; ++ uint16_t iso_tx_thr_en; ++ uint16_t non_iso_tx_thr_en; ++ ++ uint16_t rx_thr_length; ++ uint16_t tx_thr_length; ++ ++ /** ++ * Pointers to the DMA Descriptors for EP0 Control ++ * transfers (virtual and physical) ++ */ ++ ++ /** 2 descriptors for SETUP packets */ ++ uint32_t dma_setup_desc_addr[2]; ++ dwc_otg_dma_desc_t* setup_desc_addr[2]; ++ ++ /** Pointer to Descriptor with latest SETUP packet */ ++ dwc_otg_dma_desc_t* psetup; ++ ++ /** 
Index of current SETUP handler descriptor */ ++ uint32_t setup_desc_index; ++ ++ /** Descriptor for Data In or Status In phases */ ++ uint32_t dma_in_desc_addr; ++ dwc_otg_dma_desc_t* in_desc_addr;; ++ ++ /** Descriptor for Data Out or Status Out phases */ ++ uint32_t dma_out_desc_addr; ++ dwc_otg_dma_desc_t* out_desc_addr; ++ ++} dwc_otg_dev_if_t; ++ ++ ++ ++ ++///////////////////////////////////////////////// ++// Host Mode Register Structures ++// ++/** ++ * The Host Global Registers structure defines the size and relative ++ * field offsets for the Host Mode Global Registers. Host Global ++ * Registers offsets 400h-7FFh. ++*/ ++typedef struct dwc_otg_host_global_regs ++{ ++ /** Host Configuration Register. Offset: 400h */ ++ volatile uint32_t hcfg; ++ /** Host Frame Interval Register. Offset: 404h */ ++ volatile uint32_t hfir; ++ /** Host Frame Number / Frame Remaining Register. Offset: 408h */ ++ volatile uint32_t hfnum; ++ /** Reserved. Offset: 40Ch */ ++ uint32_t reserved40C; ++ /** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */ ++ volatile uint32_t hptxsts; ++ /** Host All Channels Interrupt Register. Offset: 414h */ ++ volatile uint32_t haint; ++ /** Host All Channels Interrupt Mask Register. Offset: 418h */ ++ volatile uint32_t haintmsk; ++} dwc_otg_host_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Configuration Register. ++ * Read the register into the d32 member then set/clear the bits using ++ * the bit elements. Write the d32 member to the hcfg register. ++ */ ++typedef union hcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** FS/LS Phy Clock Select */ ++ unsigned fslspclksel : 2; ++#define DWC_HCFG_30_60_MHZ 0 ++#define DWC_HCFG_48_MHZ 1 ++#define DWC_HCFG_6_MHZ 2 ++ ++ /** FS/LS Only Support */ ++ unsigned fslssupp : 1; ++ } b; ++} hcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. 
++ */ ++typedef union hfir_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frint : 16; ++ unsigned reserved : 16; ++ } b; ++} hfir_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. ++ */ ++typedef union hfnum_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frnum : 16; ++#define DWC_HFNUM_MAX_FRNUM 0x3FFF ++ unsigned frrem : 16; ++ } b; ++} hfnum_data_t; ++ ++typedef union hptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned ptxfspcavail : 16; ++ unsigned ptxqspcavail : 8; ++ /** Top of the Periodic Transmit Request Queue ++ * - bit 24 - Terminate (last entry for the selected channel) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - Zero length ++ * - 2'b01 - Ping ++ * - 2'b10 - Disable ++ * - bits 30:27 - Channel Number ++ * - bit 31 - Odd/even microframe ++ */ ++ unsigned ptxqtop_terminate : 1; ++ unsigned ptxqtop_token : 2; ++ unsigned ptxqtop_chnum : 4; ++ unsigned ptxqtop_odd : 1; ++ } b; ++} hptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Port Control and Status ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hprt0 register. 
++ */ ++typedef union hprt0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned prtconnsts : 1; ++ unsigned prtconndet : 1; ++ unsigned prtena : 1; ++ unsigned prtenchng : 1; ++ unsigned prtovrcurract : 1; ++ unsigned prtovrcurrchng : 1; ++ unsigned prtres : 1; ++ unsigned prtsusp : 1; ++ unsigned prtrst : 1; ++ unsigned reserved9 : 1; ++ unsigned prtlnsts : 2; ++ unsigned prtpwr : 1; ++ unsigned prttstctl : 4; ++ unsigned prtspd : 2; ++#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 ++#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 ++#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 ++ unsigned reserved19_31 : 13; ++ } b; ++} hprt0_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. 
++ */ ++typedef union haintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haintmsk_data_t; ++ ++/** ++ * Host Channel Specific Registers. 500h-5FCh ++ */ ++typedef struct dwc_otg_hc_regs ++{ ++ /** Host Channel 0 Characteristic Register. Offset: 500h + (chan_num * 20h) + 00h */ ++ volatile uint32_t hcchar; ++ /** Host Channel 0 Split Control Register. Offset: 500h + (chan_num * 20h) + 04h */ ++ volatile uint32_t hcsplt; ++ /** Host Channel 0 Interrupt Register. Offset: 500h + (chan_num * 20h) + 08h */ ++ volatile uint32_t hcint; ++ /** Host Channel 0 Interrupt Mask Register. Offset: 500h + (chan_num * 20h) + 0Ch */ ++ volatile uint32_t hcintmsk; ++ /** Host Channel 0 Transfer Size Register. Offset: 500h + (chan_num * 20h) + 10h */ ++ volatile uint32_t hctsiz; ++ /** Host Channel 0 DMA Address Register. Offset: 500h + (chan_num * 20h) + 14h */ ++ volatile uint32_t hcdma; ++ /** Reserved. Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch */ ++ uint32_t reserved[2]; ++} dwc_otg_hc_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Characteristics ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. 
++ */ ++typedef union hcchar_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Maximum packet size in bytes */ ++ unsigned mps : 11; ++ ++ /** Endpoint number */ ++ unsigned epnum : 4; ++ ++ /** 0: OUT, 1: IN */ ++ unsigned epdir : 1; ++ ++ unsigned reserved : 1; ++ ++ /** 0: Full/high speed device, 1: Low speed device */ ++ unsigned lspddev : 1; ++ ++ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ ++ unsigned eptype : 2; ++ ++ /** Packets per frame for periodic transfers. 0 is reserved. */ ++ unsigned multicnt : 2; ++ ++ /** Device address */ ++ unsigned devaddr : 7; ++ ++ /** ++ * Frame to transmit periodic transaction. ++ * 0: even, 1: odd ++ */ ++ unsigned oddfrm : 1; ++ ++ /** Channel disable */ ++ unsigned chdis : 1; ++ ++ /** Channel enable */ ++ unsigned chen : 1; ++ } b; ++} hcchar_data_t; ++ ++typedef union hcsplt_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Port Address */ ++ unsigned prtaddr : 7; ++ ++ /** Hub Address */ ++ unsigned hubaddr : 7; ++ ++ /** Transaction Position */ ++ unsigned xactpos : 2; ++#define DWC_HCSPLIT_XACTPOS_MID 0 ++#define DWC_HCSPLIT_XACTPOS_END 1 ++#define DWC_HCSPLIT_XACTPOS_BEGIN 2 ++#define DWC_HCSPLIT_XACTPOS_ALL 3 ++ ++ /** Do Complete Split */ ++ unsigned compsplt : 1; ++ ++ /** Reserved */ ++ unsigned reserved : 14; ++ ++ /** Split Enble */ ++ unsigned spltena : 1; ++ } b; ++} hcsplt_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. 
++ */ ++typedef union hcint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer Complete */ ++ unsigned xfercomp : 1; ++ /** Channel Halted */ ++ unsigned chhltd : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** STALL Response Received */ ++ unsigned stall : 1; ++ /** NAK Response Received */ ++ unsigned nak : 1; ++ /** ACK Response Received */ ++ unsigned ack : 1; ++ /** NYET Response Received */ ++ unsigned nyet : 1; ++ /** Transaction Err */ ++ unsigned xacterr : 1; ++ /** Babble Error */ ++ unsigned bblerr : 1; ++ /** Frame Overrun */ ++ unsigned frmovrun : 1; ++ /** Data Toggle Error */ ++ unsigned datatglerr : 1; ++ /** Reserved */ ++ unsigned reserved : 21; ++ } b; ++} hcint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Transfer Size ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. ++ */ ++typedef union hctsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Total transfer size in bytes */ ++ unsigned xfersize : 19; ++ ++ /** Data packets to transfer */ ++ unsigned pktcnt : 10; ++ ++ /** ++ * Packet ID for next data packet ++ * 0: DATA0 ++ * 1: DATA2 ++ * 2: DATA1 ++ * 3: MDATA (non-Control), SETUP (Control) ++ */ ++ unsigned pid : 2; ++#define DWC_HCTSIZ_DATA0 0 ++#define DWC_HCTSIZ_DATA1 2 ++#define DWC_HCTSIZ_DATA2 1 ++#define DWC_HCTSIZ_MDATA 3 ++#define DWC_HCTSIZ_SETUP 3 ++ ++ /** Do PING protocol when 1 */ ++ unsigned dopng : 1; ++ } b; ++} hctsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Interrupt Mask ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcintmsk register. 
++ */ ++typedef union hcintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned xfercompl : 1; ++ unsigned chhltd : 1; ++ unsigned ahberr : 1; ++ unsigned stall : 1; ++ unsigned nak : 1; ++ unsigned ack : 1; ++ unsigned nyet : 1; ++ unsigned xacterr : 1; ++ unsigned bblerr : 1; ++ unsigned frmovrun : 1; ++ unsigned datatglerr : 1; ++ unsigned reserved : 21; ++ } b; ++} hcintmsk_data_t; ++ ++/** OTG Host Interface Structure. ++ * ++ * The OTG Host Interface Structure structure contains information ++ * needed to manage the DWC_otg controller acting in host mode. It ++ * represents the programming view of the host-specific aspects of the ++ * controller. ++ */ ++typedef struct dwc_otg_host_if ++{ ++ /** Host Global Registers starting at offset 400h.*/ ++ dwc_otg_host_global_regs_t *host_global_regs; ++#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 ++ ++ /** Host Port 0 Control and Status Register */ ++ volatile uint32_t *hprt0; ++#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 ++ ++ ++ /** Host Channel Specific Registers at offsets 500h-5FCh. */ ++ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; ++#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500 ++#define DWC_OTG_CHAN_REGS_OFFSET 0x20 ++ ++ ++ /* Host configuration information */ ++ /** Number of Host Channels (range: 1-16) */ ++ uint8_t num_host_channels; ++ /** Periodic EPs supported (0: no, 1: yes) */ ++ uint8_t perio_eps_supported; ++ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */ ++ uint16_t perio_tx_fifo_size; ++ ++} dwc_otg_host_if_t; ++ ++ ++/** ++ * This union represents the bit fields in the Power and Clock Gating Control ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union pcgcctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Stop Pclk */ ++ unsigned stoppclk : 1; ++ /** Gate Hclk */ ++ unsigned gatehclk : 1; ++ /** Power Clamp */ ++ unsigned pwrclmp : 1; ++ /** Reset Power Down Modules */ ++ unsigned rstpdwnmodule : 1; ++ /** PHY Suspended */ ++ unsigned physuspended : 1; ++ ++ unsigned reserved : 27; ++ } b; ++} pcgcctl_data_t; ++ ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/otg/Makefile +@@ -0,0 +1,52 @@ ++# ++# Makefile for DWC_otg Highspeed USB controller driver ++# ++ ++ifneq ($(KERNELRELEASE),) ++EXTRA_CFLAGS += -DDEBUG ++ ++# Use one of the following flags to compile the software in host-only or ++# device-only mode. ++#CPPFLAGS += -DDWC_HOST_ONLY ++#CPPFLAGS += -DDWC_DEVICE_ONLY ++ ++EXTRA_CFLAGS += -Dlinux -DDWC_HS_ELECT_TST ++#EXTRA_CFLAGS += -DDWC_EN_ISOC ++ ++ifneq ($(CONFIG_USB_CNS3XXX_OTG_HCD_ONLY),) ++EXTRA_CFLAGS += -DDWC_HOST_ONLY ++endif ++ ++ifneq ($(CONFIG_USB_CNS3XXX_OTG_PCD_ONLY),) ++EXTRA_CFLAGS += -DDWC_DEVICE_ONLY ++endif ++ ++obj-$(CONFIG_USB_CNS3XXX_OTG) := dwc_otg.o ++#obj-$(CONFIG_USB_GADGET_CNS3XXX_OTG) := dwc_otg.o ++ ++dwc_otg-objs := dwc_otg_driver.o dwc_otg_attr.o ++dwc_otg-objs += dwc_otg_cil.o dwc_otg_cil_intr.o ++dwc_otg-objs += dwc_otg_pcd.o dwc_otg_pcd_intr.o ++dwc_otg-objs += dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o ++ ++else ++ ++PWD := $(shell pwd) ++ ++# Command paths ++CTAGS := $(CTAGS) ++DOXYGEN := $(DOXYGEN) ++ ++default: ++ $(MAKE) -C$(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules ++ ++docs: $(wildcard *.[hc]) doc/doxygen.cfg ++ $(DOXYGEN) doc/doxygen.cfg ++ ++tags: $(wildcard *.[hc]) ++ $(CTAGS) -e $(wildcard *.[hc]) $(wildcard linux/*.[hc]) $(wildcard $(KDIR)/include/linux/usb*.h) ++ ++endif ++ ++clean: ++ rm -rf *.o *.ko .*cmd *.mod.c .tmp_versions +--- a/drivers/usb/Kconfig ++++ b/drivers/usb/Kconfig +@@ -39,6 +39,7 @@ config USB_ARCH_HAS_OHCI + default y if 
ARCH_AT91 + default y if ARCH_PNX4008 && I2C + default y if MFD_TC6393XB ++ default y if ARCH_CNS3XXX + # PPC: + default y if STB03xxx + default y if PPC_MPC52xx +@@ -58,6 +59,7 @@ config USB_ARCH_HAS_EHCI + default y if PPC_83xx + default y if SOC_AU1200 + default y if ARCH_IXP4XX ++ default y if ARCH_CNS3XXX + default PCI + + # ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. +--- a/drivers/usb/Makefile ++++ b/drivers/usb/Makefile +@@ -20,6 +20,8 @@ obj-$(CONFIG_USB_U132_HCD) += host/ + obj-$(CONFIG_USB_R8A66597_HCD) += host/ + obj-$(CONFIG_USB_HWA_HCD) += host/ + obj-$(CONFIG_USB_ISP1760_HCD) += host/ ++obj-$(CONFIG_USB_CNS3XXX_OTG) += host/ ++obj-$(CONFIG_USB_GADGET_CNS3XXX_OTG) += host/ + + obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ + +--- a/drivers/usb/storage/protocol.c ++++ b/drivers/usb/storage/protocol.c +@@ -182,9 +182,10 @@ unsigned int usb_stor_access_xfer_buf(un + PAGE_SIZE - poff); + unsigned char *ptr = kmap(page); + +- if (dir == TO_XFER_BUF) ++ if (dir == TO_XFER_BUF) { + memcpy(ptr + poff, buffer + cnt, plen); +- else ++ flush_dcache_page(page); ++ } else + memcpy(buffer + cnt, ptr + poff, plen); + kunmap(page); + +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -1201,8 +1201,14 @@ struct urb { + unsigned int pipe; /* (in) pipe information */ + int status; /* (return) non-ISO status */ + unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ ++ + void *transfer_buffer; /* (in) associated data buffer */ + dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ ++ ++ void * aligned_transfer_buffer; /* (in) associated data buffer */ ++ dma_addr_t aligned_transfer_dma; /* (in) dma addr for transfer_buffer */ ++ u32 aligned_transfer_buffer_length; /* (in) data buffer length */ ++ + struct usb_sg_request *sg; /* (in) scatter gather buffer list */ + int num_sgs; /* (in) number of entries in the sg list */ + u32 transfer_buffer_length; /* (in) data buffer length */ diff --git 
a/target/linux/cns3xxx/patches-2.6.31/209-cns3xxx_watchdog_support.patch b/target/linux/cns3xxx/patches-2.6.31/209-cns3xxx_watchdog_support.patch new file mode 100644 index 0000000000..af9a60152f --- /dev/null +++ b/target/linux/cns3xxx/patches-2.6.31/209-cns3xxx_watchdog_support.patch @@ -0,0 +1,496 @@ +--- /dev/null ++++ b/drivers/watchdog/cns3xxx_wdt.c +@@ -0,0 +1,465 @@ ++/******************************************************************************* ++ * ++ * drivers/watchdog/cns3xxx_wdt.c ++ * ++ * Watchdog timer driver for the CNS3XXX SOCs ++ * ++ * Author: Scott Shu ++ * ++ * Copyright (c) 2008 Cavium Networks ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++struct cns3xxx_wdt { ++ unsigned long timer_alive; ++ struct device *dev; ++ void __iomem *base; ++ int irq; ++ unsigned int perturb; ++ char expect_close; ++}; ++ ++static struct platform_device *cns3xxx_wdt_dev; ++extern unsigned int twd_timer_rate; ++static spinlock_t wdt_lock; ++ ++#define TIMER_MARGIN 60 ++static int cns3xxx_margin = TIMER_MARGIN; ++module_param(cns3xxx_margin, int, 0); ++MODULE_PARM_DESC(cns3xxx_margin, ++ "CNS3XXX timer margin in seconds. (0 < cns3xxx_margin < 65536, default=" ++ __MODULE_STRING(TIMER_MARGIN) ")"); ++ ++static int nowayout = WATCHDOG_NOWAYOUT; ++module_param(nowayout, int, 0); ++MODULE_PARM_DESC(nowayout, ++ "Watchdog cannot be stopped once started (default=" ++ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); ++ ++#define ONLY_TESTING 0 ++static int cns3xxx_noboot = ONLY_TESTING; ++module_param(cns3xxx_noboot, int, 0); ++MODULE_PARM_DESC(cns3xxx_noboot, "CNS3XXX watchdog action, " ++ "set to 1 to ignore reboots, 0 to reboot (default=" ++ __MODULE_STRING(ONLY_TESTING) ")"); ++ ++/* ++ * This is the interrupt handler. Note that we only use this ++ * in testing mode, so don't actually do a reboot here. 
++ */ ++static irqreturn_t cns3xxx_wdt_fire(int irq, void *arg) ++{ ++ struct cns3xxx_wdt *wdt = arg; ++ ++ /* Check it really was our interrupt */ ++ if (readl(wdt->base + TWD_WDOG_INTSTAT)) { ++ dev_printk(KERN_CRIT, wdt->dev, ++ "Triggered - Reboot ignored.\n"); ++ /* Clear the interrupt on the watchdog */ ++ writel(1, wdt->base + TWD_WDOG_INTSTAT); ++ return IRQ_HANDLED; ++ } ++ return IRQ_NONE; ++} ++ ++/* ++ * cns3xxx_wdt_keepalive - reload the timer ++ * ++ * Note that the spec says a DIFFERENT value must be written to the reload ++ * register each time. The "perturb" variable deals with this by adding 1 ++ * to the count every other time the function is called. ++ */ ++static void cns3xxx_wdt_keepalive(struct cns3xxx_wdt *wdt) ++{ ++ unsigned int count; ++ ++ /* Assume prescale is set to 256 */ ++ count = (twd_timer_rate / 256) * cns3xxx_margin; ++ ++ /* Reload the counter */ ++ spin_lock(&wdt_lock); ++ writel(count + wdt->perturb, wdt->base + TWD_WDOG_LOAD); ++ wdt->perturb = wdt->perturb ? 
0 : 1; ++ spin_unlock(&wdt_lock); ++} ++ ++static void cns3xxx_wdt_stop(struct cns3xxx_wdt *wdt) ++{ ++ spin_lock(&wdt_lock); ++ writel(0x12345678, wdt->base + TWD_WDOG_DISABLE); ++ writel(0x87654321, wdt->base + TWD_WDOG_DISABLE); ++ writel(0x0, wdt->base + TWD_WDOG_CONTROL); ++ spin_unlock(&wdt_lock); ++} ++ ++static void cns3xxx_wdt_start(struct cns3xxx_wdt *wdt) ++{ ++ dev_printk(KERN_INFO, wdt->dev, "enabling watchdog.\n"); ++ ++ //spin_lock(&wdt_lock); ++ /* This loads the count register but does NOT start the count yet */ ++ cns3xxx_wdt_keepalive(wdt); ++ spin_lock(&wdt_lock); ++ ++ if (cns3xxx_noboot) { ++ /* Enable watchdog - prescale=256, watchdog mode=0, enable=1 */ ++ writel(0x0000FF01, wdt->base + TWD_WDOG_CONTROL); ++ } else { ++ /* Enable watchdog - prescale=256, watchdog mode=1, enable=1 */ ++ writel(0x0000FF09, wdt->base + TWD_WDOG_CONTROL); ++ } ++ spin_unlock(&wdt_lock); ++} ++ ++static int cns3xxx_wdt_set_heartbeat(int t) ++{ ++ if (t < 0x0001 || t > 0xFFFF) ++ return -EINVAL; ++ ++ cns3xxx_margin = t; ++ return 0; ++} ++ ++/* ++ * /dev/watchdog handling ++ */ ++static int cns3xxx_wdt_open(struct inode *inode, struct file *file) ++{ ++ struct cns3xxx_wdt *wdt = platform_get_drvdata(cns3xxx_wdt_dev); ++ ++ if (test_and_set_bit(0, &wdt->timer_alive)) ++ return -EBUSY; ++ ++ if (nowayout) ++ __module_get(THIS_MODULE); ++ ++ file->private_data = wdt; ++ ++ /* ++ * Activate timer ++ */ ++ cns3xxx_wdt_start(wdt); ++ ++ return nonseekable_open(inode, file); ++} ++ ++static int cns3xxx_wdt_release(struct inode *inode, struct file *file) ++{ ++ struct cns3xxx_wdt *wdt = file->private_data; ++ ++ /* ++ * Shut off the timer. 
++ * Lock it in if it's a module and we set nowayout ++ */ ++ if (wdt->expect_close == 42) ++ cns3xxx_wdt_stop(wdt); ++ else { ++ dev_printk(KERN_CRIT, wdt->dev, ++ "unexpected close, not stopping watchdog!\n"); ++ cns3xxx_wdt_keepalive(wdt); ++ } ++ clear_bit(0, &wdt->timer_alive); ++ wdt->expect_close = 0; ++ return 0; ++} ++ ++static ssize_t cns3xxx_wdt_write(struct file *file, const char *data, ++ size_t len, loff_t *ppos) ++{ ++ struct cns3xxx_wdt *wdt = file->private_data; ++ ++ /* ++ * Refresh the timer. ++ */ ++ if (len) { ++ if (!nowayout) { ++ size_t i; ++ ++ /* In case it was set long ago */ ++ wdt->expect_close = 0; ++ ++ for (i = 0; i != len; i++) { ++ char c; ++ ++ if (get_user(c, data + i)) ++ return -EFAULT; ++ if (c == 'V') ++ wdt->expect_close = 42; ++ } ++ } ++ cns3xxx_wdt_keepalive(wdt); ++ } ++ return len; ++} ++ ++static struct watchdog_info ident = { ++ .options = WDIOF_SETTIMEOUT | ++ WDIOF_KEEPALIVEPING | ++ WDIOF_MAGICCLOSE, ++ .identity = "CNS3XXX Watchdog", ++}; ++ ++static long cns3xxx_wdt_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ struct cns3xxx_wdt *wdt = file->private_data; ++ int ret; ++ union { ++ struct watchdog_info ident; ++ int i; ++ } uarg; ++ ++ if (_IOC_DIR(cmd) && _IOC_SIZE(cmd) > sizeof(uarg)) ++ return -ENOTTY; ++ ++ if (_IOC_DIR(cmd) & _IOC_WRITE) { ++ ret = copy_from_user(&uarg, (void __user *)arg, _IOC_SIZE(cmd)); ++ if (ret) ++ return -EFAULT; ++ } ++ ++ switch (cmd) { ++ case WDIOC_GETSUPPORT: ++ uarg.ident = ident; ++ ret = 0; ++ break; ++ ++ case WDIOC_GETSTATUS: ++ case WDIOC_GETBOOTSTATUS: ++ uarg.i = 0; ++ ret = 0; ++ break; ++ ++ case WDIOC_SETOPTIONS: ++ ret = -EINVAL; ++ if (uarg.i & WDIOS_DISABLECARD) { ++ cns3xxx_wdt_stop(wdt); ++ ret = 0; ++ } ++ if (uarg.i & WDIOS_ENABLECARD) { ++ cns3xxx_wdt_start(wdt); ++ ret = 0; ++ } ++ break; ++ ++ case WDIOC_KEEPALIVE: ++ cns3xxx_wdt_keepalive(wdt); ++ ret = 0; ++ break; ++ ++ case WDIOC_SETTIMEOUT: ++ ret = 
cns3xxx_wdt_set_heartbeat(uarg.i); ++ if (ret) ++ break; ++ ++ cns3xxx_wdt_keepalive(wdt); ++ /* Fall */ ++ case WDIOC_GETTIMEOUT: ++ uarg.i = cns3xxx_margin; ++ ret = 0; ++ break; ++ ++ default: ++ return -ENOTTY; ++ } ++ ++ if (ret == 0 && _IOC_DIR(cmd) & _IOC_READ) { ++ ret = copy_to_user((void __user *)arg, &uarg, _IOC_SIZE(cmd)); ++ if (ret) ++ ret = -EFAULT; ++ } ++ return ret; ++} ++ ++/* ++ * System shutdown handler. Turn off the watchdog if we're ++ * restarting or halting the system. ++ */ ++static void cns3xxx_wdt_shutdown(struct platform_device *dev) ++{ ++ struct cns3xxx_wdt *wdt = platform_get_drvdata(dev); ++ ++ if (system_state == SYSTEM_RESTART || system_state == SYSTEM_HALT) ++ cns3xxx_wdt_stop(wdt); ++} ++ ++/* ++ * Kernel Interfaces ++ */ ++static const struct file_operations cns3xxx_wdt_fops = { ++ .owner = THIS_MODULE, ++ .llseek = no_llseek, ++ .write = cns3xxx_wdt_write, ++ .unlocked_ioctl = cns3xxx_wdt_ioctl, ++ .open = cns3xxx_wdt_open, ++ .release = cns3xxx_wdt_release, ++}; ++ ++static struct miscdevice cns3xxx_wdt_miscdev = { ++ .minor = WATCHDOG_MINOR, ++ .name = "watchdog", ++ .fops = &cns3xxx_wdt_fops, ++}; ++ ++static int __devinit cns3xxx_wdt_probe(struct platform_device *dev) ++{ ++ struct cns3xxx_wdt *wdt; ++ struct resource *res; ++ int ret; ++ ++ /* We only accept one device, and it must have an id of -1 */ ++ if (dev->id != -1) ++ return -ENODEV; ++ ++ res = platform_get_resource(dev, IORESOURCE_MEM, 0); ++ if (!res) { ++ ret = -ENODEV; ++ goto err_out; ++ } ++ ++ wdt = kzalloc(sizeof(struct cns3xxx_wdt), GFP_KERNEL); ++ if (!wdt) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ wdt->dev = &dev->dev; ++ wdt->irq = platform_get_irq(dev, 0); ++ if (wdt->irq < 0) { ++ ret = -ENXIO; ++ goto err_free; ++ } ++ wdt->base = ioremap(res->start, res->end - res->start + 1); ++ if (!wdt->base) { ++ ret = -ENOMEM; ++ goto err_free; ++ } ++ ++ cns3xxx_wdt_miscdev.parent = &dev->dev; ++ ret = misc_register(&cns3xxx_wdt_miscdev); ++ if (ret) { 
++ dev_printk(KERN_ERR, wdt->dev, ++ "cannot register miscdev on minor=%d (err=%d)\n", ++ WATCHDOG_MINOR, ret); ++ goto err_misc; ++ } ++ ++ ret = request_irq(wdt->irq, cns3xxx_wdt_fire, IRQF_DISABLED, ++ dev->name, wdt); ++ if (ret) { ++ dev_printk(KERN_ERR, wdt->dev, ++ "cannot register IRQ%d for watchdog\n", wdt->irq); ++ goto err_irq; ++ } ++ ++ cns3xxx_wdt_stop(wdt); ++ platform_set_drvdata(dev, wdt); ++ cns3xxx_wdt_dev = dev; ++ ++ return 0; ++ ++err_irq: ++ misc_deregister(&cns3xxx_wdt_miscdev); ++err_misc: ++ platform_set_drvdata(dev, NULL); ++ iounmap(wdt->base); ++err_free: ++ kfree(wdt); ++err_out: ++ return ret; ++} ++ ++static int __devexit cns3xxx_wdt_remove(struct platform_device *dev) ++{ ++ struct cns3xxx_wdt *wdt = platform_get_drvdata(dev); ++ ++ platform_set_drvdata(dev, NULL); ++ ++ misc_deregister(&cns3xxx_wdt_miscdev); ++ ++ cns3xxx_wdt_dev = NULL; ++ ++ free_irq(wdt->irq, wdt); ++ iounmap(wdt->base); ++ kfree(wdt); ++ return 0; ++} ++ ++ ++static struct platform_driver cns3xxx_wdt_driver = { ++ .probe = cns3xxx_wdt_probe, ++ .remove = __devexit_p(cns3xxx_wdt_remove), ++ .shutdown = cns3xxx_wdt_shutdown, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "cns3xxx-wdt", ++ }, ++}; ++ ++static char banner[] __initdata = KERN_INFO ++ "CNS3XXX Watchdog Timer, noboot=%d margin=%d sec (nowayout= %d)\n"; ++ ++static int __init cns3xxx_wdt_init(void) ++{ ++ /* ++ * Check that the margin value is within it's range; ++ * if not reset to the default ++ */ ++ if (cns3xxx_wdt_set_heartbeat(cns3xxx_margin)) { ++ cns3xxx_wdt_set_heartbeat(TIMER_MARGIN); ++ printk(KERN_INFO "cns3xxx_margin value must be 0 < cns3xxx_margin < 65536, using %d\n", ++ TIMER_MARGIN); ++ } ++ ++ printk(banner, cns3xxx_noboot, cns3xxx_margin, nowayout); ++ ++ spin_lock_init(&wdt_lock); ++ ++ return platform_driver_register(&cns3xxx_wdt_driver); ++} ++ ++static void __exit cns3xxx_wdt_exit(void) ++{ ++ platform_driver_unregister(&cns3xxx_wdt_driver); ++} ++ 
++module_init(cns3xxx_wdt_init); ++module_exit(cns3xxx_wdt_exit); ++ ++MODULE_AUTHOR("Scott Shu"); ++MODULE_DESCRIPTION("CNS3XXX Watchdog Device Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); ++MODULE_ALIAS("platform:cns3xxx-wdt"); +--- a/drivers/watchdog/Kconfig ++++ b/drivers/watchdog/Kconfig +@@ -231,6 +231,15 @@ config DAVINCI_WATCHDOG + NOTE: once enabled, this timer cannot be disabled. + Say N if you are unsure. + ++config CNS3XXX_WATCHDOG ++ tristate "CNS3XXX watchdog" ++ depends on ARCH_CNS3XXX && LOCAL_TIMERS ++ help ++ Watchdog timer embedded into the CNS3XXX SoCs system. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called cns3xxx_wdt. ++ + config ORION_WATCHDOG + tristate "Orion watchdog" + depends on ARCH_ORION5X || ARCH_KIRKWOOD +--- a/drivers/watchdog/Makefile ++++ b/drivers/watchdog/Makefile +@@ -41,6 +41,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_ + obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o + obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o + obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o ++obj-$(CONFIG_CNS3XXX_WATCHDOG) += cns3xxx_wdt.o + obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o + obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o + obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o diff --git a/toolchain/uClibc/config-0.9.30.1/arm.cns3xxx b/toolchain/uClibc/config-0.9.30.1/arm.cns3xxx new file mode 100644 index 0000000000..84ad02bb3e --- /dev/null +++ b/toolchain/uClibc/config-0.9.30.1/arm.cns3xxx @@ -0,0 +1,25 @@ +ARCH_ANY_ENDIAN=y +ARCH_LITTLE_ENDIAN=y +ARCH_WANTS_LITTLE_ENDIAN=y +# CONFIG_ARM10T is not set +# CONFIG_ARM1136JF_S is not set +CONFIG_ARM1176JZF_S=y +# CONFIG_ARM1176JZ_S is not set +# CONFIG_ARM610 is not set +# CONFIG_ARM710 is not set +# CONFIG_ARM720T is not set +# CONFIG_ARM7TDMI is not set +# CONFIG_ARM920T is not set +# CONFIG_ARM922T is not set +# CONFIG_ARM926T is not set +# CONFIG_ARM_CORTEX_M1 is not set +# CONFIG_ARM_CORTEX_M3 is not set +# CONFIG_ARM_EABI 
is not set +# CONFIG_ARM_IWMMXT is not set +CONFIG_ARM_OABI=y +# CONFIG_ARM_SA110 is not set +# CONFIG_ARM_SA1100 is not set +# CONFIG_ARM_XSCALE is not set +# CONFIG_GENERIC_ARM is not set +TARGET_ARCH="arm" +TARGET_arm=y diff --git a/toolchain/uClibc/config-0.9.30.2/arm.cns3xxx b/toolchain/uClibc/config-0.9.30.2/arm.cns3xxx new file mode 100644 index 0000000000..84ad02bb3e --- /dev/null +++ b/toolchain/uClibc/config-0.9.30.2/arm.cns3xxx @@ -0,0 +1,25 @@ +ARCH_ANY_ENDIAN=y +ARCH_LITTLE_ENDIAN=y +ARCH_WANTS_LITTLE_ENDIAN=y +# CONFIG_ARM10T is not set +# CONFIG_ARM1136JF_S is not set +CONFIG_ARM1176JZF_S=y +# CONFIG_ARM1176JZ_S is not set +# CONFIG_ARM610 is not set +# CONFIG_ARM710 is not set +# CONFIG_ARM720T is not set +# CONFIG_ARM7TDMI is not set +# CONFIG_ARM920T is not set +# CONFIG_ARM922T is not set +# CONFIG_ARM926T is not set +# CONFIG_ARM_CORTEX_M1 is not set +# CONFIG_ARM_CORTEX_M3 is not set +# CONFIG_ARM_EABI is not set +# CONFIG_ARM_IWMMXT is not set +CONFIG_ARM_OABI=y +# CONFIG_ARM_SA110 is not set +# CONFIG_ARM_SA1100 is not set +# CONFIG_ARM_XSCALE is not set +# CONFIG_GENERIC_ARM is not set +TARGET_ARCH="arm" +TARGET_arm=y diff --git a/toolchain/uClibc/config-0.9.30.3/arm.cns3xxx b/toolchain/uClibc/config-0.9.30.3/arm.cns3xxx new file mode 100644 index 0000000000..84ad02bb3e --- /dev/null +++ b/toolchain/uClibc/config-0.9.30.3/arm.cns3xxx @@ -0,0 +1,25 @@ +ARCH_ANY_ENDIAN=y +ARCH_LITTLE_ENDIAN=y +ARCH_WANTS_LITTLE_ENDIAN=y +# CONFIG_ARM10T is not set +# CONFIG_ARM1136JF_S is not set +CONFIG_ARM1176JZF_S=y +# CONFIG_ARM1176JZ_S is not set +# CONFIG_ARM610 is not set +# CONFIG_ARM710 is not set +# CONFIG_ARM720T is not set +# CONFIG_ARM7TDMI is not set +# CONFIG_ARM920T is not set +# CONFIG_ARM922T is not set +# CONFIG_ARM926T is not set +# CONFIG_ARM_CORTEX_M1 is not set +# CONFIG_ARM_CORTEX_M3 is not set +# CONFIG_ARM_EABI is not set +# CONFIG_ARM_IWMMXT is not set +CONFIG_ARM_OABI=y +# CONFIG_ARM_SA110 is not set +# CONFIG_ARM_SA1100 is not set 
+# CONFIG_ARM_XSCALE is not set +# CONFIG_GENERIC_ARM is not set +TARGET_ARCH="arm" +TARGET_arm=y diff --git a/toolchain/uClibc/config-0.9.31/arm.cns3xxx b/toolchain/uClibc/config-0.9.31/arm.cns3xxx new file mode 100644 index 0000000000..84ad02bb3e --- /dev/null +++ b/toolchain/uClibc/config-0.9.31/arm.cns3xxx @@ -0,0 +1,25 @@ +ARCH_ANY_ENDIAN=y +ARCH_LITTLE_ENDIAN=y +ARCH_WANTS_LITTLE_ENDIAN=y +# CONFIG_ARM10T is not set +# CONFIG_ARM1136JF_S is not set +CONFIG_ARM1176JZF_S=y +# CONFIG_ARM1176JZ_S is not set +# CONFIG_ARM610 is not set +# CONFIG_ARM710 is not set +# CONFIG_ARM720T is not set +# CONFIG_ARM7TDMI is not set +# CONFIG_ARM920T is not set +# CONFIG_ARM922T is not set +# CONFIG_ARM926T is not set +# CONFIG_ARM_CORTEX_M1 is not set +# CONFIG_ARM_CORTEX_M3 is not set +# CONFIG_ARM_EABI is not set +# CONFIG_ARM_IWMMXT is not set +CONFIG_ARM_OABI=y +# CONFIG_ARM_SA110 is not set +# CONFIG_ARM_SA1100 is not set +# CONFIG_ARM_XSCALE is not set +# CONFIG_GENERIC_ARM is not set +TARGET_ARCH="arm" +TARGET_arm=y diff --git a/toolchain/uClibc/config-0.9.32/arm.cns3xxx b/toolchain/uClibc/config-0.9.32/arm.cns3xxx new file mode 100644 index 0000000000..84ad02bb3e --- /dev/null +++ b/toolchain/uClibc/config-0.9.32/arm.cns3xxx @@ -0,0 +1,25 @@ +ARCH_ANY_ENDIAN=y +ARCH_LITTLE_ENDIAN=y +ARCH_WANTS_LITTLE_ENDIAN=y +# CONFIG_ARM10T is not set +# CONFIG_ARM1136JF_S is not set +CONFIG_ARM1176JZF_S=y +# CONFIG_ARM1176JZ_S is not set +# CONFIG_ARM610 is not set +# CONFIG_ARM710 is not set +# CONFIG_ARM720T is not set +# CONFIG_ARM7TDMI is not set +# CONFIG_ARM920T is not set +# CONFIG_ARM922T is not set +# CONFIG_ARM926T is not set +# CONFIG_ARM_CORTEX_M1 is not set +# CONFIG_ARM_CORTEX_M3 is not set +# CONFIG_ARM_EABI is not set +# CONFIG_ARM_IWMMXT is not set +CONFIG_ARM_OABI=y +# CONFIG_ARM_SA110 is not set +# CONFIG_ARM_SA1100 is not set +# CONFIG_ARM_XSCALE is not set +# CONFIG_GENERIC_ARM is not set +TARGET_ARCH="arm" +TARGET_arm=y