/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
*
* Dave Liu <daveliu@freescale.com>
* based on source code of Shlomi Gridish
/* QE RISC allocation
*/
-typedef enum qe_risc_allocation {
-        QE_RISC_ALLOCATION_RISC1 = 1,           /* RISC 1 */
-        QE_RISC_ALLOCATION_RISC2 = 2,           /* RISC 2 */
-        QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3  /* RISC 1 or RISC 2 */
-} qe_risc_allocation_e;
+#define QE_RISC_ALLOCATION_RISC1                0x1  /* RISC 1 */
+#define QE_RISC_ALLOCATION_RISC2                0x2  /* RISC 2 */
+#define QE_RISC_ALLOCATION_RISC3                0x4  /* RISC 3 */
+#define QE_RISC_ALLOCATION_RISC4                0x8  /* RISC 4 */
+#define QE_RISC_ALLOCATION_RISC1_AND_RISC2      (QE_RISC_ALLOCATION_RISC1 | \
+                                                 QE_RISC_ALLOCATION_RISC2)
+#define QE_RISC_ALLOCATION_FOUR_RISCS           (QE_RISC_ALLOCATION_RISC1 | \
+                                                 QE_RISC_ALLOCATION_RISC2 | \
+                                                 QE_RISC_ALLOCATION_RISC3 | \
+                                                 QE_RISC_ALLOCATION_RISC4)
/* QE CECR commands for UCC fast.
*/
/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006-2009 Freescale Semiconductor, Inc.
*
* Dave Liu <daveliu@freescale.com>
*
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC1_PHY_ADDR,
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC2_PHY_ADDR,
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC3_PHY_ADDR,
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC4_PHY_ADDR,
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC5_PHY_ADDR,
        .num_threads_tx = UEC_NUM_OF_THREADS_4,
        .num_threads_rx = UEC_NUM_OF_THREADS_4,
#endif
+#if (MAX_QE_RISC == 4)
+        .risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS,
+        .risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS,
+#else
        .risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
        .risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+#endif
        .tx_bd_ring_len = 16,
        .rx_bd_ring_len = 16,
        .phy_address = CONFIG_SYS_UEC6_PHY_ADDR,
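
Replacing the enum with single-bit macros is what allows the MAX_QE_RISC == 4 configuration above: allocations become OR-able masks, and individual RISC engines can be tested with a bitwise AND. Below is a minimal standalone sketch of that usage, assuming a hosted C environment; the main() harness and the hard-coded MAX_QE_RISC fallback are illustrative only, not part of the patch.

#include <stdio.h>

/* Bit-flag RISC allocation macros as introduced by the patch. */
#define QE_RISC_ALLOCATION_RISC1        0x1  /* RISC 1 */
#define QE_RISC_ALLOCATION_RISC2        0x2  /* RISC 2 */
#define QE_RISC_ALLOCATION_RISC3        0x4  /* RISC 3 */
#define QE_RISC_ALLOCATION_RISC4        0x8  /* RISC 4 */
#define QE_RISC_ALLOCATION_RISC1_AND_RISC2      (QE_RISC_ALLOCATION_RISC1 | \
                                                 QE_RISC_ALLOCATION_RISC2)
#define QE_RISC_ALLOCATION_FOUR_RISCS   (QE_RISC_ALLOCATION_RISC1 | \
                                         QE_RISC_ALLOCATION_RISC2 | \
                                         QE_RISC_ALLOCATION_RISC3 | \
                                         QE_RISC_ALLOCATION_RISC4)

/* Illustrative fallback only; the real value comes from the SoC configuration. */
#ifndef MAX_QE_RISC
#define MAX_QE_RISC 4
#endif

int main(void)
{
        /* Select the Tx allocation the same way the uec_info tables above do. */
#if (MAX_QE_RISC == 4)
        unsigned int risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
#else
        unsigned int risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
#endif

        /* Single-bit macros make per-engine membership a simple AND test. */
        if (risc_tx & QE_RISC_ALLOCATION_RISC3)
                printf("Tx work may be scheduled on RISC 3\n");
        else
                printf("Tx work is limited to RISC 1 and RISC 2\n");

        return 0;
}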