mutex_unlock(&priv->reg_mutex);
}
+/* Pre-check for bridge port flag offload (DSA .port_pre_bridge_flags).
+ * Builds the set of flags this chip generation can offload — a flag is
+ * supported iff the per-SoC ops table provides the matching handler — and
+ * rejects the request if @flags asks for anything outside that set.
+ * Returns 0 if all requested flags are supported, -EINVAL otherwise.
+ * The DSA core calls this before rtl83xx_port_bridge_flags() applies them.
+ */
+static int rtl83xx_port_pre_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+ unsigned long features = 0;
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ /* Advertise only the flags backed by a handler on this SoC family */
+ if (priv->r->enable_learning)
+ features |= BR_LEARNING;
+ if (priv->r->enable_flood)
+ features |= BR_FLOOD;
+ if (priv->r->enable_mcast_flood)
+ features |= BR_MCAST_FLOOD;
+ if (priv->r->enable_bcast_flood)
+ features |= BR_BCAST_FLOOD;
+ /* Any requested flag we cannot offload -> refuse the whole request */
+ if (flags & ~(features))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Apply bridge port flags in hardware (DSA .port_bridge_flags).
+ * For each flag with a handler in the per-SoC ops table, program the
+ * port according to whether the flag is set in @flags. Unsupported
+ * flags were already rejected by rtl83xx_port_pre_bridge_flags().
+ * Always returns 0.
+ */
+static int rtl83xx_port_bridge_flags(struct dsa_switch *ds, int port, unsigned long flags, struct netlink_ext_ack *extack)
+{
+ struct rtl838x_switch_priv *priv = ds->priv;
+
+ pr_debug("%s: %d %lX\n", __func__, port, flags);
+ if (priv->r->enable_learning)
+ priv->r->enable_learning(port, !!(flags & BR_LEARNING));
+
+ if (priv->r->enable_flood)
+ priv->r->enable_flood(port, !!(flags & BR_FLOOD));
+
+ if (priv->r->enable_mcast_flood)
+ priv->r->enable_mcast_flood(port, !!(flags & BR_MCAST_FLOOD));
+
+ if (priv->r->enable_bcast_flood)
+ priv->r->enable_bcast_flood(port, !!(flags & BR_BCAST_FLOOD));
+
+ return 0;
+}
+
static bool rtl83xx_lag_can_offload(struct dsa_switch *ds,
struct net_device *lag,
struct netdev_lag_upper_info *info)
.port_lag_change = rtl83xx_port_lag_change,
.port_lag_join = rtl83xx_port_lag_join,
.port_lag_leave = rtl83xx_port_lag_leave,
+
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
};
const struct dsa_switch_ops rtl930x_switch_ops = {
.port_lag_join = rtl83xx_port_lag_join,
.port_lag_leave = rtl83xx_port_lag_leave,
+ .port_pre_bridge_flags = rtl83xx_port_pre_bridge_flags,
+ .port_bridge_flags = rtl83xx_port_bridge_flags,
};
sw_w32(0, RTL838X_SPCL_TRAP_ARP_CTRL);
}
+/* Enable/disable L2 address learning on @port via the per-port learning
+ * constraint register. The limit field (bits 15:2) caps the number of
+ * learned entries; bits 1:0 select the action once the limit is hit.
+ */
+static void rtl838x_enable_learning(int port, bool enable)
+{
+ // Limit learning to the 16k-entry maximum (0x3fff), then flood (action bits 1:0 = 0)
+
+ if (enable) {
+ // learn up to 0x3fff entries, flood beyond the limit
+ sw_w32((0x3fff << 2) | 0, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+ } else {
+ // limit of 0: learn nothing, just forward
+ // NOTE(review): presumably limit=0 with action=0 means flood-only — confirm against the datasheet
+ sw_w32(0, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+ }
+}
+
+/* Enable/disable unknown-unicast flooding on @port.
+ * The action lives in bits 1:0 of the per-port learning constraint
+ * register: value 0 = flood/forward, value 1 = drop. The learning
+ * limit in the upper bits is read back and preserved.
+ */
+static void rtl838x_enable_flood(int port, bool enable)
+{
+ u32 flood_mask = sw_r32(RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+
+ /* Clear the 2-bit action field, then select flood (0) or drop (1) */
+ flood_mask &= ~3;
+ if (!enable)
+ flood_mask |= 1;
+ sw_w32(flood_mask, RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+/* Stub: per-port multicast flood control is not implemented for RTL838x.
+ * NOTE(review): registering this stub makes rtl83xx_port_pre_bridge_flags()
+ * accept BR_MCAST_FLOOD even though the setting is silently ignored —
+ * consider leaving the op unset in the ops table until implemented.
+ */
+static void rtl838x_enable_mcast_flood(int port, bool enable)
+{
+
+}
+
+/* Stub: per-port broadcast flood control is not implemented for RTL838x.
+ * NOTE(review): registering this stub makes rtl83xx_port_pre_bridge_flags()
+ * accept BR_BCAST_FLOOD even though the setting is silently ignored —
+ * consider leaving the op unset in the ops table until implemented.
+ */
+static void rtl838x_enable_bcast_flood(int port, bool enable)
+{
+
+}
+
static void rtl838x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
int i;
.vlan_fwd_on_inner = rtl838x_vlan_fwd_on_inner,
.set_vlan_igr_filter = rtl838x_set_igr_filter,
.set_vlan_egr_filter = rtl838x_set_egr_filter,
+ .enable_learning = rtl838x_enable_learning,
+ .enable_flood = rtl838x_enable_flood,
+ .enable_mcast_flood = rtl838x_enable_mcast_flood,
+ .enable_bcast_flood = rtl838x_enable_bcast_flood,
.stp_get = rtl838x_stp_get,
.stp_set = rtl838x_stp_set,
.mac_port_ctrl = rtl838x_mac_port_ctrl,
#define RTL930X_L2_BC_FLD_PMSK (0x9068)
#define RTL930X_L2_UNKN_UC_FLD_PMSK (0x9064)
#define RTL838X_L2_LRN_CONSTRT_EN (0x3368)
+#define RTL838X_L2_PORT_LRN_CONSTRT (0x32A0)
+#define RTL839X_L2_PORT_LRN_CONSTRT (0x3914)
#define RTL838X_L2_PORT_NEW_SALRN(p) (0x328c + (((p >> 4) << 2)))
#define RTL839X_L2_PORT_NEW_SALRN(p) (0x38F0 + (((p >> 4) << 2)))
void (*vlan_port_pvid_set)(int port, enum pbvlan_type type, int pvid);
void (*set_vlan_igr_filter)(int port, enum igr_filter state);
void (*set_vlan_egr_filter)(int port, enum egr_filter state);
+ void (*enable_learning)(int port, bool enable);
+ void (*enable_flood)(int port, bool enable);
+ void (*enable_mcast_flood)(int port, bool enable);
+ void (*enable_bcast_flood)(int port, bool enable);
void (*stp_get)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
void (*stp_set)(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[]);
int (*mac_force_mode_ctrl)(int port);
void (*set_l3_router_mac)(u32 idx, struct rtl93xx_rt_mac *m);
void (*set_l3_egress_intf)(int idx, struct rtl838x_l3_intf *intf);
void (*set_distribution_algorithm)(int group, int algoidx, u32 algomask);
-
};
struct rtl838x_switch_priv {
sw_w32(0, RTL839X_SPCL_TRAP_ARP_CTRL);
}
+/* Enable/disable L2 address learning on @port via the per-port learning
+ * constraint register. The limit field (bits above 1) caps the number of
+ * learned entries; bits 1:0 select the action once the limit is hit.
+ */
+static void rtl839x_enable_learning(int port, bool enable)
+{
+ // Limit learning to maximum: 32k entries (0x7fff), after that just flood (action bits 1:0 = 0)
+
+ if (enable) {
+ // learn up to 0x7fff entries, flood beyond the limit
+ sw_w32((0x7fff << 2) | 0, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ } else {
+ // limit of 0: learn nothing, just forward
+ // NOTE(review): presumably limit=0 with action=0 means flood-only — confirm against the datasheet
+ sw_w32(0, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+ }
+
+}
+
+/* Enable/disable unknown-unicast flooding on @port.
+ * The action lives in bits 1:0 of the per-port learning constraint
+ * register: value 0 = flood/forward, value 1 = drop. The learning
+ * limit in the upper bits is read back and preserved.
+ */
+static void rtl839x_enable_flood(int port, bool enable)
+{
+ u32 flood_mask = sw_r32(RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+
+ /* Clear the 2-bit action field, then select flood (0) or drop (1) */
+ flood_mask &= ~3;
+ if (!enable)
+ flood_mask |= 1;
+ sw_w32(flood_mask, RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
+}
+
+/* Stub: per-port multicast flood control is not implemented for RTL839x.
+ * NOTE(review): registering this stub makes rtl83xx_port_pre_bridge_flags()
+ * accept BR_MCAST_FLOOD even though the setting is silently ignored —
+ * consider leaving the op unset in the ops table until implemented.
+ */
+static void rtl839x_enable_mcast_flood(int port, bool enable)
+{
+
+}
+
+/* Stub: per-port broadcast flood control is not implemented for RTL839x.
+ * NOTE(review): registering this stub makes rtl83xx_port_pre_bridge_flags()
+ * accept BR_BCAST_FLOOD even though the setting is silently ignored —
+ * consider leaving the op unset in the ops table until implemented.
+ */
+static void rtl839x_enable_bcast_flood(int port, bool enable)
+{
+
+}
irqreturn_t rtl839x_switch_irq(int irq, void *dev_id)
{
struct dsa_switch *ds = dev_id;
.vlan_port_pvid_set = rtl839x_vlan_port_pvid_set,
.set_vlan_igr_filter = rtl839x_set_igr_filter,
.set_vlan_egr_filter = rtl839x_set_egr_filter,
+ .enable_learning = rtl839x_enable_learning,
+ .enable_flood = rtl839x_enable_flood,
+ .enable_mcast_flood = rtl839x_enable_mcast_flood,
+ .enable_bcast_flood = rtl839x_enable_bcast_flood,
.stp_get = rtl839x_stp_get,
.stp_set = rtl839x_stp_set,
.mac_force_mode_ctrl = rtl839x_mac_force_mode_ctrl,