#define RTL930X_ISR_GLB (0xC658)
#define RTL930X_ISR_PORT_LINK_STS_CHG (0xC660)
-// IMR_GLB does not exit on RTL931X
+/* IMR_GLB does not exist on RTL931X */
#define RTL931X_IMR_PORT_LINK_STS_CHG (0x126C)
#define RTL931X_ISR_GLB_SRC (0x12B4)
#define RTL931X_ISR_PORT_LINK_STS_CHG (0x12B8)
#define RTL9300_TC_INT_IP BIT(16)
#define RTL9300_TC_INT_IE BIT(20)
-// Timer modes
+/* Timer modes */
#define TIMER_MODE_REPEAT 1
#define TIMER_MODE_ONCE 0
-// Minimum divider is 2
+/* Minimum divider is 2 */
#define DIVISOR_RTL9300 2
#define N_BITS 28
u32 v = readl(rtl_clk->base + RTL9300_TC_INT);
- // Acknowledge the IRQ
+ /* Acknowledge the IRQ */
v |= RTL9300_TC_INT_IP;
writel(v, rtl_clk->base + RTL9300_TC_INT);
writel(0, base + RTL9300_TC_CTRL);
- // Acknowledge possibly pending IRQ
+ /* Acknowledge possibly pending IRQ */
v = readl(base + RTL9300_TC_INT);
writel(v | RTL9300_TC_INT_IP, base + RTL9300_TC_INT);
}
{
u32 v;
- // Disable timer
+ /* Disable timer */
writel(0, base + RTL9300_TC_CTRL);
- // Acknowledge possibly pending IRQ
+ /* Acknowledge possibly pending IRQ */
v = readl(base + RTL9300_TC_INT);
writel(v | RTL9300_TC_INT_IP, base + RTL9300_TC_INT);
- // Setup maximum period (for use as clock-source)
+ /* Set up maximum period (for use as clock-source) */
writel(0x0fffffff, base + RTL9300_TC_DATA);
}
/* Set execution bit: cleared when operation completed */
t |= 1;
- // Start execution
+ /* Start execution */
sw_w32(t, gpios->ext_gpio_indrt_access);
do {
udelay(1);
/* Set execution bit: cleared when operation completed */
t |= 1;
- // Start execution
+ /* Start execution */
sw_w32(t, gpios->ext_gpio_indrt_access);
do {
udelay(1);
gpios->reg_cached = 0;
if (soc_info.family == RTL8390_FAMILY_ID) {
- // RTL8390: Enable external gpio in global led control register
+ /* RTL8390: Enable external gpio in global led control register */
sw_w32_mask(0x7 << 18, 0x4 << 18, RTL839X_LED_GLB_CTRL);
} else if (soc_info.family == RTL8380_FAMILY_ID) {
- // RTL8380: Enable RTL8231 indirect access mode
+ /* RTL8380: Enable RTL8231 indirect access mode */
sw_w32_mask(0, 1, RTL838X_EXTRA_GPIO_CTRL);
sw_w32_mask(3, 1, RTL838X_DMY_REG5);
}
static void rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len)
{
- // Set register address width
+ /* Set register address width */
REG_MASK(i2c, 0x3 << RTL9300_I2C_CTRL2_MADDR_WIDTH, len << RTL9300_I2C_CTRL2_MADDR_WIDTH,
RTL9300_I2C_CTRL2);
- // Set register address
+ /* Set register address */
REG_MASK(i2c, 0xffffff << RTL9300_I2C_CTRL1_MEM_ADDR, reg << RTL9300_I2C_CTRL1_MEM_ADDR,
RTL9300_I2C_CTRL1);
}
static void rtl9310_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len)
{
- // Set register address width
+ /* Set register address width */
REG_MASK(i2c, 0x3 << RTL9310_I2C_CTRL_MADDR_WIDTH, len << RTL9310_I2C_CTRL_MADDR_WIDTH,
RTL9310_I2C_CTRL);
- // Set register address
+ /* Set register address */
writel(reg, REG(i2c, RTL9310_I2C_MEMADDR));
}
{
u32 v;
- // Set SCL pin
+ /* Set SCL pin */
REG_MASK(i2c, 0, BIT(RTL9300_I2C_CTRL1_GPIO8_SCL_SEL), RTL9300_I2C_CTRL1);
- // Set SDA pin
+ /* Set SDA pin */
REG_MASK(i2c, 0x7 << RTL9300_I2C_CTRL1_SDA_OUT_SEL,
i2c->sda_num << RTL9300_I2C_CTRL1_SDA_OUT_SEL, RTL9300_I2C_CTRL1);
- // Set SDA pin to I2C functionality
+ /* Set SDA pin to I2C functionality */
v = readl(i2c->base + RTL9300_I2C_MST_GLB_CTRL);
v |= BIT(i2c->sda_num);
writel(v, i2c->base + RTL9300_I2C_MST_GLB_CTRL);
{
u32 v;
- // Set SCL pin
+ /* Set SCL pin */
REG_MASK(i2c, 0, BIT(RTL9310_I2C_MST_IF_SEL_GPIO_SCL_SEL + scl_num), RTL9310_I2C_MST_IF_SEL);
- // Set SDA pin
+ /* Set SDA pin */
REG_MASK(i2c, 0x7 << RTL9310_I2C_CTRL_SDA_OUT_SEL,
i2c->sda_num << RTL9310_I2C_CTRL_SDA_OUT_SEL, RTL9310_I2C_CTRL);
- // Set SDA pin to I2C functionality
+ /* Set SDA pin to I2C functionality */
v = readl(i2c->base + RTL9310_I2C_MST_IF_SEL);
v |= BIT(i2c->sda_num);
writel(v, i2c->base + RTL9310_I2C_MST_IF_SEL);
static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, u16 addr, u16 len)
{
- // Set bus frequency
+ /* Set bus frequency */
REG_MASK(i2c, 0x3 << RTL9300_I2C_CTRL2_SCL_FREQ,
i2c->bus_freq << RTL9300_I2C_CTRL2_SCL_FREQ, RTL9300_I2C_CTRL2);
- // Set slave device address
+ /* Set slave device address */
REG_MASK(i2c, 0x7f << RTL9300_I2C_CTRL2_DEV_ADDR,
addr << RTL9300_I2C_CTRL2_DEV_ADDR, RTL9300_I2C_CTRL2);
- // Set data length
+ /* Set data length */
REG_MASK(i2c, 0xf << RTL9300_I2C_CTRL2_DATA_WIDTH,
((len - 1) & 0xf) << RTL9300_I2C_CTRL2_DATA_WIDTH, RTL9300_I2C_CTRL2);
- // Set read mode to random
+ /* Set read mode to random */
REG_MASK(i2c, 0x1 << RTL9300_I2C_CTRL2_READ_MODE, 0, RTL9300_I2C_CTRL2);
return 0;
static int rtl9310_i2c_config_xfer(struct rtl9300_i2c *i2c, u16 addr, u16 len)
{
- // Set bus frequency
+ /* Set bus frequency */
REG_MASK(i2c, 0x3 << RTL9310_I2C_CTRL_SCL_FREQ,
i2c->bus_freq << RTL9310_I2C_CTRL_SCL_FREQ, RTL9310_I2C_CTRL);
- // Set slave device address
+ /* Set slave device address */
REG_MASK(i2c, 0x7f << RTL9310_I2C_CTRL_DEV_ADDR,
addr << RTL9310_I2C_CTRL_DEV_ADDR, RTL9310_I2C_CTRL);
- // Set data length
+ /* Set data length */
REG_MASK(i2c, 0xf << RTL9310_I2C_CTRL_DATA_WIDTH,
((len - 1) & 0xf) << RTL9310_I2C_CTRL_DATA_WIDTH, RTL9310_I2C_CTRL);
- // Set read mode to random
+ /* Set read mode to random */
REG_MASK(i2c, 0x1 << RTL9310_I2C_CTRL_READ_MODE, 0, RTL9310_I2C_CTRL);
return 0;
struct device *dev;
struct i2c_adapter adap;
u8 bus_freq;
- u8 sda_num; // SDA channel number
- u8 scl_num; // SCL channel, mapping to master 1 or 2
+ u8 sda_num; /* SDA channel number */
+ u8 scl_num; /* SCL channel, mapping to master 1 or 2 */
};
#endif
{
struct rtl9300_mux *mux = i2c_mux_priv(muxc);
- // Set SCL pin
+ /* Set SCL pin */
REG_MASK(channels[chan].scl_num, 0,
BIT(RTL9300_I2C_CTRL1_GPIO8_SCL_SEL), RTL9300_I2C_CTRL1);
- // Set SDA pin
+ /* Set SDA pin */
REG_MASK(channels[chan].scl_num, 0x7 << RTL9300_I2C_CTRL1_SDA_OUT_SEL,
channels[chan].sda_num << RTL9300_I2C_CTRL1_SDA_OUT_SEL, RTL9300_I2C_CTRL1);
{
struct rtl9300_mux *mux = i2c_mux_priv(muxc);
- // Set SCL pin
+ /* Set SCL pin */
REG_MASK(0, 0, BIT(RTL9310_I2C_MST_IF_SEL_GPIO_SCL_SEL + channels[chan].scl_num),
RTL9310_I2C_MST_IF_SEL);
- // Set SDA pin
+ /* Set SDA pin */
REG_MASK(channels[chan].scl_num, 0xf << RTL9310_I2C_CTRL_SDA_OUT_SEL,
channels[chan].sda_num << RTL9310_I2C_CTRL_SDA_OUT_SEL, RTL9310_I2C_CTRL);
struct rtl9300_mux *mux = i2c_mux_priv(muxc);
u32 v;
- // Set SDA pin to I2C functionality
+ /* Set SDA pin to I2C functionality */
v = readl(REG(0, RTL9300_I2C_MST_GLB_CTRL));
v |= BIT(pin);
writel(v, REG(0, RTL9300_I2C_MST_GLB_CTRL));
struct rtl9300_mux *mux = i2c_mux_priv(muxc);
u32 v;
- // Set SDA pin to I2C functionality
+ /* Set SDA pin to I2C functionality */
v = readl(REG(0, RTL9310_I2C_MST_IF_SEL));
v |= BIT(pin);
writel(v, REG(0, RTL9310_I2C_MST_IF_SEL));
}
static struct table_reg rtl838x_tbl_regs[] = {
- TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), // RTL8380_TBL_L2
- TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), // RTL8380_TBL_0
- TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), // RTL8380_TBL_1
-
- TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), // RTL8390_TBL_L2
- TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), // RTL8390_TBL_0
- TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), // RTL8390_TBL_1
- TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), // RTL8390_TBL_2
-
- TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), // RTL9300_TBL_L2
- TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), // RTL9300_TBL_0
- TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), // RTL9300_TBL_1
- TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), // RTL9300_TBL_2
- TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), // RTL9300_TBL_HSB
- TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), // RTL9300_TBL_HSA
-
- TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), // RTL9310_TBL_0
- TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), // RTL9310_TBL_1
- TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), // RTL9310_TBL_2
- TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), // RTL9310_TBL_3
- TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), // RTL9310_TBL_4
- TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), // RTL9310_TBL_5
+ TBL_DESC(0x6900, 0x6908, 3, 15, 13, 1), /* RTL8380_TBL_L2 */
+ TBL_DESC(0x6914, 0x6918, 18, 14, 12, 1), /* RTL8380_TBL_0 */
+ TBL_DESC(0xA4C8, 0xA4CC, 6, 14, 12, 1), /* RTL8380_TBL_1 */
+
+ TBL_DESC(0x1180, 0x1184, 3, 16, 14, 0), /* RTL8390_TBL_L2 */
+ TBL_DESC(0x1190, 0x1194, 17, 15, 12, 0), /* RTL8390_TBL_0 */
+ TBL_DESC(0x6B80, 0x6B84, 4, 14, 12, 0), /* RTL8390_TBL_1 */
+ TBL_DESC(0x611C, 0x6120, 9, 8, 6, 0), /* RTL8390_TBL_2 */
+
+ TBL_DESC(0xB320, 0xB334, 3, 18, 16, 0), /* RTL9300_TBL_L2 */
+ TBL_DESC(0xB340, 0xB344, 19, 16, 12, 0), /* RTL9300_TBL_0 */
+ TBL_DESC(0xB3A0, 0xB3A4, 20, 16, 13, 0), /* RTL9300_TBL_1 */
+ TBL_DESC(0xCE04, 0xCE08, 6, 14, 12, 0), /* RTL9300_TBL_2 */
+ TBL_DESC(0xD600, 0xD604, 30, 7, 6, 0), /* RTL9300_TBL_HSB */
+ TBL_DESC(0x7880, 0x7884, 22, 9, 8, 0), /* RTL9300_TBL_HSA */
+
+ TBL_DESC(0x8500, 0x8508, 8, 19, 15, 0), /* RTL9310_TBL_0 */
+ TBL_DESC(0x40C0, 0x40C4, 22, 16, 14, 0), /* RTL9310_TBL_1 */
+ TBL_DESC(0x8528, 0x852C, 6, 18, 14, 0), /* RTL9310_TBL_2 */
+ TBL_DESC(0x0200, 0x0204, 9, 15, 12, 0), /* RTL9310_TBL_3 */
+ TBL_DESC(0x20dc, 0x20e0, 29, 7, 6, 0), /* RTL9310_TBL_4 */
+ TBL_DESC(0x7e1c, 0x7e20, 53, 8, 6, 0), /* RTL9310_TBL_5 */
};
void rtl_table_init(void)
if (!r)
return;
-// pr_info("Unlocking %08x\n", (u32)r);
+/* pr_info("Unlocking %08x\n", (u32)r); */
mutex_unlock(&r->lock);
-// pr_info("Unlock done\n");
+/* pr_info("Unlock done\n"); */
}
static int rtl_table_exec(struct table_reg *r, bool is_write, int idx)
led_set = 0;
priv->ports[pn].led_set = led_set;
- // Check for the integrated SerDes of the RTL8380M first
+ /* Check for the integrated SerDes of the RTL8380M first */
if (of_property_read_bool(phy_node, "phy-is-integrated")
&& priv->id == 0x8380 && pn >= 24) {
pr_debug("----> FÓUND A SERDES\n");
case NETDEV_LAG_HASH_L23:
algomsk |= TRUNK_DISTRIBUTION_ALGO_DMAC_BIT;
algomsk |= TRUNK_DISTRIBUTION_ALGO_SMAC_BIT;
- algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
- algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
algoidx = 1;
break;
case NETDEV_LAG_HASH_L34:
- algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; //sport
- algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; //dport
- algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; //source ip
- algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; //dest ip
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SRC_L4PORT_BIT; /* sport */
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DST_L4PORT_BIT; /* dport */
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_SIP_BIT; /* source ip */
+ algomsk |= TRUNK_DISTRIBUTION_ALGO_DIP_BIT; /* dest ip */
algoidx = 2;
break;
default:
return -ENOSPC;
}
- // 0x7f algo mask all
+ /* 0x7f algo mask all */
priv->r->mask_port_reg_be(BIT_ULL(port), 0, priv->r->trk_mbr_ctr(group));
priv->lags_port_members[group] &= ~BIT_ULL(port);
u64_to_ether_addr(nh->mac, &e.mac[0]);
e.port = nh->port;
- // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ /* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
for (i = 0; i < priv->l2_bucket_size; i++) {
entry = priv->r->read_l2_entry_using_hash(key, i, &e);
return -1;
}
- // Found an existing (e->valid is true) or empty entry, make it a nexthop entry
+ /* Found an existing (e->valid is true) or empty entry, make it a nexthop entry */
nh->l2_id = idx;
if (e.valid) {
nh->port = e.port;
- nh->vid = e.vid; // Save VID
+ nh->vid = e.vid; /* Save VID */
nh->rvid = e.rvid;
nh->dev_id = e.stack_dev;
- // If the entry is already a valid next hop entry, don't change it
+ /* If the entry is already a valid next hop entry, don't change it */
if (e.next_hop)
return 0;
} else {
e.block_da = false;
e.block_sa = false;
e.suspended = false;
- e.age = 0; // With port-ignore
+ e.age = 0; /* With port-ignore */
e.port = priv->port_ignore;
u64_to_ether_addr(nh->mac, &e.mac[0]);
}
e.next_hop = true;
- e.nh_route_id = nh->id; // NH route ID takes place of VID
+ e.nh_route_id = nh->id; /* NH route ID takes place of VID */
e.nh_vlan_target = false;
priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
if (e.is_static)
e.valid = false;
e.next_hop = false;
- e.vid = nh->vid; // Restore VID
+ e.vid = nh->vid; /* Restore VID */
e.rvid = nh->rvid;
priv->r->write_l2_entry_using_hash(key, i, &e);
{
int i;
-// TODO: On 5.12:
-// if(!dsa_slave_dev_check(dev)) {
-// netdev_info(dev, "%s: not a DSA device.\n", __func__);
-// return -EINVAL;
-// }
+/* TODO: On 5.12:
+ * if(!dsa_slave_dev_check(dev)) {
+ * netdev_info(dev, "%s: not a DSA device.\n", __func__);
+ * return -EINVAL;
+ * }
+ */
for (i = 0; i < priv->cpu_port; i++) {
if (!priv->ports[i].dp)
pr_info("%s: Setting up fwding: ip %pI4, GW mac %016llx\n",
__func__, &ip_addr, mac);
- // Reads the ROUTING table entry associated with the route
+ /* Reads the ROUTING table entry associated with the route */
priv->r->route_read(r->id, r);
pr_info("Route with id %d to %pI4 / %d\n", r->id, &r->dst_ip, r->prefix_len);
r->nh.port = priv->port_ignore;
r->nh.id = r->id;
- // Do we need to explicitly add a DMAC entry with the route's nh index?
+ /* Do we need to explicitly add a DMAC entry with the route's nh index? */
if (priv->r->set_l3_egress_mac)
priv->r->set_l3_egress_mac(r->id, mac);
- // Update ROUTING table: map gateway-mac and switch-mac id to route id
+ /* Update ROUTING table: map gateway-mac and switch-mac id to route id */
rtl83xx_l2_nexthop_add(priv, &r->nh);
r->attr.valid = true;
r->attr.action = ROUTE_ACT_FORWARD;
r->attr.type = 0;
- r->attr.hit = false; // Reset route-used indicator
+ r->attr.hit = false; /* Reset route-used indicator */
- // Add PIE entry with dst_ip and prefix_len
+ /* Add PIE entry with dst_ip and prefix_len */
r->pr.dip = r->dst_ip;
r->pr.dip_m = inet_make_mask(r->prefix_len);
r->id = idx;
r->gw_ip = ip;
- r->pr.id = -1; // We still need to allocate a rule in HW
+ r->pr.id = -1; /* We still need to allocate a rule in HW */
r->is_host_route = false;
err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
r->id = idx + MAX_ROUTES;
r->gw_ip = ip;
- r->pr.id = -1; // We still need to allocate a rule in HW
+ r->pr.id = -1; /* We still need to allocate a rule in HW */
r->is_host_route = true;
err = rhltable_insert(&priv->routes, &r->linkage, route_ht_params);
priv->r->host_route_write(id, r);
clear_bit(r->id - MAX_ROUTES, priv->host_route_use_bm);
} else {
- // If there is a HW representation of the route, delete it
+ /* If there is a HW representation of the route, delete it */
if (priv->r->route_lookup_hw) {
id = priv->r->route_lookup_hw(r);
pr_info("%s: Got id for prefix route: %d\n", __func__, id);
m.valid = true;
m.mac = mac;
- m.p_type = 0; // An individual port, not a trunk port
- m.p_id = 0x3f; // Listen on any port
+ m.p_type = 0; /* An individual port, not a trunk port */
+ m.p_id = 0x3f; /* Listen on any port */
m.p_id_mask = 0;
- m.vid = 0; // Listen on any VLAN...
- m.vid_mask = 0; // ... so mask needs to be 0
- m.mac_mask = 0xffffffffffffULL; // We want an exact match of the interface MAC
- m.action = L3_FORWARD; // Route the packet
+ m.vid = 0; /* Listen on any VLAN... */
+ m.vid_mask = 0; /* ... so mask needs to be 0 */
+ m.mac_mask = 0xffffffffffffULL; /* We want an exact match of the interface MAC */
+ m.action = L3_FORWARD; /* Route the packet */
priv->r->set_l3_router_mac(free_mac, &m);
mutex_unlock(&priv->reg_mutex);
return -1;
}
- // Set up default egress interface 1
+ /* Set up default egress interface 1 */
intf.vid = vlan;
intf.smac_idx = free_mac;
intf.ip4_mtu_id = 1;
intf.ip6_mtu_id = 1;
- intf.ttl_scope = 1; // TTL
- intf.hl_scope = 1; // Hop Limit
- intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; // FORWARD
- intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; // FORWARD;
+ intf.ttl_scope = 1; /* TTL */
+ intf.hl_scope = 1; /* Hop Limit */
+ intf.ip4_icmp_redirect = intf.ip6_icmp_redirect = 2; /* FORWARD */
+ intf.ip4_pbr_icmp_redirect = intf.ip6_pbr_icmp_redirect = 2; /* FORWARD */
priv->r->set_l3_egress_intf(free_mac, &intf);
priv->r->set_l3_egress_mac(L3_EGRESS_DMACS + free_mac, mac);
if (port < 0)
return -1;
- // For now we only work with routes that have a gateway and are not ourself
-// if ((!nh->fib_nh_gw4) && (info->dst_len != 32))
-// return 0;
+ /* For now we only work with routes that have a gateway and are not ourself */
+/* if ((!nh->fib_nh_gw4) && (info->dst_len != 32)) */
+/* return 0; */
if ((info->dst & 0xff) == 0xff)
return 0;
- // Do not offload routes to 192.168.100.x
+ /* Do not offload routes to 192.168.100.x */
if ((info->dst & 0xffffff00) == 0xc0a86400)
return 0;
- // Do not offload routes to 127.x.x.x
+ /* Do not offload routes to 127.x.x.x */
if ((info->dst & 0xff000000) == 0x7f000000)
return 0;
- // Allocate route or host-route (entry if hardware supports this)
+ /* Allocate route or host-route entry (if hardware supports this) */
if (info->dst_len == 32 && priv->r->host_route_write)
r = rtl83xx_host_route_alloc(priv, nh->fib_nh_gw4);
else
if (rtl83xx_alloc_router_mac(priv, mac))
goto out_free_rt;
- // vid = 0: Do not care about VID
+ /* vid = 0: Do not care about VID */
r->nh.if_id = rtl83xx_alloc_egress_intf(priv, mac, vlan);
if (r->nh.if_id < 0)
goto out_free_rmac;
}
}
- // We need to resolve the mac address of the GW
+ /* We need to resolve the mac address of the GW */
if (!to_localhost)
rtl83xx_port_ipv4_resolve(priv, dev, nh->fib_nh_gw4);
struct fib6_entry_notifier_info *info)
{
pr_debug("In %s\n", __func__);
-// nh->fib_nh_flags |= RTNH_F_OFFLOAD;
+/* nh->fib_nh_flags |= RTNH_F_OFFLOAD; */
return 0;
}
return -EINVAL;
}
- // Initialize access to RTL switch tables
+ /* Initialize access to RTL switch tables */
rtl_table_init();
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
goto err_register_nb;
}
- // Initialize hash table for L3 routing
+ /* Initialize hash table for L3 routing */
rhltable_init(&priv->routes, &route_ht_params);
/* Register netevent notifier callback to catch notifications about neighboring
if (err)
goto err_register_fib_nb;
- // TODO: put this into l2_setup()
- // Flood BPDUs to all ports including cpu-port
+ /* TODO: put this into l2_setup() */
+ /* Flood BPDUs to all ports including cpu-port */
if (soc_info.family != RTL9300_FAMILY_ID) {
bpdu_mask = soc_info.family == RTL8380_FAMILY_ID ? 0x1FFFFFFF : 0x1FFFFFFFFFFFFF;
priv->r->set_port_reg_be(bpdu_mask, priv->r->rma_bpdu_fld_pmask);
- // TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs
+ /* TRAP 802.1X frames (EAPOL) to the CPU-Port, bypass STP and VLANs */
sw_w32(7, priv->r->spcl_trap_eapol_ctrl);
rtl838x_dbgfs_init(priv);
static int rtl83xx_sw_remove(struct platform_device *pdev)
{
- // TODO:
+ /* TODO: */
pr_debug("Removing platform driver for rtl83xx-sw\n");
return 0;
const char *rtl931x_drop_cntr[] = {
"ALE_RX_GOOD_PKTS", "RX_MAX_FRAME_SIZE", "MAC_RX_DROP", "OPENFLOW_IP_MPLS_TTL", "OPENFLOW_TBL_MISS",
- "IGR_BW", "SPECIAL_CONGEST", "EGR_QUEUE", "RESERVED", "EGR_LINK_STATUS", "STACK_UCAST_NONUCAST_TTL", // 10
+ "IGR_BW", "SPECIAL_CONGEST", "EGR_QUEUE", "RESERVED", "EGR_LINK_STATUS", "STACK_UCAST_NONUCAST_TTL", /* 10 */
"STACK_NONUC_BLOCKING_PMSK", "L2_CRC", "SRC_PORT_FILTER", "PARSER_PACKET_TOO_LONG", "PARSER_MALFORM_PACKET",
- "MPLS_OVER_2_LBL", "EACL_METER", "IACL_METER", "PROTO_STORM", "INVALID_CAPWAP_HEADER", // 20
+ "MPLS_OVER_2_LBL", "EACL_METER", "IACL_METER", "PROTO_STORM", "INVALID_CAPWAP_HEADER", /* 20 */
"MAC_IP_SUBNET_BASED_VLAN", "OAM_PARSER", "UC_MC_RPF", "IP_MAC_BINDING_MATCH_MISMATCH", "SA_BLOCK",
- "TUNNEL_IP_ADDRESS_CHECK", "EACL_DROP", "IACL_DROP", "ATTACK_PREVENT", "SYSTEM_PORT_LIMIT_LEARN", // 30,
+ "TUNNEL_IP_ADDRESS_CHECK", "EACL_DROP", "IACL_DROP", "ATTACK_PREVENT", "SYSTEM_PORT_LIMIT_LEARN", /* 30 */
"OAMPDU", "CCM_RX", "CFM_UNKNOWN_TYPE", "LBM_LBR_LTM_LTR", "Y_1731", "VLAN_LIMIT_LEARN",
- "VLAN_ACCEPT_FRAME_TYPE", "CFI_1", "STATIC_DYNAMIC_PORT_MOVING", "PORT_MOVE_FORBID", // 40
+ "VLAN_ACCEPT_FRAME_TYPE", "CFI_1", "STATIC_DYNAMIC_PORT_MOVING", "PORT_MOVE_FORBID", /* 40 */
"L3_CRC", "BPDU_PTP_LLDP_EAPOL_RMA", "MSTP_SRC_DROP_DISABLED_BLOCKING", "INVALID_SA", "NEW_SA",
- "VLAN_IGR_FILTER", "IGR_VLAN_CONVERT", "GRATUITOUS_ARP", "MSTP_SRC_DROP", "L2_HASH_FULL", // 50
+ "VLAN_IGR_FILTER", "IGR_VLAN_CONVERT", "GRATUITOUS_ARP", "MSTP_SRC_DROP", "L2_HASH_FULL", /* 50 */
"MPLS_UNKNOWN_LBL", "L3_IPUC_NON_IP", "TTL", "MTU", "ICMP_REDIRECT", "STORM_CONTROL", "L3_DIP_DMAC_MISMATCH",
- "IP4_IP_OPTION", "IP6_HBH_EXT_HEADER", "IP4_IP6_HEADER_ERROR", // 60
+ "IP4_IP_OPTION", "IP6_HBH_EXT_HEADER", "IP4_IP6_HEADER_ERROR", /* 60 */
"ROUTING_IP_ADDR_CHECK", "ROUTING_EXCEPTION", "DA_BLOCK", "OAM_MUX", "PORT_ISOLATION", "VLAN_EGR_FILTER",
- "MIRROR_ISOLATE", "MSTP_DESTINATION_DROP", "L2_MC_BRIDGE", "IP_UC_MC_ROUTING_LOOK_UP_MISS", // 70
+ "MIRROR_ISOLATE", "MSTP_DESTINATION_DROP", "L2_MC_BRIDGE", "IP_UC_MC_ROUTING_LOOK_UP_MISS", /* 70 */
"L2_UC", "L2_MC", "IP4_MC", "IP6_MC", "L3_UC_MC_ROUTE", "UNKNOWN_L2_UC_FLPM", "BC_FLPM",
- "VLAN_PRO_UNKNOWN_L2_MC_FLPM", "VLAN_PRO_UNKNOWN_IP4_MC_FLPM", "VLAN_PROFILE_UNKNOWN_IP6_MC_FLPM" // 80,
+ "VLAN_PRO_UNKNOWN_L2_MC_FLPM", "VLAN_PRO_UNKNOWN_IP4_MC_FLPM", "VLAN_PROFILE_UNKNOWN_IP6_MC_FLPM", /* 80 */
};
static ssize_t rtl838x_common_read(char __user *buffer, size_t count,
{
debugfs_remove_recursive(priv->dbgfs_dir);
-// kfree(priv->dbgfs_entries);
+/* kfree(priv->dbgfs_entries); */
}
static int rtl838x_dbgfs_port_init(struct dentry *parent, struct rtl838x_switch_priv *priv,
pr_info("UNKNOWN_MC_PMASK: %016llx\n", priv->r->read_mcast_pmask(UNKNOWN_MC_PMASK));
priv->r->vlan_profile_dump(0);
- info.fid = 0; // Default Forwarding ID / MSTI
- info.hash_uc_fid = false; // Do not build the L2 lookup hash with FID, but VID
- info.hash_mc_fid = false; // Do the same for Multicast packets
- info.profile_id = 0; // Use default Vlan Profile 0
- info.tagged_ports = 0; // Initially no port members
+ info.fid = 0; /* Default Forwarding ID / MSTI */
+ info.hash_uc_fid = false; /* Do not build the L2 lookup hash with FID, but VID */
+ info.hash_mc_fid = false; /* Do the same for Multicast packets */
+ info.profile_id = 0; /* Use default Vlan Profile 0 */
+ info.tagged_ports = 0; /* Initially no port members */
if (priv->family_id == RTL9310_FAMILY_ID) {
info.if_id = 0;
info.multicast_grp_mask = 0;
info.l2_tunnel_list_id = -1;
}
- // Initialize all vlans 0-4095
+ /* Initialize all vlans 0-4095 */
for (i = 0; i < MAX_VLANS; i ++)
priv->r->vlan_set_tagged(i, &info);
- // reset PVIDs; defaults to 1 on reset
+ /* reset PVIDs; defaults to 1 on reset */
for (i = 0; i <= priv->ds->num_ports; i++) {
priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_INNER, 0);
priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_OUTER, 0);
priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_OUTER, PBVLAN_MODE_UNTAG_AND_PRITAG);
}
- // Set forwarding action based on inner VLAN tag
+ /* Set forwarding action based on inner VLAN tag */
for (i = 0; i < priv->cpu_port; i++)
priv->r->vlan_fwd_on_inner(i, true);
}
sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
}
- // Disable all ports except CPU port
+ /* Disable all ports except CPU port */
for (i = 0; i < ds->num_ports; i++)
priv->ports[i].enable = false;
priv->ports[priv->cpu_port].enable = true;
rtl930x_print_matrix();
- // TODO: Initialize statistics
+ /* TODO: Initialize statistics */
rtl83xx_vlan_setup(priv);
phylink_set(mask, 1000baseT_Half);
}
- // Internal phys of the RTL93xx family provide 10G
+ /* Internal phys of the RTL93xx family provide 10G */
if (priv->ports[port].phy_is_integrated &&
state->interface == PHY_INTERFACE_MODE_1000BASEX) {
phylink_set(mask, 1000baseX_Full);
reg |= 1 << speed_bit;
break;
default:
- break; // Ignore, including 10MBit which has a speed value of 0
+ break; /* Ignore, including 10MBit which has a speed value of 0 */
}
if (priv->family_id == RTL8380_FAMILY_ID) {
reg |= RTL839X_DUPLEX_MODE;
}
- // LAG members must use DUPLEX and we need to enable the link
+ /* LAG members must use DUPLEX and we need to enable the link */
if (priv->lagmembers & BIT_ULL(port)) {
switch(priv->family_id) {
case RTL8380_FAMILY_ID:
}
}
- // Disable AN
+ /* Disable AN */
if (priv->family_id == RTL8380_FAMILY_ID)
reg &= ~RTL838X_NWAY_EN;
sw_w32(reg, priv->r->mac_force_mode_ctrl(port));
rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_10GBASER);
break;
case PHY_INTERFACE_MODE_USXGMII:
- // Translates to MII_USXGMII_10GSXGMII
+ /* Translates to MII_USXGMII_10GSXGMII */
band = rtl931x_sds_cmu_band_get(sds_num, PHY_INTERFACE_MODE_USXGMII);
rtl931x_sds_init(sds_num, PHY_INTERFACE_MODE_USXGMII);
break;
reg &= ~(RTL931X_DUPLEX_MODE | RTL931X_FORCE_EN | RTL931X_FORCE_LINK_EN);
reg &= ~(0xf << 12);
- reg |= 0x2 << 12; // Set SMI speed to 0x2
+ reg |= 0x2 << 12; /* Set SMI speed to 0x2 */
reg |= RTL931X_TX_PAUSE_EN | RTL931X_RX_PAUSE_EN;
pr_info("%s port %d, mode %x, phy-mode: %s, speed %d, link %d\n", __func__,
port, mode, phy_modes(state->interface), state->speed, state->link);
- // Nothing to be done for the CPU-port
+ /* Nothing to be done for the CPU-port */
if (port == priv->cpu_port)
return;
break;
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_10GKR:
- sds_mode = 0x1b; // 10G 1000X Auto
+ sds_mode = 0x1b; /* 10G 1000X Auto */
break;
case PHY_INTERFACE_MODE_USXGMII:
sds_mode = 0x0d;
reg |= RTL930X_DUPLEX_MODE;
if (priv->ports[port].phy_is_integrated)
- reg &= ~RTL930X_FORCE_EN; // Clear MAC_FORCE_EN to allow SDS-MAC link
+ reg &= ~RTL930X_FORCE_EN; /* Clear MAC_FORCE_EN to allow SDS-MAC link */
else
reg |= RTL930X_FORCE_EN;
/* Stop TX/RX to port */
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
- // No longer force link
+ /* No longer force link */
sw_w32_mask(0x3, 0, priv->r->mac_force_mode_ctrl(port));
}
/* Stop TX/RX to port */
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(port));
- // No longer force link
+ /* No longer force link */
if (priv->family_id == RTL9300_FAMILY_ID)
v = RTL930X_FORCE_EN | RTL930X_FORCE_LINK_EN;
else if (priv->family_id == RTL9310_FAMILY_ID)
struct rtl838x_switch_priv *priv = ds->priv;
/* Restart TX/RX to port */
sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
- // TODO: Set speed/duplex/pauses
+ /* TODO: Set speed/duplex/pauses */
}
static void rtl93xx_phylink_mac_link_up(struct dsa_switch *ds, int port,
/* Restart TX/RX to port */
sw_w32_mask(0, 0x3, priv->r->mac_port_ctrl(port));
- // TODO: Set speed/duplex/pauses
+ /* TODO: Set speed/duplex/pauses */
}
static void rtl83xx_get_strings(struct dsa_switch *ds,
}
set_bit(mc_group, priv->mc_group_bm);
- mc_group++; // We cannot use group 0, as this is used for lookup miss flooding
+ mc_group++; /* We cannot use group 0, as this is used for lookup miss flooding */
portmask = BIT_ULL(port) | BIT_ULL(priv->cpu_port);
priv->r->write_mcast_pmask(mc_group, portmask);
v |= priv->ports[port].pm;
priv->r->traffic_set(port, v);
- // TODO: Figure out if this is necessary
+ /* TODO: Figure out if this is necessary */
if (priv->family_id == RTL9300_FAMILY_ID) {
sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_SABLK_CTRL);
sw_w32_mask(0, BIT(port), RTL930X_L2_PORT_DABLK_CTRL);
if (!dsa_is_user_port(ds, port))
return;
- // BUG: This does not work on RTL931X
+ /* BUG: This does not work on RTL931X */
/* remove port from switch mask of CPU_PORT */
priv->r->traffic_disable(priv->cpu_port, port);
store_mcgroups(priv, port);
u64 entry;
pr_debug("%s: using key %x, for seed %016llx\n", __func__, key, seed);
- // Loop over all entries in the hash-bucket and over the second block on 93xx SoCs
+ /* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
for (i = 0; i < priv->l2_bucket_size; i++) {
entry = priv->r->read_l2_entry_using_hash(key, i, e);
pr_debug("valid %d, mac %016llx\n", e->valid, ether_addr_to_u64(&e->mac[0]));
idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
- // Found an existing or empty entry
+ /* Found an existing or empty entry */
if (idx >= 0) {
rtl83xx_setup_l2_uc_entry(&e, port, vid, mac);
priv->r->write_l2_entry_using_hash(idx >> 2, idx & 0x3, &e);
goto out;
}
- // Hash buckets full, try CAM
+ /* Hash buckets full, try CAM */
rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
if (idx >= 0) {
idx = rtl83xx_find_l2_hash_entry(priv, seed, false, &e);
- // Found an existing or empty entry
+ /* Found an existing or empty entry */
if (idx >= 0) {
if (e.valid) {
pr_debug("Found an existing entry %016llx, mc_group %d\n",
goto out;
}
- // Hash buckets full, try CAM
+ /* Hash buckets full, try CAM */
rtl83xx_find_l2_cam_entry(priv, seed, false, &e);
if (idx >= 0) {
}
goto out;
}
- // TODO: Re-enable with a newer kernel: err = -ENOENT;
+ /* TODO: Re-enable with a newer kernel: err = -ENOENT; */
out:
mutex_unlock(&priv->reg_mutex);
struct rtl838x_switch_priv *priv = ds->priv;
pr_debug("%s: %d\n", __func__, port);
- // Nothing to be done...
+ /* Nothing to be done... */
return 0;
}
static void rtl838x_storm_enable(struct rtl838x_switch_priv *priv, int port, bool enable)
{
- // Enable Storm control for that port for UC, MC, and BC
+ /* Enable Storm control for that port for UC, MC, and BC */
if (enable)
sw_w32(0x7, RTL838X_STORM_CTRL_LB_CTRL(port));
else
int i;
pr_info("Enabling Storm control\n");
- // TICK_PERIOD_PPS
+ /* TICK_PERIOD_PPS */
if (priv->id == 0x8380)
sw_w32_mask(0x3ff << 20, 434 << 20, RTL838X_SCHED_LB_TICK_TKN_CTRL_0);
- // Set burst rate
- sw_w32(0x00008000, RTL838X_STORM_CTRL_BURST_0); // UC
- sw_w32(0x80008000, RTL838X_STORM_CTRL_BURST_1); // MC and BC
+ /* Set burst rate */
+ sw_w32(0x00008000, RTL838X_STORM_CTRL_BURST_0); /* UC */
+ sw_w32(0x80008000, RTL838X_STORM_CTRL_BURST_1); /* MC and BC */
- // Set burst Packets per Second to 32
- sw_w32(0x00000020, RTL838X_STORM_CTRL_BURST_PPS_0); // UC
- sw_w32(0x00200020, RTL838X_STORM_CTRL_BURST_PPS_1); // MC and BC
+ /* Set burst Packets per Second to 32 */
+ sw_w32(0x00000020, RTL838X_STORM_CTRL_BURST_PPS_0); /* UC */
+ sw_w32(0x00200020, RTL838X_STORM_CTRL_BURST_PPS_1); /* MC and BC */
- // Include IFG in storm control, rate based on bytes/s (0 = packets)
+ /* Include IFG in storm control, rate based on bytes/s (0 = packets) */
sw_w32_mask(0, 1 << 6 | 1 << 5, RTL838X_STORM_CTRL);
- // Bandwidth control includes preamble and IFG (10 Bytes)
+ /* Bandwidth control includes preamble and IFG (10 Bytes) */
sw_w32_mask(0, 1, RTL838X_SCHED_CTRL);
- // On SoCs except RTL8382M, set burst size of port egress
+ /* On SoCs except RTL8382M, set burst size of port egress */
if (priv->id != 0x8382)
sw_w32_mask(0xffff, 0x800, RTL838X_SCHED_LB_THR);
}
}
- // Attack prevention, enable all attack prevention measures
- //sw_w32(0x1ffff, RTL838X_ATK_PRVNT_CTRL);
+ /* Attack prevention, enable all attack prevention measures */
+ /* sw_w32(0x1ffff, RTL838X_ATK_PRVNT_CTRL); */
/* Attack prevention, drop (bit = 0) problematic packets on all ports.
* Setting bit = 1 means: trap to CPU
*/
- //sw_w32(0, RTL838X_ATK_PRVNT_ACT);
- // Enable attack prevention on all ports
- //sw_w32(0x0fffffff, RTL838X_ATK_PRVNT_PORT_EN);
+ /* sw_w32(0, RTL838X_ATK_PRVNT_ACT); */
+ /* Enable attack prevention on all ports */
+ /* sw_w32(0x0fffffff, RTL838X_ATK_PRVNT_PORT_EN); */
}
/* Sets the rate limit, 10MBit/s is equal to a rate value of 625 */
/* Tick length and token size settings for SoC with 250MHz,
* RTL8350 family would use 50MHz
*/
- // Set the special tick period
+ /* Set the special tick period */
sw_w32(976563, RTL839X_STORM_CTRL_SPCL_LB_TICK_TKN_CTRL);
- // Ingress tick period and token length 10G
+ /* Ingress tick period and token length 10G */
sw_w32(18 << 11 | 151, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_0);
- // Ingress tick period and token length 1G
+ /* Ingress tick period and token length 1G */
sw_w32(245 << 11 | 129, RTL839X_IGR_BWCTRL_LB_TICK_TKN_CTRL_1);
- // Egress tick period 10G, bytes/token 10G and tick period 1G, bytes/token 1G
+ /* Egress tick period 10G, bytes/token 10G and tick period 1G, bytes/token 1G */
sw_w32(18 << 24 | 151 << 16 | 185 << 8 | 97, RTL839X_SCHED_LB_TICK_TKN_CTRL);
- // Set the tick period of the CPU and the Token Len
+ /* Set the tick period of the CPU and the Token Len */
sw_w32(3815 << 8 | 1, RTL839X_SCHED_LB_TICK_TKN_PPS_CTRL);
- // Set the Weighted Fair Queueing burst size
+ /* Set the Weighted Fair Queueing burst size */
sw_w32_mask(0xffff, 4500, RTL839X_SCHED_LB_THR);
- // Storm-rate calculation is based on bytes/sec (bit 5), include IFG (bit 6)
+ /* Storm-rate calculation is based on bytes/sec (bit 5), include IFG (bit 6) */
sw_w32_mask(0, 1 << 5 | 1 << 6, RTL839X_STORM_CTRL);
/* Based on the rate control mode being bytes/s
continue;
if (priv->ports[p].is10G)
- rtl839x_set_egress_rate(priv, p, 625000); // 10GB/s
+ rtl839x_set_egress_rate(priv, p, 625000); /* 10GBit/s */
else
- rtl839x_set_egress_rate(priv, p, 62500); // 1GB/s
+ rtl839x_set_egress_rate(priv, p, 62500); /* 1GBit/s */
- // Setup queues: all RTL83XX SoCs have 8 queues, maximum rate
+ /* Set up queues: all RTL83XX SoCs have 8 queues, maximum rate */
for (q = 0; q < 8; q++)
rtl839x_egress_rate_queue_limit(priv, p, q, 0xfffff);
if (priv->ports[p].is10G) {
- // Set high threshold to maximum
+ /* Set high threshold to maximum */
sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_10G_0(p));
} else {
- // Set high threshold to maximum
+ /* Set high threshold to maximum */
sw_w32_mask(0xffff, 0xffff, RTL839X_IGR_BWCTRL_PORT_CTRL_1(p));
}
}
- // Set global ingress low watermark rate
+ /* Set global ingress low watermark rate */
sw_w32(65532, RTL839X_IGR_BWCTRL_CTRL_LB_THR);
}
mutex_lock(&priv->reg_mutex);
/* Check whether we need to empty the egress queue of that port due to Errata E0014503 */
if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) {
- // Read Operations, Adminstatrion and Management control register
+ /* Read Operations, Administration and Management control register */
oam_state = sw_r32(RTL839X_OAM_CTRL);
- // Get current OAM state
+ /* Get current OAM state */
oam_port_state = sw_r32(RTL839X_OAM_PORT_ACT_CTRL(port));
- // Disable OAM to block traffice
+ /* Disable OAM to block traffic */
v = sw_r32(RTL839X_OAM_CTRL);
sw_w32_mask(0, 1, RTL839X_OAM_CTRL);
v = sw_r32(RTL839X_OAM_CTRL);
- // Set to trap action OAM forward (bits 1, 2) and OAM Mux Action Drop (bit 0)
+ /* Set to trap action OAM forward (bits 1, 2) and OAM Mux Action Drop (bit 0) */
sw_w32(0x2, RTL839X_OAM_PORT_ACT_CTRL(port));
- // Set port egress rate to unlimited
+ /* Set port egress rate to unlimited */
egress_rate = rtl839x_set_egress_rate(priv, port, 0xFFFFF);
- // Wait until the egress used page count of that port is 0
+ /* Wait until the egress used page count of that port is 0 */
i = 0;
do {
usleep_range(100, 200);
} while (i < 3500 && count > 0);
}
- // Actually set the scheduling algorithm
+ /* Actually set the scheduling algorithm */
rtl839x_read_scheduling_table(port);
sw_w32_mask(BIT(19), sched ? BIT(19) : 0, RTL839X_TBL_ACCESS_DATA_2(8));
rtl839x_write_scheduling_table(port);
if (sched == WEIGHTED_FAIR_QUEUE && t == WEIGHTED_ROUND_ROBIN && port != priv->cpu_port) {
- // Restore OAM state to control register
+ /* Restore OAM state to control register */
sw_w32(oam_state, RTL839X_OAM_CTRL);
- // Restore trap action state
+ /* Restore trap action state */
sw_w32(oam_port_state, RTL839X_OAM_PORT_ACT_CTRL(port));
- // Restore port egress rate
+ /* Restore port egress rate */
rtl839x_set_egress_rate(priv, port, egress_rate);
}
pr_info("RTL838X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL838X_PRI_SEL_TBL_CTRL(0)));
rtl83xx_setup_default_prio2queue();
- // Enable inner (bit 12) and outer (bit 13) priority remapping from DSCP
+ /* Enable inner (bit 12) and outer (bit 13) priority remapping from DSCP */
sw_w32_mask(0, BIT(12) | BIT(13), RTL838X_PRI_DSCP_INVLD_CTRL0);
/* Set default weight for calculating internal priority, in prio selection group 0
v = 3 | (4 << 3) | (5 << 6) | (6 << 9) | (7 << 12);
sw_w32(v, RTL838X_PRI_SEL_TBL_CTRL(0));
- // Set the inner and outer priority one-to-one to re-marked outer dot1p priority
+ /* Set the inner and outer priority one-to-one to re-marked outer dot1p priority */
v = 0;
for (p = 0; p < 8; p++)
v |= p << (3 * p);
v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
sw_w32(v, RTL838X_PRI_SEL_IPRI_REMAP);
- // On all ports set scheduler type to WFQ
+ /* On all ports set scheduler type to WFQ */
for (i = 0; i <= soc_info.cpu_port; i++)
sw_w32(0, RTL838X_SCHED_P_TYPE_CTRL(i));
- // Enable egress scheduler for CPU-Port
+ /* Enable egress scheduler for CPU-Port */
sw_w32_mask(0, BIT(8), RTL838X_SCHED_LB_CTRL(soc_info.cpu_port));
- // Enable egress drop allways on
+ /* Enable egress drop always on */
sw_w32_mask(0, BIT(11), RTL838X_FC_P_EGR_DROP_CTRL(soc_info.cpu_port));
- // Give special trap frames priority 7 (BPDUs) and routing exceptions:
+ /* Give special trap frames priority 7 (BPDUs) and routing exceptions: */
sw_w32_mask(0, 7 << 3 | 7, RTL838X_QM_PKT2CPU_INTPRI_2);
- // Give RMA frames priority 7:
+ /* Give RMA frames priority 7: */
sw_w32_mask(0, 7, RTL838X_QM_PKT2CPU_INTPRI_1);
}
for (port = 0; port < soc_info.cpu_port; port++)
sw_w32(7, RTL839X_QM_PORT_QNUM(port));
- // CPU-port gets queue number 7
+ /* CPU-port gets queue number 7 */
sw_w32(7, RTL839X_QM_PORT_QNUM(soc_info.cpu_port));
for (port = 0; port <= soc_info.cpu_port; port++) {
rtl83xx_set_ingress_priority(port, 0);
rtl839x_set_scheduling_algorithm(priv, port, WEIGHTED_FAIR_QUEUE);
rtl839x_set_scheduling_queue_weights(priv, port, default_queue_weights);
- // Do re-marking based on outer tag
+ /* Do re-marking based on outer tag */
sw_w32_mask(0, BIT(port % 32), RTL839X_RMK_PORT_DEI_TAG_CTRL(port));
}
- // Remap dot1p priorities to internal priority, for this the outer tag needs be re-marked
+ /* Remap dot1p priorities to internal priority, for this the outer tag needs be re-marked */
v = 0;
for (p = 0; p < 8; p++)
v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
*/
sw_w32(2 << 2, RTL839X_PRI_SEL_DEI2DP_REMAP);
- // Re-mark DEI: 4 bit-fields of 2 bits each, field 0 is bits 0-1, ...
+ /* Re-mark DEI: 4 bit-fields of 2 bits each, field 0 is bits 0-1, ... */
sw_w32((0x1 << 2) | (0x1 << 4), RTL839X_RMK_DEI_CTRL);
/* Set Congestion avoidance drop probability to 0 for drop precedences 0-2 (bits 24-31)
extern struct mutex smi_lock;
-// see_dal_maple_acl_log2PhyTmplteField and src/app/diag_v2/src/diag_acl.c
+/* see dal_maple_acl_log2PhyTmplteField and src/app/diag_v2/src/diag_acl.c */
/* Definition of the RTL838X-specific template field IDs as used in the PIE */
enum template_field_id {
TEMPLATE_FIELD_SPMMASK = 0,
- TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
- TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-28
+ TEMPLATE_FIELD_SPM0 = 1, /* Source portmask ports 0-15 */
+ TEMPLATE_FIELD_SPM1 = 2, /* Source portmask ports 16-28 */
TEMPLATE_FIELD_RANGE_CHK = 3,
- TEMPLATE_FIELD_DMAC0 = 4, // Destination MAC [15:0]
- TEMPLATE_FIELD_DMAC1 = 5, // Destination MAC [31:16]
- TEMPLATE_FIELD_DMAC2 = 6, // Destination MAC [47:32]
- TEMPLATE_FIELD_SMAC0 = 7, // Source MAC [15:0]
- TEMPLATE_FIELD_SMAC1 = 8, // Source MAC [31:16]
- TEMPLATE_FIELD_SMAC2 = 9, // Source MAC [47:32]
- TEMPLATE_FIELD_ETHERTYPE = 10, // Ethernet typ
- TEMPLATE_FIELD_OTAG = 11, // Outer VLAN tag
- TEMPLATE_FIELD_ITAG = 12, // Inner VLAN tag
- TEMPLATE_FIELD_SIP0 = 13, // IPv4 or IPv6 source IP[15:0] or ARP/RARP
- // source protocol address in header
- TEMPLATE_FIELD_SIP1 = 14, // IPv4 or IPv6 source IP[31:16] or ARP/RARP
- TEMPLATE_FIELD_DIP0 = 15, // IPv4 or IPv6 destination IP[15:0]
- TEMPLATE_FIELD_DIP1 = 16, // IPv4 or IPv6 destination IP[31:16]
- TEMPLATE_FIELD_IP_TOS_PROTO = 17, // IPv4 TOS/IPv6 traffic class and
- // IPv4 proto/IPv6 next header fields
- TEMPLATE_FIELD_L34_HEADER = 18, // packet with extra tag and IPv6 with auth, dest,
- // frag, route, hop-by-hop option header,
- // IGMP type, TCP flag
- TEMPLATE_FIELD_L4_SPORT = 19, // TCP/UDP source port
- TEMPLATE_FIELD_L4_DPORT = 20, // TCP/UDP destination port
+ TEMPLATE_FIELD_DMAC0 = 4, /* Destination MAC [15:0] */
+ TEMPLATE_FIELD_DMAC1 = 5, /* Destination MAC [31:16] */
+ TEMPLATE_FIELD_DMAC2 = 6, /* Destination MAC [47:32] */
+ TEMPLATE_FIELD_SMAC0 = 7, /* Source MAC [15:0] */
+ TEMPLATE_FIELD_SMAC1 = 8, /* Source MAC [31:16] */
+ TEMPLATE_FIELD_SMAC2 = 9, /* Source MAC [47:32] */
+ TEMPLATE_FIELD_ETHERTYPE = 10, /* Ethernet type */
+ TEMPLATE_FIELD_OTAG = 11, /* Outer VLAN tag */
+ TEMPLATE_FIELD_ITAG = 12, /* Inner VLAN tag */
+ TEMPLATE_FIELD_SIP0 = 13, /* IPv4 or IPv6 source IP[15:0] or ARP/RARP */
+ /* source protocol address in header */
+ TEMPLATE_FIELD_SIP1 = 14, /* IPv4 or IPv6 source IP[31:16] or ARP/RARP */
+ TEMPLATE_FIELD_DIP0 = 15, /* IPv4 or IPv6 destination IP[15:0] */
+ TEMPLATE_FIELD_DIP1 = 16, /* IPv4 or IPv6 destination IP[31:16] */
+ TEMPLATE_FIELD_IP_TOS_PROTO = 17, /* IPv4 TOS/IPv6 traffic class and */
+ /* IPv4 proto/IPv6 next header fields */
+ TEMPLATE_FIELD_L34_HEADER = 18, /* packet with extra tag and IPv6 with auth, dest, */
+ /* frag, route, hop-by-hop option header, */
+ /* IGMP type, TCP flag */
+ TEMPLATE_FIELD_L4_SPORT = 19, /* TCP/UDP source port */
+ TEMPLATE_FIELD_L4_DPORT = 20, /* TCP/UDP destination port */
TEMPLATE_FIELD_ICMP_IGMP = 21,
TEMPLATE_FIELD_IP_RANGE = 22,
- TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 23, // Field selector mask
+ TEMPLATE_FIELD_FIELD_SELECTOR_VALID = 23, /* Field selector mask */
TEMPLATE_FIELD_FIELD_SELECTOR_0 = 24,
TEMPLATE_FIELD_FIELD_SELECTOR_1 = 25,
TEMPLATE_FIELD_FIELD_SELECTOR_2 = 26,
TEMPLATE_FIELD_FIELD_SELECTOR_3 = 27,
- TEMPLATE_FIELD_SIP2 = 28, // IPv6 source IP[47:32]
- TEMPLATE_FIELD_SIP3 = 29, // IPv6 source IP[63:48]
- TEMPLATE_FIELD_SIP4 = 30, // IPv6 source IP[79:64]
- TEMPLATE_FIELD_SIP5 = 31, // IPv6 source IP[95:80]
- TEMPLATE_FIELD_SIP6 = 32, // IPv6 source IP[111:96]
- TEMPLATE_FIELD_SIP7 = 33, // IPv6 source IP[127:112]
- TEMPLATE_FIELD_DIP2 = 34, // IPv6 destination IP[47:32]
- TEMPLATE_FIELD_DIP3 = 35, // IPv6 destination IP[63:48]
- TEMPLATE_FIELD_DIP4 = 36, // IPv6 destination IP[79:64]
- TEMPLATE_FIELD_DIP5 = 37, // IPv6 destination IP[95:80]
- TEMPLATE_FIELD_DIP6 = 38, // IPv6 destination IP[111:96]
- TEMPLATE_FIELD_DIP7 = 39, // IPv6 destination IP[127:112]
- TEMPLATE_FIELD_FWD_VID = 40, // Forwarding VLAN-ID
+ TEMPLATE_FIELD_SIP2 = 28, /* IPv6 source IP[47:32] */
+ TEMPLATE_FIELD_SIP3 = 29, /* IPv6 source IP[63:48] */
+ TEMPLATE_FIELD_SIP4 = 30, /* IPv6 source IP[79:64] */
+ TEMPLATE_FIELD_SIP5 = 31, /* IPv6 source IP[95:80] */
+ TEMPLATE_FIELD_SIP6 = 32, /* IPv6 source IP[111:96] */
+ TEMPLATE_FIELD_SIP7 = 33, /* IPv6 source IP[127:112] */
+ TEMPLATE_FIELD_DIP2 = 34, /* IPv6 destination IP[47:32] */
+ TEMPLATE_FIELD_DIP3 = 35, /* IPv6 destination IP[63:48] */
+ TEMPLATE_FIELD_DIP4 = 36, /* IPv6 destination IP[79:64] */
+ TEMPLATE_FIELD_DIP5 = 37, /* IPv6 destination IP[95:80] */
+ TEMPLATE_FIELD_DIP6 = 38, /* IPv6 destination IP[111:96] */
+ TEMPLATE_FIELD_DIP7 = 39, /* IPv6 destination IP[127:112] */
+ TEMPLATE_FIELD_FWD_VID = 40, /* Forwarding VLAN-ID */
TEMPLATE_FIELD_FLOW_LABEL = 41,
};
static void rtl838x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v;
- // Read VLAN table (0) via register 0
+ /* Read VLAN table (0) via register 0 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
rtl_table_read(r, vlan);
info->hash_uc_fid = !!(v & 0x10);
info->fid = (v >> 5) & 0x3f;
- // Read UNTAG table (0) via table register 1
+ /* Read UNTAG table (0) via table register 1 */
r = rtl_table_get(RTL8380_TBL_1, 0);
rtl_table_read(r, vlan);
info->untagged_ports = sw_r32(rtl_table_data(r, 0));
static void rtl838x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v;
- // Access VLAN table (0) via register 0
+ /* Access VLAN table (0) via register 0 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 0);
sw_w32(info->tagged_ports, rtl_table_data(r, 0));
static void rtl838x_vlan_set_untagged(u32 vlan, u64 portmask)
{
- // Access UNTAG table (0) via register 1
+ /* Access UNTAG table (0) via register 1 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 0);
sw_w32(portmask & 0x1fffffff, rtl_table_data(r, 0));
e->valid = false;
else
e->type = L2_UNICAST;
- } else { // L2 multicast
+ } else { /* L2 multicast */
pr_debug("Got L2 MC entry: %08x %08x %08x\n", r[0], r[1], r[2]);
e->valid = true;
e->type = L2_MULTICAST;
e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
}
- } else { // IPv4 and IPv6 multicast
+ } else { /* IPv4 and IPv6 multicast */
e->valid = true;
e->mc_portmask_index = (r[0] >> 12) & 0x1ff;
e->mc_gip = (r[1] << 20) | (r[2] >> 12);
r[0] |= e->nh_route_id & 0x1ff;
}
r[0] |= (e->age & 0x3) << 17;
- } else { // L2 Multicast
+ } else { /* L2 Multicast */
r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
r[2] |= e->rvid & 0xfff;
r[0] |= e->vid & 0xfff;
pr_debug("FILL MC: %08x %08x %08x\n", r[0], r[1], r[2]);
}
- } else { // IPv4 and IPv6 multicast
+ } else { /* IPv4 and IPv6 multicast */
r[0] |= (e->mc_portmask_index & 0x1ff) << 12;
r[1] = e->mc_gip >> 20;
r[2] = e->mc_gip << 12;
static u64 rtl838x_read_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); // Access L2 Table 0
- u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); /* Access L2 Table 0 */
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
int i;
rtl_table_read(q, idx);
if (!e->valid)
return 0;
- return (((u64) r[1]) << 32) | (r[2]); // mac and vid concatenated as hash seed
+ return (((u64) r[1]) << 32) | (r[2]); /* mac and vid concatenated as hash seed */
}
static void rtl838x_write_l2_entry_using_hash(u32 hash, u32 pos, struct rtl838x_l2_entry *e)
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
int i;
- u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
rtl838x_fill_l2_row(r, e);
static u64 rtl838x_read_cam(int idx, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); /* Access L2 Table 1 */
int i;
rtl_table_read(q, idx);
pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
- // Return MAC with concatenated VID ac concatenated ID
+ /* Return MAC with concatenated VID as hash seed */
return (((u64) r[1]) << 32) | r[2];
}
static void rtl838x_write_cam(int idx, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); // Access L2 Table 1
+ struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); /* Access L2 Table 1 */
int i;
rtl838x_fill_l2_row(r, e);
static u64 rtl838x_read_mcast_pmask(int idx)
{
u32 portmask;
- // Read MC_PMSK (2) via register RTL8380_TBL_L2
+ /* Read MC_PMSK (2) via register RTL8380_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
rtl_table_read(q, idx);
static void rtl838x_write_mcast_pmask(int idx, u64 portmask)
{
- // Access MC_PMSK (2) via register RTL8380_TBL_L2
+ /* Access MC_PMSK (2) via register RTL8380_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 2);
sw_w32(((u32)portmask) & 0x1fffffff, rtl_table_data(q, 0));
static void rtl838x_vlan_profile_setup(int profile)
{
u32 pmask_id = UNKNOWN_MC_PMASK;
- // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for unknown MC traffic flooding
+ /* Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for unknown MC traffic flooding */
u32 p = 1 | pmask_id << 1 | pmask_id << 10 | pmask_id << 19;
sw_w32(p, RTL838X_VLAN_PROFILE(profile));
* and per vlan (bit 2) */
sw_w32(0x7, RTL838X_L2_LRN_CONSTRT_EN);
- // Limit learning to maximum: 16k entries, after that just flood (bits 0-1)
+ /* Limit learning to maximum: 16k entries, after that just flood (bits 0-1) */
sw_w32((0x3fff << 2) | 0, RTL838X_L2_LRN_CONSTRT);
- // Do not trap ARP packets to CPU_PORT
+ /* Do not trap ARP packets to CPU_PORT */
sw_w32(0, RTL838X_SPCL_TRAP_ARP_CTRL);
}
static void rtl838x_enable_learning(int port, bool enable)
{
- // Limit learning to maximum: 16k entries
+ /* Limit learning to maximum: 16k entries */
sw_w32_mask(0x3fff << 2, enable ? (0x3fff << 2) : 0,
RTL838X_L2_PORT_LRN_CONSTRT + (port << 2));
{
u32 v;
- // This works only for Ethernet ports, and on the RTL838X, ports from 24 are SFP
+ /* This works only for Ethernet ports, and on the RTL838X, ports from 24 are SFP */
if (port >= 24)
return;
pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
v = enable ? 0x3 : 0x0;
- // Set EEE state for 100 (bit 9) & 1000MBit (bit 10)
+ /* Set EEE state for 100 (bit 9) & 1000MBit (bit 10) */
sw_w32_mask(0x3 << 9, v << 9, priv->r->mac_force_mode_ctrl(port));
- // Set TX/RX EEE state
+ /* Set TX/RX EEE state */
if (enable) {
sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_TX_EN);
sw_w32_mask(0, BIT(port), RTL838X_EEE_PORT_RX_EN);
sw_w32(0x5001411, RTL838X_EEE_TX_TIMER_GIGA_CTRL);
sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
- // Enable EEE MAC support on ports
+ /* Enable EEE MAC support on ports */
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl838x_port_eee_set(priv, i, enable);
int block = index / PIE_BLOCK_SIZE;
u32 block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
- // Make sure rule-lookup is enabled in the block
+ /* Make sure rule-lookup is enabled in the block */
if (!(block_state & BIT(block)))
sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
}
pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
mutex_lock(&priv->reg_mutex);
- // Remember currently active blocks
+ /* Remember currently active blocks */
block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
- // Make sure rule-lookup is disabled in the relevant blocks
+ /* Make sure rule-lookup is disabled in the relevant blocks */
for (block = block_from; block <= block_to; block++) {
if (block_state & BIT(block))
sw_w32(block_state & (~BIT(block)), RTL838X_ACL_BLK_LOOKUP_CTRL);
}
- // Write from-to and execute bit into control register
+ /* Write from-to and execute bit into control register */
sw_w32(v, RTL838X_ACL_CLR_CTRL);
- // Wait until command has completed
+ /* Wait until command has completed */
do {
} while (sw_r32(RTL838X_ACL_CLR_CTRL) & BIT(0));
- // Re-enable rule lookup
+ /* Re-enable rule lookup */
for (block = block_from; block <= block_to; block++) {
if (!(block_state & BIT(block)))
sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
break;
case TEMPLATE_FIELD_SIP2:
pr->is_ipv6 = true;
- // Make use of limitiations on the position of the match values
+ /* Make use of limitations on the position of the match values */
ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
r[4 - i / 2], r[3 - i / 2]);
ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
r[14] |= pr->ivalid ? BIT(27) : 0;
if (pr->drop)
- r[17] = 0x1 << 14; // Standard drop action
+ r[17] = 0x1 << 14; /* Standard drop action */
else
r[17] = 0;
r[17] |= pr->fwd_sel ? BIT(13) : 0;
* defines which Action Information Field (0-4) in the IACL table stores
* the additional data of the action (like e.g. the port number a packet is
* forwarded to) */
- // TODO: count bits in selectors to limit to a maximum number of actions
- if (pr->fwd_sel) { // Forwarding action
+ /* TODO: count bits in selectors to limit to a maximum number of actions */
+ if (pr->fwd_sel) { /* Forwarding action */
data = pr->fwd_act << 13;
data |= pr->fwd_data;
data |= pr->bypass_all ? BIT(12) : 0;
fields_used++;
}
- if (pr->ovid_sel) { // Outer VID action
+ if (pr->ovid_sel) { /* Outer VID action */
data = (pr->ovid_act & 0x3) << 12;
data |= pr->ovid_data;
*aif-- = data;
fields_used++;
}
- if (pr->ivid_sel) { // Inner VID action
+ if (pr->ivid_sel) { /* Inner VID action */
data = (pr->ivid_act & 0x3) << 12;
data |= pr->ivid_data;
*aif-- = data;
fields_used++;
}
- if (pr->flt_sel) { // Filter action
+ if (pr->flt_sel) { /* Filter action */
*aif-- = pr->flt_data;
fields_used++;
}
- if (pr->log_sel) { // Log action
+ if (pr->log_sel) { /* Log action */
if (fields_used >= 4)
return -1;
*aif-- = pr->log_data;
fields_used++;
}
- if (pr->rmk_sel) { // Remark action
+ if (pr->rmk_sel) { /* Remark action */
if (fields_used >= 4)
return -1;
*aif-- = pr->rmk_data;
fields_used++;
}
- if (pr->meter_sel) { // Meter action
+ if (pr->meter_sel) { /* Meter action */
if (fields_used >= 4)
return -1;
*aif-- = pr->meter_data;
fields_used++;
}
- if (pr->tagst_sel) { // Egress Tag Status action
+ if (pr->tagst_sel) { /* Egress Tag Status action */
if (fields_used >= 4)
return -1;
*aif-- = pr->tagst_data;
fields_used++;
}
- if (pr->mir_sel) { // Mirror action
+ if (pr->mir_sel) { /* Mirror action */
if (fields_used >= 4)
return -1;
*aif-- = pr->mir_data;
fields_used++;
}
- if (pr->nopri_sel) { // Normal Priority action
+ if (pr->nopri_sel) { /* Normal Priority action */
if (fields_used >= 4)
return -1;
*aif-- = pr->nopri_data;
fields_used++;
}
- if (pr->cpupri_sel) { // CPU Priority action
+ if (pr->cpupri_sel) { /* CPU Priority action */
if (fields_used >= 4)
return -1;
*aif-- = pr->nopri_data;
fields_used++;
}
- if (pr->otpid_sel) { // OTPID action
+ if (pr->otpid_sel) { /* OTPID action */
if (fields_used >= 4)
return -1;
*aif-- = pr->otpid_data;
fields_used++;
}
- if (pr->itpid_sel) { // ITPID action
+ if (pr->itpid_sel) { /* ITPID action */
if (fields_used >= 4)
return -1;
*aif-- = pr->itpid_data;
fields_used++;
}
- if (pr->shaper_sel) { // Traffic shaper action
+ if (pr->shaper_sel) { /* Traffic shaper action */
if (fields_used >= 4)
return -1;
*aif-- = pr->shaper_data;
if (pr->drop)
pr_debug("%s: Action Drop: %d", __func__, pr->drop);
- if (pr->fwd_sel){ // Forwarding action
+ if (pr->fwd_sel) { /* Forwarding action */
pr->fwd_act = *aif >> 13;
pr->fwd_data = *aif--;
pr->bypass_all = pr->fwd_data & BIT(12);
if (pr->bypass_all || pr->bypass_ibc_sc || pr->bypass_igr_stp)
pr->bypass_sel = true;
}
- if (pr->ovid_sel) // Outer VID action
+ if (pr->ovid_sel) /* Outer VID action */
pr->ovid_data = *aif--;
- if (pr->ivid_sel) // Inner VID action
+ if (pr->ivid_sel) /* Inner VID action */
pr->ivid_data = *aif--;
- if (pr->flt_sel) // Filter action
+ if (pr->flt_sel) /* Filter action */
pr->flt_data = *aif--;
- if (pr->log_sel) // Log action
+ if (pr->log_sel) /* Log action */
pr->log_data = *aif--;
- if (pr->rmk_sel) // Remark action
+ if (pr->rmk_sel) /* Remark action */
pr->rmk_data = *aif--;
- if (pr->meter_sel) // Meter action
+ if (pr->meter_sel) /* Meter action */
pr->meter_data = *aif--;
- if (pr->tagst_sel) // Egress Tag Status action
+ if (pr->tagst_sel) /* Egress Tag Status action */
pr->tagst_data = *aif--;
- if (pr->mir_sel) // Mirror action
+ if (pr->mir_sel) /* Mirror action */
pr->mir_data = *aif--;
- if (pr->nopri_sel) // Normal Priority action
+ if (pr->nopri_sel) /* Normal Priority action */
pr->nopri_data = *aif--;
- if (pr->cpupri_sel) // CPU Priority action
+ if (pr->cpupri_sel) /* CPU Priority action */
pr->nopri_data = *aif--;
- if (pr->otpid_sel) // OTPID action
+ if (pr->otpid_sel) /* OTPID action */
pr->otpid_data = *aif--;
- if (pr->itpid_sel) // ITPID action
+ if (pr->itpid_sel) /* ITPID action */
pr->itpid_data = *aif--;
- if (pr->shaper_sel) // Traffic shaper action
+ if (pr->shaper_sel) /* Traffic shaper action */
pr->shaper_data = *aif--;
}
static int rtl838x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Read IACL table (1) via register 0
+ /* Read IACL table (1) via register 0 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
u32 r[18];
int i;
static int rtl838x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Access IACL table (1) via register 0
+ /* Access IACL table (1) via register 0 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
u32 r[18];
int i, err = 0;
goto err_out;
}
-// rtl838x_pie_rule_dump_raw(r);
+/* rtl838x_pie_rule_dump_raw(r); */
for (i = 0; i < 18; i++)
sw_w32(r[i], rtl_table_data(q, i));
if (ether_addr_to_u64(pr->dmac) && !rtl838x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
return -1;
- // TODO: Check more
+ /* TODO: Check more */
i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
set_bit(idx, priv->pie_use_bm);
pr->valid = true;
- pr->tid = j; // Mapped to template number
+ pr->tid = j; /* Mapped to template number */
pr->tid_m = 0x3;
pr->id = idx;
mutex_init(&priv->pie_mutex);
- // Enable ACL lookup on all ports, including CPU_PORT
+ /* Enable ACL lookup on all ports, including CPU_PORT */
for (i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL838X_ACL_PORT_LOOKUP_CTRL(i));
- // Power on all PIE blocks
+ /* Power on all PIE blocks */
for (i = 0; i < priv->n_pie_blocks; i++)
sw_w32_mask(0, BIT(i), RTL838X_ACL_BLK_PWR_CTRL);
- // Include IPG in metering
+ /* Include IPG in metering */
sw_w32(1, RTL838X_METER_GLB_CTRL);
- // Delete all present rules
+ /* Delete all present rules */
rtl838x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
- // Routing bypasses source port filter: disable write-protection, first
+ /* Routing bypasses source port filter: disable write-protection, first */
sw_w32_mask(0, 3, RTL838X_INT_RW_CTRL);
sw_w32_mask(0, 1, RTL838X_DMY_REG27);
sw_w32_mask(3, 0, RTL838X_INT_RW_CTRL);
- // Enable predefined templates 0, 1 and 2 for even blocks
+ /* Enable predefined templates 0, 1 and 2 for even blocks */
template_selectors = 0 | (1 << 3) | (2 << 6);
for (i = 0; i < 6; i += 2)
sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks
+ /* Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks */
template_selectors = 0 | (3 << 3) | (4 << 6);
for (i = 1; i < priv->n_pie_blocks; i += 2)
sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
- // Group each pair of physical blocks together to a logical block
+ /* Group each pair of physical blocks together to a logical block */
sw_w32(0b10101010101, RTL838X_ACL_BLK_GROUP_CTRL);
}
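The template selector registers written above pack one template index per slot: three 3-bit slots on RTL838X/RTL839X (selector 0 in bits 2:0, selector 1 in bits 5:3, selector 2 in bits 8:6), which is why the setup writes values like 0 | (1 << 3) | (2 << 6); the RTL930X variant further below uses 4-bit slots (0 | (1 << 4)). A minimal illustrative packer, assuming the 3-bit layout just described; the helper is not part of the driver:

/* Illustrative only: pack up to three 3-bit template selectors for an
 * RTL838X/RTL839X ACL block, matching the 0 | (1 << 3) | (2 << 6)
 * values written above.
 */
static inline u32 rtl83xx_tmplte_sel(u32 sel0, u32 sel1, u32 sel2)
{
	return (sel0 & 0x7) | ((sel1 & 0x7) << 3) | ((sel2 & 0x7) << 6);
}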
{
u32 v;
- // Read LOG table (3) via register RTL8380_TBL_0
+ /* Read LOG table (3) via register RTL8380_TBL_0 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
pr_debug("In %s, id %d\n", __func__, counter);
pr_debug("Registers: %08x %08x\n",
sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
v = sw_r32(rtl_table_data(r, 0));
else
static void rtl838x_packet_cntr_clear(int counter)
{
- // Access LOG table (3) via register RTL8380_TBL_0
+ /* Access LOG table (3) via register RTL8380_TBL_0 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_0, 3);
pr_debug("In %s, id %d\n", __func__, counter);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
sw_w32(0, rtl_table_data(r, 0));
else
static void rtl838x_route_read(int idx, struct rtl83xx_route *rt)
{
- // Read ROUTING table (2) via register RTL8380_TBL_1
+ /* Read ROUTING table (2) via register RTL8380_TBL_1 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
pr_debug("In %s, id %d\n", __func__, idx);
rtl_table_read(r, idx);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
rt->nh.gw = sw_r32(rtl_table_data(r, 0));
rt->nh.gw <<= 32;
rt->nh.gw |= sw_r32(rtl_table_data(r, 1));
static void rtl838x_route_write(int idx, struct rtl83xx_route *rt)
{
- // Access ROUTING table (2) via register RTL8380_TBL_1
+ /* Access ROUTING table (2) via register RTL8380_TBL_1 */
struct table_reg *r = rtl_table_get(RTL8380_TBL_1, 2);
pr_debug("In %s, id %d, gw: %016llx\n", __func__, idx, rt->nh.gw);
static int rtl838x_l3_setup(struct rtl838x_switch_priv *priv)
{
- // Nothing to be done
+ /* Nothing to be done */
return 0;
}
void rtl838x_set_distribution_algorithm(int group, int algoidx, u32 algomsk)
{
- algoidx &= 1; // RTL838X only supports 2 concurrent algorithms
+ algoidx &= 1; /* RTL838X only supports 2 concurrent algorithms */
sw_w32_mask(1 << (group % 8), algoidx << (group % 8),
RTL838X_TRK_HASH_IDX_CTRL + ((group >> 3) << 2));
sw_w32(algomsk, RTL838X_TRK_HASH_CTRL + (algoidx << 2));
#define RTL930X_PIE_BLK_PHASE_CTRL (0xA5A4)
#define RTL931X_PIE_BLK_PHASE_CTRL (0x4184)
-// PIE actions
+/* PIE actions */
#define PIE_ACT_COPY_TO_PORT 2
#define PIE_ACT_REDIRECT_TO_PORT 4
#define PIE_ACT_ROUTE_UC 6
#define PIE_ACT_VID_ASSIGN 0
-// L3 actions
+/* L3 actions */
#define L3_FORWARD 0
#define L3_DROP 1
#define L3_TRAP2CPU 2
#define L3_COPY2MASTERCPU 5
#define L3_HARDDROP 6
-// Route actions
+/* Route actions */
#define ROUTE_ACT_FORWARD 0
#define ROUTE_ACT_TRAP2CPU 1
#define ROUTE_ACT_COPY2CPU 2
u8 profile_id;
bool hash_mc_fid;
bool hash_uc_fid;
- u8 fid; // AKA MSTI
+ u8 fid; /* AKA MSTI */
- // The following fields are used only by the RTL931X
- int if_id; // Interface (index in L3_EGR_INTF_IDX)
+ /* The following fields are used only by the RTL931X */
+ int if_id; /* Interface (index in L3_EGR_INTF_IDX) */
u16 multicast_grp_mask;
int l2_tunnel_list_id;
};
u32 mc_sip;
u16 mc_mac_index;
u16 nh_route_id;
- bool nh_vlan_target; // Only RTL83xx: VLAN used for next hop
+ bool nh_vlan_target; /* Only RTL83xx: VLAN used for next hop */
- // The following is only valid on RTL931x
+ /* The following is only valid on RTL931x */
bool is_open_flow;
bool is_pe_forward;
bool is_local_forward;
* to SoC family (e.g. because of different port ranges) */
struct pie_rule {
int id;
- enum pie_phase phase; // Phase in which this template is applied
- int packet_cntr; // ID of a packet counter assigned to this rule
- int octet_cntr; // ID of a byte counter assigned to this rule
+ enum pie_phase phase; /* Phase in which this template is applied */
+ int packet_cntr; /* ID of a packet counter assigned to this rule */
+ int octet_cntr; /* ID of a byte counter assigned to this rule */
u32 last_packet_cnt;
u64 last_octet_cnt;
- // The following are requirements for the pie template
+ /* The following are requirements for the pie template */
bool is_egress;
- bool is_ipv6; // This is a rule with IPv6 fields
+ bool is_ipv6; /* This is a rule with IPv6 fields */
- // Fixed fields that are always matched against on RTL8380
+ /* Fixed fields that are always matched against on RTL8380 */
u8 spmmask_fix;
- u8 spn; // Source port number
- bool stacking_port; // Source port is stacking port
- bool mgnt_vlan; // Packet arrived on management VLAN
- bool dmac_hit_sw; // The packet's destination MAC matches one of the device's
- bool content_too_deep; // The content of the packet cannot be parsed: too many layers
- bool not_first_frag; // Not the first IP fragment
- u8 frame_type_l4; // 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP
- u8 frame_type; // 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6
- bool otag_fmt; // 0: outer tag packet, 1: outer priority tag or untagged
- bool itag_fmt; // 0: inner tag packet, 1: inner priority tag or untagged
- bool otag_exist; // packet with outer tag
- bool itag_exist; // packet with inner tag
- bool frame_type_l2; // 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved
- bool igr_normal_port; // Ingress port is not cpu or stacking port
- u8 tid; // The template ID defining the what the templated fields mean
-
- // Masks for the fields that are always matched against on RTL8380
+ u8 spn; /* Source port number */
+ bool stacking_port; /* Source port is stacking port */
+ bool mgnt_vlan; /* Packet arrived on management VLAN */
+ bool dmac_hit_sw; /* The packet's destination MAC matches one of the device's own MAC addresses */
+ bool content_too_deep; /* The content of the packet cannot be parsed: too many layers */
+ bool not_first_frag; /* Not the first IP fragment */
+ u8 frame_type_l4; /* 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP */
+ u8 frame_type; /* 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6 */
+ bool otag_fmt; /* 0: outer tag packet, 1: outer priority tag or untagged */
+ bool itag_fmt; /* 0: inner tag packet, 1: inner priority tag or untagged */
+ bool otag_exist; /* packet with outer tag */
+ bool itag_exist; /* packet with inner tag */
+ bool frame_type_l2; /* 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved */
+ bool igr_normal_port; /* Ingress port is not cpu or stacking port */
+ u8 tid; /* The template ID defining what the templated fields mean */
+
+ /* Masks for the fields that are always matched against on RTL8380 */
u8 spmmask_fix_m;
u8 spn_m;
bool stacking_port_m;
bool igr_normal_port_m;
u8 tid_m;
- // Logical operations between rules, special rules for rule numbers apply
+ /* Logical operations between rules, special rules for rule numbers apply */
bool valid;
- bool cond_not; // Matches when conditions not match
- bool cond_and1; // And this rule 2n with the next rule 2n+1 in same block
- bool cond_and2; // And this rule m in block 2n with rule m in block 2n+1
+ bool cond_not; /* Matches when conditions not match */
+ bool cond_and1; /* And this rule 2n with the next rule 2n+1 in same block */
+ bool cond_and2; /* And this rule m in block 2n with rule m in block 2n+1 */
bool ivalid;
- // Actions to be performed
- bool drop; // Drop the packet
- bool fwd_sel; // Forward packet: to port, portmask, dest route, next rule, drop
- bool ovid_sel; // So something to outer vlan-id: shift, re-assign
- bool ivid_sel; // Do something to inner vlan-id: shift, re-assign
- bool flt_sel; // Filter the packet when sending to certain ports
- bool log_sel; // Log the packet in one of the LOG-table counters
- bool rmk_sel; // Re-mark the packet, i.e. change the priority-tag
- bool meter_sel; // Meter the packet, i.e. limit rate of this type of packet
- bool tagst_sel; // Change the ergress tag
- bool mir_sel; // Mirror the packet to a Link Aggregation Group
- bool nopri_sel; // Change the normal priority
- bool cpupri_sel; // Change the CPU priority
- bool otpid_sel; // Change Outer Tag Protocol Identifier (802.1q)
- bool itpid_sel; // Change Inner Tag Protocol Identifier (802.1q)
- bool shaper_sel; // Apply traffic shaper
- bool mpls_sel; // MPLS actions
- bool bypass_sel; // Bypass actions
- bool fwd_sa_lrn; // Learn the source address when forwarding
- bool fwd_mod_to_cpu; // Forward the modified VLAN tag format to CPU-port
-
- // Fields used in predefined templates 0-2 on RTL8380 / 90 / 9300
- u64 spm; // Source Port Matrix
- u16 otag; // Outer VLAN-ID
- u8 smac[ETH_ALEN]; // Source MAC address
- u8 dmac[ETH_ALEN]; // Destination MAC address
- u16 ethertype; // Ethernet frame type field in ethernet header
- u16 itag; // Inner VLAN-ID
+ /* Actions to be performed */
+ bool drop; /* Drop the packet */
+ bool fwd_sel; /* Forward packet: to port, portmask, dest route, next rule, drop */
+ bool ovid_sel; /* Do something to outer vlan-id: shift, re-assign */
+ bool ivid_sel; /* Do something to inner vlan-id: shift, re-assign */
+ bool flt_sel; /* Filter the packet when sending to certain ports */
+ bool log_sel; /* Log the packet in one of the LOG-table counters */
+ bool rmk_sel; /* Re-mark the packet, i.e. change the priority-tag */
+ bool meter_sel; /* Meter the packet, i.e. limit rate of this type of packet */
+ bool tagst_sel; /* Change the egress tag */
+ bool mir_sel; /* Mirror the packet to a Link Aggregation Group */
+ bool nopri_sel; /* Change the normal priority */
+ bool cpupri_sel; /* Change the CPU priority */
+ bool otpid_sel; /* Change Outer Tag Protocol Identifier (802.1q) */
+ bool itpid_sel; /* Change Inner Tag Protocol Identifier (802.1q) */
+ bool shaper_sel; /* Apply traffic shaper */
+ bool mpls_sel; /* MPLS actions */
+ bool bypass_sel; /* Bypass actions */
+ bool fwd_sa_lrn; /* Learn the source address when forwarding */
+ bool fwd_mod_to_cpu; /* Forward the modified VLAN tag format to CPU-port */
+
+ /* Fields used in predefined templates 0-2 on RTL8380 / 8390 / 9300 */
+ u64 spm; /* Source Port Matrix */
+ u16 otag; /* Outer VLAN-ID */
+ u8 smac[ETH_ALEN]; /* Source MAC address */
+ u8 dmac[ETH_ALEN]; /* Destination MAC address */
+ u16 ethertype; /* Ethernet frame type field in ethernet header */
+ u16 itag; /* Inner VLAN-ID */
u16 field_range_check;
- u32 sip; // Source IP
- struct in6_addr sip6; // IPv6 Source IP
- u32 dip; // Destination IP
- struct in6_addr dip6; // IPv6 Destination IP
- u16 tos_proto; // IPv4: TOS + Protocol fields, IPv6: Traffic class + next header
- u16 sport; // TCP/UDP source port
- u16 dport; // TCP/UDP destination port
+ u32 sip; /* Source IP */
+ struct in6_addr sip6; /* IPv6 Source IP */
+ u32 dip; /* Destination IP */
+ struct in6_addr dip6; /* IPv6 Destination IP */
+ u16 tos_proto; /* IPv4: TOS + Protocol fields, IPv6: Traffic class + next header */
+ u16 sport; /* TCP/UDP source port */
+ u16 dport; /* TCP/UDP destination port */
u16 icmp_igmp;
u16 tcp_info;
- u16 dsap_ssap; // Destination / Source Service Access Point bytes (802.3)
+ u16 dsap_ssap; /* Destination / Source Service Access Point bytes (802.3) */
u64 spm_m;
u16 otag_m;
u16 itag_m;
u16 field_range_check_m;
u32 sip_m;
- struct in6_addr sip6_m; // IPv6 Source IP mask
+ struct in6_addr sip6_m; /* IPv6 Source IP mask */
u32 dip_m;
- struct in6_addr dip6_m; // IPv6 Destination IP mask
+ struct in6_addr dip6_m; /* IPv6 Destination IP mask */
u16 tos_proto_m;
u16 sport_m;
u16 dport_m;
u16 tcp_info_m;
u16 dsap_ssap_m;
- // Data associated with actions
- u8 fwd_act; // Type of forwarding action
- // 0: permit, 1: drop, 2: copy to port id, 4: copy to portmask
- // 4: redirect to portid, 5: redirect to portmask
- // 6: route, 7: vlan leaky (only 8380)
- u16 fwd_data; // Additional data for forwarding action, e.g. destination port
+ /* Data associated with actions */
+ u8 fwd_act; /* Type of forwarding action */
+ /* 0: permit, 1: drop, 2: copy to port id, 3: copy to portmask */
+ /* 4: redirect to portid, 5: redirect to portmask */
+ /* 6: route, 7: vlan leaky (only 8380) */
+ u16 fwd_data; /* Additional data for forwarding action, e.g. destination port */
u8 ovid_act;
- u16 ovid_data; // Outer VLAN ID
+ u16 ovid_data; /* Outer VLAN ID */
u8 ivid_act;
- u16 ivid_data; // Inner VLAN ID
- u16 flt_data; // Filtering data
- u16 log_data; // ID of packet or octet counter in LOG table, on RTL93xx
- // unnecessary since PIE-Rule-ID == LOG-counter-ID
+ u16 ivid_data; /* Inner VLAN ID */
+ u16 flt_data; /* Filtering data */
+ u16 log_data; /* ID of packet or octet counter in LOG table, on RTL93xx */
+ /* unnecessary since PIE-Rule-ID == LOG-counter-ID */
bool log_octets;
- u8 mpls_act; // MPLS action type
- u16 mpls_lib_idx; // MPLS action data
+ u8 mpls_act; /* MPLS action type */
+ u16 mpls_lib_idx; /* MPLS action data */
- u16 rmk_data; // Data for remarking
- u16 meter_data; // ID of meter for bandwidth control
+ u16 rmk_data; /* Data for remarking */
+ u16 meter_data; /* ID of meter for bandwidth control */
u16 tagst_data;
u16 mir_data;
u16 nopri_data;
u16 itpid_data;
u16 shaper_data;
- // Bypass actions, ignored on RTL8380
- bool bypass_all; // Not clear
- bool bypass_igr_stp; // Bypass Ingress STP state
- bool bypass_ibc_sc; // Bypass Ingress Bandwidth Control and Storm Control
+ /* Bypass actions, ignored on RTL8380 */
+ bool bypass_all; /* Not clear */
+ bool bypass_igr_stp; /* Bypass Ingress STP state */
+ bool bypass_ibc_sc; /* Bypass Ingress Bandwidth Control and Storm Control */
};
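As a usage illustration of the structure above, a rule that drops all frames from one source MAC and counts the drops could be filled in roughly as follows. This is a hedged sketch based only on fields visible in the structure; smac_m is assumed to exist as the mask counterpart following the _m convention above, and how the rule is then installed (the family-specific pie_rule_write/add hook) is not shown:

/* Hedged sketch, not from the driver: populate a pie_rule that drops
 * frames from a given source MAC and counts them in a LOG counter.
 */
static void example_drop_by_smac(struct pie_rule *pr, const u8 *mac)
{
	memset(pr, 0, sizeof(*pr));
	pr->phase = PHASE_VACL;			/* VACL phase, as used further below */
	memcpy(pr->smac, mac, ETH_ALEN);	/* match this source MAC ... */
	memset(pr->smac_m, 0xff, ETH_ALEN);	/* ... exactly (assumed mask field) */
	pr->drop = true;			/* action: drop the packet */
	pr->log_sel = true;			/* and count it */
	pr->packet_cntr = 0;			/* LOG counter id, assumed to be free */
}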
struct rtl838x_l3_intf {
* Mask fields state whether the corresponding data fields matter for matching
*/
struct rtl93xx_rt_mac {
- bool valid; // Valid or not
- bool p_type; // Individual (0) or trunk (1) port
- bool p_mask; // Whether the port type is used
+ bool valid; /* Valid or not */
+ bool p_type; /* Individual (0) or trunk (1) port */
+ bool p_mask; /* Whether the port type is used */
u8 p_id;
- u8 p_id_mask; // Mask for the port
- u8 action; // Routing action performed: 0: FORWARD, 1: DROP, 2: TRAP2CPU
- // 3: COPY2CPU, 4: TRAP2MASTERCPU, 5: COPY2MASTERCPU, 6: HARDDROP
+ u8 p_id_mask; /* Mask for the port */
+ u8 action; /* Routing action performed: 0: FORWARD, 1: DROP, 2: TRAP2CPU */
+ /* 3: COPY2CPU, 4: TRAP2MASTERCPU, 5: COPY2MASTERCPU, 6: HARDDROP */
u16 vid;
u16 vid_mask;
- u64 mac; // MAC address used as source MAC in the routed packet
+ u64 mac; /* MAC address used as source MAC in the routed packet */
u64 mac_mask;
};
struct rtl83xx_nexthop {
- u16 id; // ID: L3_NEXT_HOP table-index or route-index set in L2_NEXT_HOP
+ u16 id; /* ID: L3_NEXT_HOP table-index or route-index set in L2_NEXT_HOP */
u32 dev_id;
u16 port;
- u16 vid; // VLAN-ID for L2 table entry (saved from L2-UC entry)
- u16 rvid; // Relay VID/FID for the L2 table entry
- u64 mac; // The MAC address of the entry in the L2_NEXT_HOP table
+ u16 vid; /* VLAN-ID for L2 table entry (saved from L2-UC entry) */
+ u16 rvid; /* Relay VID/FID for the L2 table entry */
+ u64 mac; /* The MAC address of the entry in the L2_NEXT_HOP table */
u16 mac_id;
- u16 l2_id; // Index of this next hop forwarding entry in L2 FIB table
- u64 gw; // The gateway MAC address packets are forwarded to
- int if_id; // Interface (into L3_EGR_INTF_IDX)
+ u16 l2_id; /* Index of this next hop forwarding entry in L2 FIB table */
+ u64 gw; /* The gateway MAC address packets are forwarded to */
+ int if_id; /* Interface (into L3_EGR_INTF_IDX) */
};
struct rtl838x_switch_priv;
};
struct rtl83xx_route {
- u32 gw_ip; // IP of the route's gateway
- u32 dst_ip; // IP of the destination net
+ u32 gw_ip; /* IP of the route's gateway */
+ u32 dst_ip; /* IP of the destination net */
struct in6_addr dst_ip6;
- int prefix_len; // Network prefix len of the destination net
+ int prefix_len; /* Network prefix len of the destination net */
bool is_host_route;
- int id; // ID number of this route
+ int id; /* ID number of this route */
struct rhlist_head linkage;
- u16 switch_mac_id; // Index into switch's own MACs, RTL839X only
+ u16 switch_mac_id; /* Index into switch's own MACs, RTL839X only */
struct rtl83xx_nexthop nh;
struct pie_rule pr;
struct rtl93xx_route_attr attr;
u16 family_id;
char version;
struct rtl838x_port ports[57];
- struct mutex reg_mutex; // Mutex for individual register manipulations
- struct mutex pie_mutex; // Mutex for Packet Inspection Engine
+ struct mutex reg_mutex; /* Mutex for individual register manipulations */
+ struct mutex pie_mutex; /* Mutex for Packet Inspection Engine */
int link_state_irq;
int mirror_group_ports[4];
struct mii_bus *mii_bus;
u32 lag_primary[MAX_LAGS];
u32 is_lagmember[57];
u64 lagmembers;
- struct notifier_block nb; // TODO: change to different name
+ struct notifier_block nb; /* TODO: change to different name */
struct notifier_block ne_nb;
struct notifier_block fib_nb;
bool eee_enabled;
/* Definition of the RTL839X-specific template field IDs as used in the PIE */
enum template_field_id {
TEMPLATE_FIELD_SPMMASK = 0,
- TEMPLATE_FIELD_SPM0 = 1, // Source portmask ports 0-15
- TEMPLATE_FIELD_SPM1 = 2, // Source portmask ports 16-31
- TEMPLATE_FIELD_SPM2 = 3, // Source portmask ports 32-47
- TEMPLATE_FIELD_SPM3 = 4, // Source portmask ports 48-56
- TEMPLATE_FIELD_DMAC0 = 5, // Destination MAC [15:0]
- TEMPLATE_FIELD_DMAC1 = 6, // Destination MAC [31:16]
- TEMPLATE_FIELD_DMAC2 = 7, // Destination MAC [47:32]
- TEMPLATE_FIELD_SMAC0 = 8, // Source MAC [15:0]
- TEMPLATE_FIELD_SMAC1 = 9, // Source MAC [31:16]
- TEMPLATE_FIELD_SMAC2 = 10, // Source MAC [47:32]
- TEMPLATE_FIELD_ETHERTYPE = 11, // Ethernet frame type field
- // Field-ID 12 is not used
+ TEMPLATE_FIELD_SPM0 = 1, /* Source portmask ports 0-15 */
+ TEMPLATE_FIELD_SPM1 = 2, /* Source portmask ports 16-31 */
+ TEMPLATE_FIELD_SPM2 = 3, /* Source portmask ports 32-47 */
+ TEMPLATE_FIELD_SPM3 = 4, /* Source portmask ports 48-56 */
+ TEMPLATE_FIELD_DMAC0 = 5, /* Destination MAC [15:0] */
+ TEMPLATE_FIELD_DMAC1 = 6, /* Destination MAC [31:16] */
+ TEMPLATE_FIELD_DMAC2 = 7, /* Destination MAC [47:32] */
+ TEMPLATE_FIELD_SMAC0 = 8, /* Source MAC [15:0] */
+ TEMPLATE_FIELD_SMAC1 = 9, /* Source MAC [31:16] */
+ TEMPLATE_FIELD_SMAC2 = 10, /* Source MAC [47:32] */
+ TEMPLATE_FIELD_ETHERTYPE = 11, /* Ethernet frame type field */
+ /* Field-ID 12 is not used */
TEMPLATE_FIELD_OTAG = 13,
TEMPLATE_FIELD_ITAG = 14,
TEMPLATE_FIELD_SIP0 = 15,
TEMPLATE_FIELD_DIP7 = 61,
};
-// Number of fixed templates predefined in the SoC
+/* Number of fixed templates predefined in the SoC */
#define N_FIXED_TEMPLATES 5
static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
{
static void rtl839x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 u, v, w;
- // Read VLAN table (0) via register 0
+ /* Read VLAN table (0) via register 0 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
rtl_table_read(r, vlan);
info->hash_uc_fid = !!(w & BIT(3));
info->fid = (v >> 3) & 0xff;
- // Read UNTAG table (0) via table register 1
+ /* Read UNTAG table (0) via table register 1 */
r = rtl_table_get(RTL8390_TBL_1, 0);
rtl_table_read(r, vlan);
u = sw_r32(rtl_table_data(r, 0));
static void rtl839x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 u, v, w;
- // Access VLAN table (0) via register 0
+ /* Access VLAN table (0) via register 0 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 0);
u = info->tagged_ports >> 21;
{
u32 u, v;
- // Access UNTAG table (0) via table register 1
+ /* Access UNTAG table (0) via table register 1 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 0);
u = portmask >> 21;
e->mc_portmask_index = (r[2] >> 6) & 0xfff;
e->vid = e->rvid;
}
- } else { // IPv4 and IPv6 multicast
+ } else { /* IPv4 and IPv6 multicast */
e->vid = e->rvid = (r[0] >> 20) & 0xfff;
e->mc_gip = r[1];
e->mc_portmask_index = (r[2] >> 6) & 0xfff;
e->valid = true;
e->type = IP6_MULTICAST;
}
- // pr_info("%s: vid %d, rvid: %d\n", __func__, e->vid, e->rvid);
+ /* pr_info("%s: vid %d, rvid: %d\n", __func__, e->vid, e->rvid); */
}
/* Fills the 3 SoC table registers r[] with the information in the rtl838x_l2_entry */
r[1] |= ((u32)e->mac[4]) << 12;
r[1] |= ((u32)e->mac[5]) << 4;
- if (!(e->mac[0] & 1)) { // Not multicast
+ if (!(e->mac[0] & 1)) { /* Not multicast */
r[2] |= e->is_static ? BIT(18) : 0;
r[0] |= ((u32)e->rvid) << 20;
r[2] |= e->port << 24;
r[2] |= e->vid << 4;
}
pr_debug("Write L2 NH: %08x %08x %08x\n", r[0], r[1], r[2]);
- } else { // L2 Multicast
+ } else { /* L2 Multicast */
r[0] |= ((u32)e->rvid) << 20;
r[2] |= ((u32)e->mc_portmask_index) << 6;
}
- } else { // IPv4 or IPv6 MC entry
+ } else { /* IPv4 or IPv6 MC entry */
r[0] = ((u32)e->rvid) << 20;
r[1] = e->mc_gip;
r[2] |= ((u32)e->mc_portmask_index) << 6;
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
- u32 idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
int i;
rtl_table_read(q, idx);
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
int i;
- u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
rtl839x_fill_l2_row(r, e);
static u64 rtl839x_read_cam(int idx, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); /* Access L2 Table 1 */
int i;
rtl_table_read(q, idx);
pr_debug("Found in CAM: R1 %x R2 %x R3 %x\n", r[0], r[1], r[2]);
- // Return MAC with concatenated VID ac concatenated ID
+ /* Return MAC with concatenated VID as unique ID */
return rtl839x_l2_hash_seed(ether_addr_to_u64(&e->mac[0]), e->rvid);
}
static void rtl839x_write_cam(int idx, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); // Access L2 Table 1
+ struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); /* Access L2 Table 1 */
int i;
rtl839x_fill_l2_row(r, e);
static u64 rtl839x_read_mcast_pmask(int idx)
{
u64 portmask;
- // Read MC_PMSK (2) via register RTL8390_TBL_L2
+ /* Read MC_PMSK (2) via register RTL8390_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
rtl_table_read(q, idx);
portmask = sw_r32(rtl_table_data(q, 0));
portmask <<= 32;
portmask |= sw_r32(rtl_table_data(q, 1));
- portmask >>= 11; // LSB is bit 11 in data registers
+ portmask >>= 11; /* LSB is bit 11 in data registers */
rtl_table_release(q);
return portmask;
static void rtl839x_write_mcast_pmask(int idx, u64 portmask)
{
- // Access MC_PMSK (2) via register RTL8380_TBL_L2
+ /* Access MC_PMSK (2) via register RTL8390_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 2);
- portmask <<= 11; // LSB is bit 11 in data registers
+ portmask <<= 11; /* LSB is bit 11 in data registers */
sw_w32((u32)(portmask >> 32), rtl_table_data(q, 0));
sw_w32((u32)((portmask & 0xfffff800)), rtl_table_data(q, 1));
rtl_table_write(q, idx);
u32 p[2];
u32 pmask_id = UNKNOWN_MC_PMASK;
- p[0] = pmask_id; // Use portmaks 0xfff for unknown IPv6 MC flooding
- // Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for IP/L2-MC traffic flooding
+ p[0] = pmask_id; /* Use portmask 0xfff for unknown IPv6 MC flooding */
+ /* Enable L2 Learning BIT 0, portmask UNKNOWN_MC_PMASK for IP/L2-MC traffic flooding */
p[1] = 1 | pmask_id << 1 | pmask_id << 13;
sw_w32(p[0], RTL839X_VLAN_PROFILE(profile));
* also for multicast flooding */
sw_w32(UNKNOWN_MC_PMASK << 12 | UNKNOWN_MC_PMASK, RTL839X_L2_FLD_PMSK);
- // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ /* Limit learning to maximum: 32k entries, after that just flood (bits 0-1) */
sw_w32((0x7fff << 2) | 0, RTL839X_L2_LRN_CONSTRT);
- // Do not trap ARP packets to CPU_PORT
+ /* Do not trap ARP packets to CPU_PORT */
sw_w32(0, RTL839X_SPCL_TRAP_ARP_CTRL);
}
static void rtl839x_enable_learning(int port, bool enable)
{
- // Limit learning to maximum: 32k entries
+ /* Limit learning to maximum: 32k entries */
sw_w32_mask(0x7fff << 2, enable ? (0x7fff << 2) : 0,
RTL839X_L2_PORT_LRN_CONSTRT + (port << 2));
return IRQ_HANDLED;
}
-// TODO: unused
+/* TODO: unused */
int rtl8390_sds_power(int mac, int val)
{
u32 offset = (mac == 48) ? 0x0 : 0x100;
return -1;
}
- // Set bit 1003. 1000 starts at 7c
+ /* Set bit 1003. 1000 starts at 7c */
sw_w32_mask(BIT(11), mode << 11, RTL839X_SDS12_13_PWR0 + offset);
return 0;
if (port > 63 || page > 4095 || reg > 31)
return -ENOTSUPP;
- // Take bug on RTL839x Rev <= C into account
+ /* Take bug on RTL839x Rev <= C into account */
if (port >= RTL839X_CPU_PORT)
return -EIO;
if (port > 63 || page > 4095 || reg > 31)
return -ENOTSUPP;
- // Take bug on RTL839x Rev <= C into account
+ /* Take bug on RTL839x Rev <= C into account */
if (port >= RTL839X_CPU_PORT)
return -EIO;
mutex_lock(&smi_lock);
- // Set PHY to access
+ /* Set PHY to access */
rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
sw_w32_mask(0xffff0000, val << 16, RTL839X_PHYREG_DATA_CTRL);
int err = 0;
u32 v;
- // Take bug on RTL839x Rev <= C into account
+ /* Take bug on RTL839x Rev <= C into account */
if (port >= RTL839X_CPU_PORT)
return -EIO;
mutex_lock(&smi_lock);
- // Set PHY to access
+ /* Set PHY to access */
sw_w32_mask(0xffff << 16, port << 16, RTL839X_PHYREG_DATA_CTRL);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
- v = BIT(2) | BIT(0); // MMD-access | EXEC
+ v = BIT(2) | BIT(0); /* MMD-access | EXEC */
sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
err = rtl839x_smi_wait_op(100000);
if (err)
goto errout;
- // There is no error-checking via BIT 1 of v, as it does not seem to be set correctly
+ /* There is no error-checking via BIT 1 of v, as it does not seem to be set correctly */
*val = (sw_r32(RTL839X_PHYREG_DATA_CTRL) & 0xffff);
pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
int err = 0;
u32 v;
- // Take bug on RTL839x Rev <= C into account
+ /* Take bug on RTL839x Rev <= C into account */
if (port >= RTL839X_CPU_PORT)
return -EIO;
mutex_lock(&smi_lock);
- // Set PHY to access
+ /* Set PHY to access */
rtl839x_set_port_reg_le(BIT_ULL(port), RTL839X_PHYREG_PORT_CTRL);
- // Set data to write
+ /* Set data to write */
sw_w32_mask(0xffff << 16, val << 16, RTL839X_PHYREG_DATA_CTRL);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | (regnum & 0xffff), RTL839X_PHYREG_MMD_CTRL);
- v = BIT(3) | BIT(2) | BIT(0); // WRITE | MMD-access | EXEC
+ v = BIT(3) | BIT(2) | BIT(0); /* WRITE | MMD-access | EXEC */
sw_w32(v, RTL839X_PHYREG_ACCESS_CTRL);
err = rtl839x_smi_wait_op(100000);
{
u32 v;
- // This works only for Ethernet ports, and on the RTL839X, ports above 47 are SFP
+ /* This works only for Ethernet ports, and on the RTL839X, ports above 47 are SFP */
if (port >= 48)
return;
pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
v = enable ? 0xf : 0x0;
- // Set EEE for 100, 500, 1000MBit and 10GBit
+ /* Set EEE for 100, 500, 1000MBit and 10GBit */
sw_w32_mask(0xf << 8, v << 8, rtl839x_mac_force_mode_ctrl(port));
- // Set TX/RX EEE state
+ /* Set TX/RX EEE state */
v = enable ? 0x3 : 0x0;
sw_w32(v, RTL839X_EEE_CTRL(port));
pr_info("Setting up EEE, state: %d\n", enable);
- // Set wake timer for TX and pause timer both to 0x21
+ /* Set wake timer for TX and pause timer both to 0x21 */
sw_w32_mask(0xff << 20 | 0xff, 0x21 << 20 | 0x21, RTL839X_EEE_TX_TIMER_GELITE_CTRL);
- // Set pause wake timer for GIGA-EEE to 0x11
+ /* Set pause wake timer for GIGA-EEE to 0x11 */
sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_GIGA_CTRL);
- // Set pause wake timer for 10GBit ports to 0x11
+ /* Set pause wake timer for 10GBit ports to 0x11 */
sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_10G_CTRL);
- // Setup EEE on all ports
+ /* Setup EEE on all ports */
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl839x_port_eee_set(priv, i, enable);
pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
mutex_lock(&priv->reg_mutex);
- // Write from-to and execute bit into control register
+ /* Write from-to and execute bit into control register */
sw_w32(v, RTL839X_ACL_CLR_CTRL);
- // Wait until command has completed
+ /* Wait until command has completed */
do {
} while (sw_r32(RTL839X_ACL_CLR_CTRL) & BIT(0));
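The same start-and-poll handshake recurs throughout these drivers (the rule clearing above and the SMI/MMD and L3 CAM lookup sequences elsewhere in this file): write a command word with its execute bit set, then spin until the hardware clears it. A hypothetical helper capturing the pattern; neither the name nor any timeout policy is taken from the driver:

/* Hypothetical helper, not part of the driver: write @cmd (with its
 * execute bit already set) to @reg and busy-wait until the hardware
 * clears @done_bit, using the same sw_w32()/sw_r32() accessors as the
 * surrounding code.
 */
static void rtl_exec_and_wait(u32 cmd, int reg, u32 done_bit)
{
	sw_w32(cmd, reg);
	do {
	} while (sw_r32(reg) & done_bit);
}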
pr_info("%s: unknown field %d\n", __func__, field_type);
}
- // On the RTL8390, the mask fields are not word aligned!
+ /* On the RTL8390, the mask fields are not word aligned! */
if (!(i % 2)) {
r[5 - i / 2] = data;
r[12 - i / 2] |= ((u32)data_m << 8);
break;
case TEMPLATE_FIELD_SIP2:
pr->is_ipv6 = true;
- // Make use of limitiations on the position of the match values
+ /* Make use of limitations on the position of the match values */
ipv6_addr_set(&pr->sip6, pr->sip, r[5 - i / 2],
r[4 - i / 2], r[3 - i / 2]);
ipv6_addr_set(&pr->sip6_m, pr->sip_m, r[5 - i / 2],
static void rtl839x_write_pie_action(u32 r[], struct pie_rule *pr)
{
if (pr->drop) {
- r[13] |= 0x9; // Set ACT_MASK_FWD & FWD_ACT = DROP
+ r[13] |= 0x9; /* Set ACT_MASK_FWD & FWD_ACT = DROP */
r[13] |= BIT(3);
} else {
r[13] |= pr->fwd_sel ? BIT(3) : 0;
static void rtl839x_read_pie_action(u32 r[], struct pie_rule *pr)
{
- if (r[13] & BIT(3)) { // ACT_MASK_FWD set, is it a drop?
+ if (r[13] & BIT(3)) { /* ACT_MASK_FWD set, is it a drop? */
if ((r[14] & 0x7) == 1) {
pr->drop = true;
} else {
pr->mir_sel = r[13] & BIT(5);
pr->log_sel = r[13] & BIT(4);
- // TODO: Read in data fields
+ /* TODO: Read in data fields */
pr->bypass_all = r[16] & BIT(9);
pr->bypass_igr_stp = r[16] & BIT(8);
static int rtl839x_pie_rule_read(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Read IACL table (2) via register 0
+ /* Read IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
u32 r[17];
int i;
static int rtl839x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Access IACL table (2) via register 0
+ /* Access IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
u32 r[17];
int i;
rtl839x_write_pie_action(r, pr);
-// rtl839x_pie_rule_dump_raw(r);
+/* rtl839x_pie_rule_dump_raw(r); */
for (i = 0; i < 17; i++)
sw_w32(r[i], rtl_table_data(q, i));
if (ether_addr_to_u64(pr->dmac) && !rtl839x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
return -1;
- // TODO: Check more
+ /* TODO: Check more */
i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
set_bit(idx, priv->pie_use_bm);
pr->valid = true;
- pr->tid = j; // Mapped to template number
+ pr->tid = j; /* Mapped to template number */
pr->tid_m = 0x3;
pr->id = idx;
mutex_init(&priv->pie_mutex);
- // Power on all PIE blocks
+ /* Power on all PIE blocks */
for (i = 0; i < priv->n_pie_blocks; i++)
sw_w32_mask(0, BIT(i), RTL839X_PS_ACL_PWR_CTRL);
- // Set ingress and egress ACL blocks to 50/50: first Egress block is 9
- sw_w32_mask(0x1f, 9, RTL839X_ACL_CTRL); // Writes 9 to cutline field
+ /* Set ingress and egress ACL blocks to 50/50: first Egress block is 9 */
+ sw_w32_mask(0x1f, 9, RTL839X_ACL_CTRL); /* Writes 9 to cutline field */
- // Include IPG in metering
+ /* Include IPG in metering */
sw_w32(1, RTL839X_METER_GLB_CTRL);
- // Delete all present rules
+ /* Delete all present rules */
rtl839x_pie_rule_del(priv, 0, priv->n_pie_blocks * PIE_BLOCK_SIZE - 1);
- // Enable predefined templates 0, 1 for blocks 0-2
+ /* Enable predefined templates 0, 1 for blocks 0-2 */
template_selectors = 0 | (1 << 3);
for (i = 0; i < 3; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for blocks 3-5
+ /* Enable predefined templates 2, 3 for blocks 3-5 */
template_selectors = 2 | (3 << 3);
for (i = 3; i < 6; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 1, 4 for blocks 6-8
+ /* Enable predefined templates 1, 4 for blocks 6-8 */
template_selectors = 2 | (3 << 3);
for (i = 6; i < 9; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 0, 1 for blocks 9-11
+ /* Enable predefined templates 0, 1 for blocks 9-11 */
template_selectors = 0 | (1 << 3);
for (i = 9; i < 12; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for blocks 12-14
+ /* Enable predefined templates 2, 3 for blocks 12-14 */
template_selectors = 2 | (3 << 3);
for (i = 12; i < 15; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 1, 4 for blocks 15-17
+ /* Enable predefined templates 1, 4 for blocks 15-17 */
template_selectors = 2 | (3 << 3);
for (i = 15; i < 18; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
{
u32 v;
- // Read LOG table (4) via register RTL8390_TBL_0
+ /* Read LOG table (4) via register RTL8390_TBL_0 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
pr_debug("In %s, id %d\n", __func__, counter);
rtl_table_read(r, counter / 2);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
v = sw_r32(rtl_table_data(r, 0));
else
static void rtl839x_packet_cntr_clear(int counter)
{
- // Access LOG table (4) via register RTL8390_TBL_0
+ /* Access LOG table (4) via register RTL8390_TBL_0 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_0, 4);
pr_debug("In %s, id %d\n", __func__, counter);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
sw_w32(0, rtl_table_data(r, 0));
else
static void rtl839x_route_read(int idx, struct rtl83xx_route *rt)
{
u64 v;
- // Read ROUTING table (2) via register RTL8390_TBL_1
+ /* Read ROUTING table (2) via register RTL8390_TBL_1 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
pr_debug("In %s\n", __func__);
rtl_table_read(r, idx);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
v = sw_r32(rtl_table_data(r, 0));
v <<= 32;
v |= sw_r32(rtl_table_data(r, 1));
{
u32 v;
- // Read ROUTING table (2) via register RTL8390_TBL_1
+ /* Read ROUTING table (2) via register RTL8390_TBL_1 */
struct table_reg *r = rtl_table_get(RTL8390_TBL_1, 2);
pr_debug("In %s\n", __func__);
mac = ether_addr_to_u64(dev->dev_addr);
for (i = 0; i < 15; i++) {
- mac++; // BUG: VRRP for testing
+ mac++; /* BUG: VRRP for testing */
sw_w32(mac >> 32, RTL839X_ROUTING_SA_CTRL + i * 8);
sw_w32(mac, RTL839X_ROUTING_SA_CTRL + i * 8 + 4);
}
/* Definition of the RTL930X-specific template field IDs as used in the PIE */
enum template_field_id {
- TEMPLATE_FIELD_SPM0 = 0, // Source portmask ports 0-15
- TEMPLATE_FIELD_SPM1 = 1, // Source portmask ports 16-31
- TEMPLATE_FIELD_DMAC0 = 2, // Destination MAC [15:0]
- TEMPLATE_FIELD_DMAC1 = 3, // Destination MAC [31:16]
- TEMPLATE_FIELD_DMAC2 = 4, // Destination MAC [47:32]
- TEMPLATE_FIELD_SMAC0 = 5, // Source MAC [15:0]
- TEMPLATE_FIELD_SMAC1 = 6, // Source MAC [31:16]
- TEMPLATE_FIELD_SMAC2 = 7, // Source MAC [47:32]
- TEMPLATE_FIELD_ETHERTYPE = 8, // Ethernet frame type field
+ TEMPLATE_FIELD_SPM0 = 0, /* Source portmask ports 0-15 */
+ TEMPLATE_FIELD_SPM1 = 1, /* Source portmask ports 16-31 */
+ TEMPLATE_FIELD_DMAC0 = 2, /* Destination MAC [15:0] */
+ TEMPLATE_FIELD_DMAC1 = 3, /* Destination MAC [31:16] */
+ TEMPLATE_FIELD_DMAC2 = 4, /* Destination MAC [47:32] */
+ TEMPLATE_FIELD_SMAC0 = 5, /* Source MAC [15:0] */
+ TEMPLATE_FIELD_SMAC1 = 6, /* Source MAC [31:16] */
+ TEMPLATE_FIELD_SMAC2 = 7, /* Source MAC [47:32] */
+ TEMPLATE_FIELD_ETHERTYPE = 8, /* Ethernet frame type field */
TEMPLATE_FIELD_OTAG = 9,
TEMPLATE_FIELD_ITAG = 10,
TEMPLATE_FIELD_SIP0 = 11,
TEMPLATE_FIELD_SNAP_OUI = 42,
TEMPLATE_FIELD_FWD_VID = 43,
TEMPLATE_FIELD_RANGE_CHK = 44,
- TEMPLATE_FIELD_VLAN_GMSK = 45, // VLAN Group Mask/IP range check
+ TEMPLATE_FIELD_VLAN_GMSK = 45, /* VLAN Group Mask/IP range check */
TEMPLATE_FIELD_DLP = 46,
TEMPLATE_FIELD_META_DATA = 47,
TEMPLATE_FIELD_SRC_FWD_VID = 48,
*/
#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG
-// Number of fixed templates predefined in the RTL9300 SoC
+/* Number of fixed templates predefined in the RTL9300 SoC */
#define N_FIXED_TEMPLATES 5
-// RTL9300 specific predefined templates
+/* RTL9300 specific predefined templates */
static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS] =
{
{
static inline int rtl930x_l2_port_new_sa_fwd(int p)
{
- // TODO: The definition of the fields changed, because of the master-cpu in a stack
+ /* TODO: The definition of the fields changed, because of the master-cpu in a stack */
return RTL930X_L2_PORT_NEW_SA_FWD(p);
}
static void rtl930x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v, w;
- // Read VLAN table (1) via register 0
+ /* Read VLAN table (1) via register 0 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1);
rtl_table_read(r, vlan);
info->hash_uc_fid = !!(w & BIT(28));
info->fid = ((v & 0x7) << 3) | ((w >> 29) & 0x7);
- // Read UNTAG table via table register 2
+ /* Read UNTAG table via table register 2 */
r = rtl_table_get(RTL9300_TBL_2, 0);
rtl_table_read(r, vlan);
v = sw_r32(rtl_table_data(r, 0));
static void rtl930x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v, w;
- // Access VLAN table (1) via register 0
+ /* Access VLAN table (1) via register 0 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 1);
v = info->tagged_ports << 3;
/* Sets the L2 forwarding to be based on either the inner VLAN tag or the outer */
static void rtl930x_vlan_fwd_on_inner(int port, bool is_set)
{
- // Always set all tag modes to fwd based on either inner or outer tag
+ /* Always set all tag modes to fwd based on either inner or outer tag */
if (is_set)
sw_w32_mask(0, 0xf, RTL930X_VLAN_PORT_FWD + (port << 2));
else
p[0] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile));
p[1] = sw_r32(RTL930X_VLAN_PROFILE_SET(profile) + 4);
- // Enable routing of Ipv4/6 Unicast and IPv4/6 Multicast traffic
+ /* Enable routing of IPv4/6 Unicast and IPv4/6 Multicast traffic */
p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12);
- p[2] = 0x1fffffff; // L2 unknown MC flooding portmask all ports, including the CPU-port
- p[3] = 0x1fffffff; // IPv4 unknown MC flooding portmask
- p[4] = 0x1fffffff; // IPv6 unknown MC flooding portmask
+ p[2] = 0x1fffffff; /* L2 unknown MC flooding portmask: all ports, including the CPU-port */
+ p[3] = 0x1fffffff; /* IPv4 unknown MC flooding portmask */
+ p[4] = 0x1fffffff; /* IPv6 unknown MC flooding portmask */
sw_w32(p[0], RTL930X_VLAN_PROFILE_SET(profile));
sw_w32(p[1], RTL930X_VLAN_PROFILE_SET(profile) + 4);
static void rtl930x_l2_learning_setup(void)
{
- // Portmask for flooding broadcast traffic
+ /* Portmask for flooding broadcast traffic */
sw_w32(0x1fffffff, RTL930X_L2_BC_FLD_PMSK);
- // Portmask for flooding unicast traffic with unknown destination
+ /* Portmask for flooding unicast traffic with unknown destination */
sw_w32(0x1fffffff, RTL930X_L2_UNKN_UC_FLD_PMSK);
- // Limit learning to maximum: 32k entries, after that just flood (bits 0-1)
+ /* Limit learning to maximum: 32k entries, after that just flood (bits 0-1) */
sw_w32((0x7fff << 2) | 0, RTL930X_L2_LRN_CONSTRT_CTRL);
}
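The learning-constraint value written above packs two fields: the entry limit in bits 2 and up (0x7fff, i.e. 32k - 1) and the over-limit action in bits 1:0 (0 meaning flood, per the comment). A small illustrative encoder; any action encoding beyond 0 = flood is an assumption:

/* Illustrative only: compose the value written to the L2 learning
 * constraint register above.
 */
static inline u32 l2_lrn_constrt_val(u32 max_entries, u32 over_limit_act)
{
	return ((max_entries & 0x7fff) << 2) | (over_limit_act & 0x3);
}
/* l2_lrn_constrt_val(0x7fff, 0) yields the (0x7fff << 2) | 0 written above */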
h1 ^
(seed & 0x7ff));
- // Algorithm choice for block 0
+ /* Algorithm choice for block 0 */
if (sw_r32(RTL930X_L2_CTRL) & BIT(0))
h = k1;
else
e->is_ip_mc = false;
e->is_ipv6_mc = false;
- // TODO: Is there not a function to copy directly MAC memory?
+ /* TODO: Is there not a function to copy MAC memory directly? */
e->mac[0] = (r[0] >> 24);
e->mac[1] = (r[0] >> 16);
e->mac[2] = (r[0] >> 8);
e->type = L2_UNICAST;
e->is_static = !!(r[2] & BIT(14));
e->port = (r[2] >> 20) & 0x3ff;
- // Check for trunk port
+ /* Check for trunk port */
if (r[2] & BIT(30)) {
e->is_trunk = true;
e->stack_dev = (e->port >> 9) & 1;
e->suspended = !!(r[2] & BIT(13));
e->age = (r[2] >> 17) & 3;
e->valid = true;
- // the UC_VID field in hardware is used for the VID or for the route id
+ /* the UC_VID field in hardware is used for the VID or for the route id */
if (e->next_hop) {
e->nh_route_id = r[2] & 0x7ff;
e->vid = 0;
return;
}
- r[2] = BIT(31); // Set valid bit
+ r[2] = BIT(31); /* Set valid bit */
r[0] = ((u32)e->mac[0]) << 24 |
((u32)e->mac[1]) << 16 |
r[2] |= e->block_sa ? BIT(17) : 0;
r[2] |= e->suspended ? BIT(13) : 0;
r[2] |= (e->age & 0x3) << 17;
- // the UC_VID field in hardware is used for the VID or for the route id
+ /* the UC_VID field in hardware is used for the VID or for the route id */
if (e->next_hop)
r[2] |= e->nh_route_id & 0x7ff;
else
r[2] |= e->vid & 0xfff;
- } else { // L2_MULTICAST
+ } else { /* L2_MULTICAST */
r[2] |= (e->mc_portmask_index & 0x3ff) << 16;
r[2] |= e->mc_mac_index & 0x7ff;
}
hash &= 0xffff;
}
- idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
rtl_table_read(q, idx);
seed = rtl930x_l2_hash_seed(mac, e->rvid);
pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed);
- // return vid with concatenated mac as unique id
+ /* return mac with concatenated vid as unique id */
return seed;
}
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
- u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
int i;
pr_debug("%s: hash %d, pos %d\n", __func__, hash, pos);
if (!e->valid)
return 0;
- // return mac with concatenated vid as unique id
+ /* return mac with concatenated vid as unique id */
return ((u64)r[0] << 28) | ((r[1] & 0xffff0000) >> 4) | e->vid;
}
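The key assembled from r[0], r[1] and the VID above is simply the 48-bit MAC with the 12-bit VID appended. A standalone sketch of the equivalent packing, for reference only:

/* Sketch: the unique L2 key returned above is equivalent to appending
 * the 12-bit VID to the 48-bit MAC address.
 */
static inline u64 l2_unique_key(u64 mac, u16 vid)
{
	return (mac << 12) | (vid & 0xfff);
}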
static void rtl930x_write_cam(int idx, struct rtl838x_l2_entry *e)
{
u32 r[3];
- struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); // Access L2 Table 1
+ struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); /* Access L2 Table 1 */
int i;
rtl930x_fill_l2_row(r, e);
static u64 rtl930x_read_mcast_pmask(int idx)
{
u32 portmask;
- // Read MC_PORTMASK (2) via register RTL9300_TBL_L2
+ /* Read MC_PORTMASK (2) via register RTL9300_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
rtl_table_read(q, idx);
{
u32 pm = portmask;
- // Access MC_PORTMASK (2) via register RTL9300_TBL_L2
+ /* Access MC_PORTMASK (2) via register RTL9300_TBL_L2 */
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 2);
pr_debug("%s: Index idx %d has portmask %08x\n", __func__, idx, pm);
mutex_lock(&smi_lock);
- // Set PHY to access
+ /* Set PHY to access */
sw_w32(BIT(port), RTL930X_SMI_ACCESS_PHY_CTRL_0);
- // Set data to write
+ /* Set data to write */
sw_w32_mask(0xffff << 16, val << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
- v = BIT(2) | BIT(1) | BIT(0); // WRITE | MMD-access | EXEC
+ v = BIT(2) | BIT(1) | BIT(0); /* WRITE | MMD-access | EXEC */
sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
do {
mutex_lock(&smi_lock);
- // Set PHY to access
+ /* Set PHY to access */
sw_w32_mask(0xffff << 16, port << 16, RTL930X_SMI_ACCESS_PHY_CTRL_2);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | (regnum & 0xffff), RTL930X_SMI_ACCESS_PHY_CTRL_3);
- v = BIT(1) | BIT(0); // MMD-access | EXEC
+ v = BIT(1) | BIT(0); /* MMD-access | EXEC */
sw_w32(v, RTL930X_SMI_ACCESS_PHY_CTRL_1);
do {
v = sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_1);
} while (v & BIT(0));
- // There is no error-checking via BIT 25 of v, as it does not seem to be set correctly
+ /* There is no error-checking via BIT 25 of v, as it does not seem to be set correctly */
*val = (sw_r32(RTL930X_SMI_ACCESS_PHY_CTRL_2) & 0xffff);
pr_debug("%s: port %d, regnum: %x, val: %x (err %d)\n", __func__, port, regnum, *val, err);
h1 ^
(seed & 0x7ff));
- // Algorithm choice for block 0
+ /* Algorithm choice for block 0 */
if (sw_r32(RTL930X_L2_CTRL) & BIT(0))
h = k1;
else
{
u32 v;
- // This works only for Ethernet ports, and on the RTL930X, ports from 26 are SFP
+ /* This works only for Ethernet ports, and on the RTL930X, ports from 26 are SFP */
if (port >= 26)
return;
pr_debug("In %s: setting port %d to %d\n", __func__, port, enable);
v = enable ? 0x3f : 0x0;
- // Set EEE/EEEP state for 100, 500, 1000MBit and 2.5, 5 and 10GBit
+ /* Set EEE/EEEP state for 100, 500, 1000MBit and 2.5, 5 and 10GBit */
sw_w32_mask(0, v << 10, rtl930x_mac_force_mode_ctrl(port));
- // Set TX/RX EEE state
+ /* Set TX/RX EEE state */
v = enable ? 0x3 : 0x0;
sw_w32(v, RTL930X_EEE_CTRL(port));
e->lp_advertised |= ADVERTISED_10000baseT_Full;
}
- // Read 2x to clear latched state
+ /* Read 2x to clear latched state */
a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
a = sw_r32(RTL930X_EEEP_PORT_CTRL(port));
pr_info("%s RTL930X_EEEP_PORT_CTRL: %08x\n", __func__, a);
pr_info("Setting up EEE, state: %d\n", enable);
- // Setup EEE on all ports
+ /* Setup EEE on all ports */
for (i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl930x_port_eee_set(priv, i, enable);
bool host_route, default_route;
struct in6_addr ip6_m;
- // Read L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
+ /* Read L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
rtl_table_read(r, idx);
- // The table has a size of 11 registers
+ /* The table has a size of 11 registers */
rt->attr.valid = !!(sw_r32(rtl_table_data(r, 0)) & BIT(31));
if (!rt->attr.valid)
goto out;
pr_info("%s: host route %d, default_route %d\n", __func__, host_route, default_route);
switch (rt->attr.type) {
- case 0: // IPv4 Unicast route
+ case 0: /* IPv4 Unicast route */
rt->dst_ip = sw_r32(rtl_table_data(r, 4));
ip4_m = sw_r32(rtl_table_data(r, 9));
pr_info("%s: Read ip4 mask: %08x\n", __func__, ip4_m);
if (rt->prefix_len < 0)
rt->prefix_len = inet_mask_len(ip4_m);
break;
- case 2: // IPv6 Unicast route
+ case 2: /* IPv6 Unicast route */
ipv6_addr_set(&rt->dst_ip6,
sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 2)),
sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 4)));
rt->prefix_len = find_last_bit((unsigned long int *)&ip6_m.s6_addr32,
128);
break;
- case 1: // IPv4 Multicast route
- case 3: // IPv6 Multicast route
+ case 1: /* IPv4 Multicast route */
+ case 3: /* IPv6 Multicast route */
pr_warn("%s: route type not supported\n", __func__);
goto out;
}
static void rtl930x_net6_mask(int prefix_len, struct in6_addr *ip6_m)
{
int o, b;
- // Define network mask
+ /* Define network mask */
o = prefix_len >> 3;
b = prefix_len & 0x7;
memset(ip6_m->s6_addr, 0xff, o);
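rtl930x_net6_mask() splits the prefix length into o whole 0xff bytes and b remaining bits; the excerpt above ends after the memset, so the partial-byte handling in the sketch below is the obvious completion rather than code copied from the driver. A standalone illustration (plain C, outside the kernel):

/* Standalone sketch of the prefix-to-mask split used above.
 * Example: prefix_len = 70 -> o = 8 full 0xff bytes, b = 6, so the
 * ninth byte becomes 0xfc.
 */
#include <stdint.h>
#include <string.h>

static void net6_mask_example(int prefix_len, uint8_t mask[16])
{
	int o = prefix_len >> 3;	/* number of complete 0xff bytes */
	int b = prefix_len & 0x7;	/* remaining bits in the next byte */

	memset(mask, 0, 16);
	memset(mask, 0xff, o);
	if (b)
		mask[o] = (0xff << (8 - b)) & 0xff;
}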
static void rtl930x_host_route_read(int idx, struct rtl83xx_route *rt)
{
u32 v;
- // Read L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ /* Read L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
idx = ((idx / 6) * 8) + (idx % 6);
pr_debug("In %s, physical index %d\n", __func__, idx);
rtl_table_read(r, idx);
- // The table has a size of 5 (for UC, 11 for MC) registers
+ /* The table has a size of 5 (for UC, 11 for MC) registers */
v = sw_r32(rtl_table_data(r, 0));
rt->attr.valid = !!(v & BIT(31));
if (!rt->attr.valid)
goto out;
rt->attr.type = (v >> 29) & 0x3;
switch (rt->attr.type) {
- case 0: // IPv4 Unicast route
+ case 0: /* IPv4 Unicast route */
rt->dst_ip = sw_r32(rtl_table_data(r, 4));
break;
- case 2: // IPv6 Unicast route
+ case 2: /* IPv6 Unicast route */
ipv6_addr_set(&rt->dst_ip6,
sw_r32(rtl_table_data(r, 3)), sw_r32(rtl_table_data(r, 2)),
sw_r32(rtl_table_data(r, 1)), sw_r32(rtl_table_data(r, 0)));
break;
- case 1: // IPv4 Multicast route
- case 3: // IPv6 Multicast route
+ case 1: /* IPv4 Multicast route */
+ case 3: /* IPv6 Multicast route */
pr_warn("%s: route type not supported\n", __func__);
goto out;
}
static void rtl930x_host_route_write(int idx, struct rtl83xx_route *rt)
{
u32 v;
- // Access L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1
+ /* Access L3_HOST_ROUTE_IPUC table (1) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 1);
- // The table has a size of 5 (for UC, 11 for MC) registers
+ /* The table has a size of 5 (for UC, 11 for MC) registers */
idx = ((idx / 6) * 8) + (idx % 6);
rt->attr.dst_null);
pr_debug("%s: GW: %pI4, prefix_len: %d\n", __func__, &rt->dst_ip, rt->prefix_len);
- v = BIT(31); // Entry is valid
+ v = BIT(31); /* Entry is valid */
v |= (rt->attr.type & 0x3) << 29;
v |= rt->attr.hit ? BIT(20) : 0;
v |= rt->attr.dst_null ? BIT(19) : 0;
sw_w32(v, rtl_table_data(r, 0));
switch (rt->attr.type) {
- case 0: // IPv4 Unicast route
+ case 0: /* IPv4 Unicast route */
sw_w32(0, rtl_table_data(r, 1));
sw_w32(0, rtl_table_data(r, 2));
sw_w32(0, rtl_table_data(r, 3));
sw_w32(rt->dst_ip, rtl_table_data(r, 4));
break;
- case 2: // IPv6 Unicast route
+ case 2: /* IPv6 Unicast route */
sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
break;
- case 1: // IPv4 Multicast route
- case 3: // IPv6 Multicast route
+ case 1: /* IPv4 Multicast route */
+ case 3: /* IPv6 Multicast route */
pr_warn("%s: route type not supported\n", __func__);
goto out;
}
struct in6_addr ip6_m;
int i;
- if (rt->attr.type == 1 || rt->attr.type == 3) // Hardware only supports UC routes
+ if (rt->attr.type == 1 || rt->attr.type == 3) /* Hardware only supports UC routes */
return -1;
sw_w32_mask(0x3 << 19, rt->attr.type << 19, RTL930X_L3_HW_LU_KEY_CTRL);
- if (rt->attr.type) { // IPv6
+ if (rt->attr.type) { /* IPv6 */
rtl930x_net6_mask(rt->prefix_len, &ip6_m);
for (i = 0; i < 4; i++)
sw_w32(rt->dst_ip6.s6_addr32[i] & ip6_m.s6_addr32[i],
RTL930X_L3_HW_LU_KEY_IP_CTRL + (i << 2));
- } else { // IPv4
+ } else { /* IPv4 */
ip4_m = inet_make_mask(rt->prefix_len);
sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL);
sw_w32(0, RTL930X_L3_HW_LU_KEY_IP_CTRL + 4);
sw_w32(v, RTL930X_L3_HW_LU_KEY_IP_CTRL + 12);
}
- // Execute CAM lookup in SoC
+ /* Execute CAM lookup in SoC */
sw_w32(BIT(15), RTL930X_L3_HW_LU_CTRL);
- // Wait until execute bit clears and result is ready
+ /* Wait until execute bit clears and result is ready */
do {
v = sw_r32(RTL930X_L3_HW_LU_CTRL);
} while (v & BIT(15));
pr_info("%s: found: %d, index: %d\n", __func__, !!(v & BIT(14)), v & 0x1ff);
- // Test if search successful (BIT 14 set)
+ /* Test if search successful (BIT 14 set) */
if (v & BIT(14))
return v & 0x1ff;
u32 hash;
struct rtl83xx_route route_entry;
- // IPv6 entries take up 3 slots
+ /* IPv6 entries take up 3 slots */
slot_width = (rt->attr.type == 0) || (rt->attr.type == 2) ? 1 : 3;
for (t = 0; t < 2; t++) {
{
u32 v, ip4_m;
struct in6_addr ip6_m;
- // Access L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1
- // The table has a size of 11 registers (20 for MC)
+ /* Access L3_PREFIX_ROUTE_IPUC table (2) via register RTL9300_TBL_1 */
+ /* The table has a size of 11 registers (20 for MC) */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 2);
pr_debug("%s: index %d is valid: %d\n", __func__, idx, rt->attr.valid);
v |= rt->attr.dst_null ? BIT(6) : 0;
v |= rt->attr.qos_as ? BIT(6) : 0;
v |= rt->attr.qos_prio & 0x7;
- v |= rt->prefix_len == 0 ? BIT(20) : 0; // set default route bit
+ v |= rt->prefix_len == 0 ? BIT(20) : 0; /* set default route bit */
- // set bit mask for entry type always to 0x3
+ /* set bit mask for entry type always to 0x3 */
sw_w32(0x3 << 29, rtl_table_data(r, 5));
switch (rt->attr.type) {
- case 0: // IPv4 Unicast route
+ case 0: /* IPv4 Unicast route */
sw_w32(0, rtl_table_data(r, 1));
sw_w32(0, rtl_table_data(r, 2));
sw_w32(0, rtl_table_data(r, 3));
sw_w32(rt->dst_ip, rtl_table_data(r, 4));
- v |= rt->prefix_len == 32 ? BIT(21) : 0; // set host-route bit
+ v |= rt->prefix_len == 32 ? BIT(21) : 0; /* set host-route bit */
ip4_m = inet_make_mask(rt->prefix_len);
sw_w32(0, rtl_table_data(r, 6));
sw_w32(0, rtl_table_data(r, 7));
sw_w32(0, rtl_table_data(r, 8));
sw_w32(ip4_m, rtl_table_data(r, 9));
break;
- case 2: // IPv6 Unicast route
+ case 2: /* IPv6 Unicast route */
sw_w32(rt->dst_ip6.s6_addr32[0], rtl_table_data(r, 1));
sw_w32(rt->dst_ip6.s6_addr32[1], rtl_table_data(r, 2));
sw_w32(rt->dst_ip6.s6_addr32[2], rtl_table_data(r, 3));
sw_w32(rt->dst_ip6.s6_addr32[3], rtl_table_data(r, 4));
- v |= rt->prefix_len == 128 ? BIT(21) : 0; // set host-route bit
+ v |= rt->prefix_len == 128 ? BIT(21) : 0; /* set host-route bit */
rtl930x_net6_mask(rt->prefix_len, &ip6_m);
sw_w32(ip6_m.s6_addr32[2], rtl_table_data(r, 8));
sw_w32(ip6_m.s6_addr32[3], rtl_table_data(r, 9));
break;
- case 1: // IPv4 Multicast route
- case 3: // IPv6 Multicast route
+ case 1: /* IPv4 Multicast route */
+ case 3: /* IPv6 Multicast route */
pr_warn("%s: route type not supported\n", __func__);
rtl_table_release(r);
return;
static void rtl930x_get_l3_nexthop(int idx, u16 *dmac_id, u16 *interface)
{
u32 v;
- // Read L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ /* Read L3_NEXTHOP table (3) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
rtl_table_read(r, idx);
- // The table has a size of 1 register
+ /* The table has a size of 1 register */
v = sw_r32(rtl_table_data(r, 0));
rtl_table_release(r);
int i, free_mtu;
int mtu_id;
- // Try to find an existing mtu-value or a free slot
+ /* Try to find an existing mtu-value or a free slot */
free_mtu = MAX_INTF_MTUS;
for (i = 0; i < MAX_INTF_MTUS && priv->intf_mtus[i] != mtu; i++) {
if ((!priv->intf_mtu_count[i]) && (free_mtu == MAX_INTF_MTUS))
priv->intf_mtus[i] = mtu;
pr_info("Writing MTU %d to slot %d\n", priv->intf_mtus[i], i);
- // Set MTU-value of the slot TODO: distinguish between IPv4/IPv6 routes / slots
+ /* Set MTU-value of the slot; TODO: distinguish between IPv4/IPv6 routes / slots */
sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
RTL930X_L3_IP_MTU_CTRL(i));
sw_w32_mask(0xffff << ((i % 2) * 16), priv->intf_mtus[i] << ((i % 2) * 16),
static int rtl930x_l3_intf_add(struct rtl838x_switch_priv *priv, struct rtl838x_l3_intf *intf)
{
int i, intf_id, mtu_id;
- // number of MTU-values < 16384
+ /* number of MTU-values < 16384 */
- // Use the same IPv6 mtu as the ip4 mtu for this route if unset
+ /* Use the same IPv6 mtu as the ip4 mtu for this route if unset */
intf->ip6_mtu = intf->ip6_mtu ? intf->ip6_mtu : intf->ip4_mtu;
mtu_id = rtl930x_l3_mtu_add(priv, intf->ip4_mtu);
*/
static void rtl930x_set_l3_nexthop(int idx, u16 dmac_id, u16 interface)
{
- // Access L3_NEXTHOP table (3) via register RTL9300_TBL_1
+ /* Access L3_NEXTHOP table (3) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 3);
pr_info("%s: Writing to L3_NEXTHOP table, index %d, dmac_id %d, interface %d\n",
pr_info("%s: unknown field %d\n", __func__, field_type);
}
- // On the RTL9300, the mask fields are not word aligned!
+ /* On the RTL9300, the mask fields are not word aligned! */
if (!(i % 2)) {
r[5 - i / 2] = data;
r[12 - i / 2] |= ((u32)data_m << 8);
static void rtl930x_write_pie_action(u32 r[], struct pie_rule *pr)
{
- // Either drop or forward
+ /* Either drop or forward */
if (pr->drop) {
- r[14] |= BIT(24) | BIT(25) | BIT(26); // Do Green, Yellow and Red drops
- // Actually DROP, not PERMIT in Green / Yellow / Red
+ r[14] |= BIT(24) | BIT(25) | BIT(26); /* Do Green, Yellow and Red drops */
+ /* Actually DROP, not PERMIT in Green / Yellow / Red */
r[14] |= BIT(23) | BIT(22) | BIT(20);
} else {
r[14] |= pr->fwd_sel ? BIT(27) : 0;
r[14] |= pr->fwd_act << 18;
- r[14] |= BIT(14); // We overwrite any drop
+ r[14] |= BIT(14); /* We overwrite any drop */
}
if (pr->phase == PHASE_VACL)
r[14] |= pr->fwd_sa_lrn ? BIT(15) : 0;
static int rtl930x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Access IACL table (2) via register 0
+ /* Access IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL9300_TBL_0, 2);
u32 r[19];
int i;
rtl930x_write_pie_action(r, pr);
-// rtl930x_pie_rule_dump_raw(r);
+/* rtl930x_pie_rule_dump_raw(r); */
for (i = 0; i < 19; i++)
sw_w32(r[i], rtl_table_data(q, i));
if (ether_addr_to_u64(pr->dmac) && !rtl930x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
return -1;
- // TODO: Check more
+ /* TODO: Check more */
i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
set_bit(idx, priv->pie_use_bm);
pr->valid = true;
- pr->tid = j; // Mapped to template number
+ pr->tid = j; /* Mapped to template number */
pr->tid_m = 0x1;
pr->id = idx;
pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
mutex_lock(&priv->reg_mutex);
- // Write from-to and execute bit into control register
+ /* Write from-to and execute bit into control register */
sw_w32(v, RTL930X_PIE_CLR_CTRL);
- // Wait until command has completed
+ /* Wait until command has completed */
do {
} while (sw_r32(RTL930X_PIE_CLR_CTRL) & BIT(0));
mutex_init(&priv->pie_mutex);
pr_info("%s\n", __func__);
- // Enable ACL lookup on all ports, including CPU_PORT
+ /* Enable ACL lookup on all ports, including CPU_PORT */
for (i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL930X_ACL_PORT_LOOKUP_CTRL(i));
- // Include IPG in metering
+ /* Include IPG in metering */
sw_w32_mask(0, 1, RTL930X_METER_GLB_CTRL);
- // Delete all present rules, block size is 128 on all SoC families
+ /* Delete all present rules, block size is 128 on all SoC families */
rtl930x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1);
- // Assign blocks 0-7 to VACL phase (bit = 0), blocks 8-15 to IACL (bit = 1)
+ /* Assign blocks 0-7 to VACL phase (bit = 0), blocks 8-15 to IACL (bit = 1) */
sw_w32(0xff00, RTL930X_PIE_BLK_PHASE_CTRL);
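/* On RTL930X each PIE block has a single phase bit in RTL930X_PIE_BLK_PHASE_CTRL
 * (0: VACL, 1: IACL), so the 0xff00 written above selects IACL for blocks 8-15.
 * A sketch of setting the phase of one block (hypothetical helper):
 */
static void rtl930x_pie_block_set_phase_sketch(int block, bool is_iacl)
{
	sw_w32_mask(BIT(block), is_iacl ? BIT(block) : 0, RTL930X_PIE_BLK_PHASE_CTRL);
}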
- // Enable predefined templates 0, 1 for first quarter of all blocks
+ /* Enable predefined templates 0, 1 for first quarter of all blocks */
template_selectors = 0 | (1 << 4);
for (i = 0; i < priv->n_pie_blocks / 4; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for second quarter of all blocks
+ /* Enable predefined templates 2, 3 for second quarter of all blocks */
template_selectors = 2 | (3 << 4);
for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 0, 1 for third half of all blocks
+ /* Enable predefined templates 0, 1 for third quarter of all blocks */
template_selectors = 0 | (1 << 4);
for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for fourth quater of all blocks
+ /* Enable predefined templates 2, 3 for fourth quarter of all blocks */
template_selectors = 2 | (3 << 4);
for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
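/* The four loops above alternate between the same two template selectors per
 * quarter of the PIE blocks.  An equivalent compact form, shown only as a
 * sketch (selector values taken from the code above, n_pie_blocks assumed to
 * be divisible by 4):
 */
static void rtl930x_pie_templates_sketch(struct rtl838x_switch_priv *priv)
{
	int i;

	for (i = 0; i < priv->n_pie_blocks; i++) {
		int quarter = i / (priv->n_pie_blocks / 4);
		/* Quarters 0 and 2 use templates 0/1, quarters 1 and 3 use 2/3 */
		u32 sel = (quarter % 2) ? (2 | (3 << 4)) : (0 | (1 << 4));

		sw_w32(sel, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
	}
}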
static void rtl930x_set_l3_egress_intf(int idx, struct rtl838x_l3_intf *intf)
{
u32 u, v;
- // Read L3_EGR_INTF table (4) via register RTL9300_TBL_1
+ /* Read L3_EGR_INTF table (4) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 4);
- // The table has 2 registers
+ /* The table has 2 registers */
u = (intf->vid & 0xfff) << 9;
u |= (intf->smac_idx & 0x3f) << 3;
u |= (intf->ip4_mtu_id & 0x7);
static void rtl930x_get_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
{
u32 v, w;
- // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ /* Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
rtl_table_read(r, idx);
- // The table has a size of 7 registers, 64 entries
+ /* The table has a size of 7 registers, 64 entries */
v = sw_r32(rtl_table_data(r, 0));
w = sw_r32(rtl_table_data(r, 3));
m->valid = !!(v & BIT(20));
goto out;
m->p_type = !!(v & BIT(19));
- m->p_id = (v >> 13) & 0x3f; // trunk id of port
+ m->p_id = (v >> 13) & 0x3f; /* trunk id of port */
m->vid = v & 0xfff;
m->vid_mask = w & 0xfff;
m->action = sw_r32(rtl_table_data(r, 6)) & 0x7;
(sw_r32(rtl_table_data(r, 4)));
m->mac = ((((u64)sw_r32(rtl_table_data(r, 1))) << 32) & 0xffffffffffffULL) |
(sw_r32(rtl_table_data(r, 2)));
- // Bits L3_INTF and BMSK_L3_INTF are 0
+ /* Bits L3_INTF and BMSK_L3_INTF are 0 */
out:
rtl_table_release(r);
static void rtl930x_set_l3_router_mac(u32 idx, struct rtl93xx_rt_mac *m)
{
u32 v, w;
- // Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1
+ /* Read L3_ROUTER_MAC table (0) via register RTL9300_TBL_1 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_1, 0);
- // The table has a size of 7 registers, 64 entries
- v = BIT(20); // mac entry valid, port type is 0: individual
+ /* The table has a size of 7 registers, 64 entries */
+ v = BIT(20); /* mac entry valid, port type is 0: individual */
v |= (m->p_id & 0x3f) << 13;
- v |= (m->vid & 0xfff); // Set the interface_id to the vlan id
+ v |= (m->vid & 0xfff); /* Set the interface_id to the vlan id */
w = m->vid_mask;
w |= (m->p_id_mask & 0x3f) << 13;
sw_w32(v, rtl_table_data(r, 0));
sw_w32(w, rtl_table_data(r, 3));
- // Set MAC address, L3_INTF (bit 12 in register 1) needs to be 0
+ /* Set MAC address, L3_INTF (bit 12 in register 1) needs to be 0 */
sw_w32((u32)(m->mac), rtl_table_data(r, 2));
sw_w32(m->mac >> 32, rtl_table_data(r, 1));
- // Set MAC address mask, BMSK_L3_INTF (bit 12 in register 5) needs to be 0
+ /* Set MAC address mask, BMSK_L3_INTF (bit 12 in register 5) needs to be 0 */
sw_w32((u32)(m->mac_mask >> 32), rtl_table_data(r, 4));
sw_w32((u32)m->mac_mask, rtl_table_data(r, 5));
static u64 rtl930x_get_l3_egress_mac(u32 idx)
{
u64 mac;
- // Read L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ /* Read L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
rtl_table_read(r, idx);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
mac = sw_r32(rtl_table_data(r, 0));
mac <<= 32;
mac |= sw_r32(rtl_table_data(r, 1));
*/
static void rtl930x_set_l3_egress_mac(u32 idx, u64 mac)
{
- // Access L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2
+ /* Access L3_EGR_INTF_MAC table (2) via register RTL9300_TBL_2 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_2, 2);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
sw_w32(mac >> 32, rtl_table_data(r, 0));
sw_w32(mac, rtl_table_data(r, 1));
{
int i;
- // Setup MTU with id 0 for default interface
+ /* Setup MTU with id 0 for default interface */
for (i = 0; i < MAX_INTF_MTUS; i++)
priv->intf_mtu_count[i] = priv->intf_mtus[i] = 0;
- priv->intf_mtu_count[0] = 0; // Needs to stay forever
+ priv->intf_mtu_count[0] = 0; /* Needs to stay forever */
priv->intf_mtus[0] = DEFAULT_MTU;
sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP_MTU_CTRL(0));
sw_w32_mask(0xffff, DEFAULT_MTU, RTL930X_L3_IP6_MTU_CTRL(0));
sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP_MTU_CTRL(1));
sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(1));
- // Clear all source port MACs
+ /* Clear all source port MACs */
for (i = 0; i < MAX_SMACS; i++)
rtl930x_set_l3_egress_mac(L3_EGRESS_DMACS + i, 0ULL);
- // Configure the default L3 hash algorithm
- sw_w32_mask(BIT(2), 0, RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 0 = 0
- sw_w32_mask(0, BIT(3), RTL930X_L3_HOST_TBL_CTRL); // Algorithm selection 1 = 1
+ /* Configure the default L3 hash algorithm */
+ sw_w32_mask(BIT(2), 0, RTL930X_L3_HOST_TBL_CTRL); /* Algorithm selection 0 = 0 */
+ sw_w32_mask(0, BIT(3), RTL930X_L3_HOST_TBL_CTRL); /* Algorithm selection 1 = 1 */
pr_info("L3_IPUC_ROUTE_CTRL %08x, IPMC_ROUTE %08x, IP6UC_ROUTE %08x, IP6MC_ROUTE %08x\n",
sw_r32(RTL930X_L3_IPUC_ROUTE_CTRL), sw_r32(RTL930X_L3_IPMC_ROUTE_CTRL),
sw_r32(RTL930X_L3_IP6UC_ROUTE_CTRL), sw_r32(RTL930X_L3_IP6MC_ROUTE_CTRL));
- // Trap non-ip traffic to the CPU-port (e.g. ARP so we stay reachable)
+ /* Trap non-ip traffic to the CPU-port (e.g. ARP so we stay reachable) */
sw_w32_mask(0x3 << 8, 0x1 << 8, RTL930X_L3_IP_ROUTE_CTRL);
pr_info("L3_IP_ROUTE_CTRL %08x\n", sw_r32(RTL930X_L3_IP_ROUTE_CTRL));
- // PORT_ISO_RESTRICT_ROUTE_CTRL ?
+ /* PORT_ISO_RESTRICT_ROUTE_CTRL? */
- // Do not use prefix route 0 because of HW limitations
+ /* Do not use prefix route 0 because of HW limitations */
set_bit(0, priv->route_use_bm);
return 0;
{
u32 v;
- // Read LOG table (3) via register RTL9300_TBL_0
+ /* Read LOG table (3) via register RTL9300_TBL_0 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
pr_debug("In %s, id %d\n", __func__, counter);
pr_debug("Registers: %08x %08x\n",
sw_r32(rtl_table_data(r, 0)), sw_r32(rtl_table_data(r, 1)));
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
v = sw_r32(rtl_table_data(r, 0));
else
static void rtl930x_packet_cntr_clear(int counter)
{
- // Access LOG table (3) via register RTL9300_TBL_0
+ /* Access LOG table (3) via register RTL9300_TBL_0 */
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 3);
pr_info("In %s, id %d\n", __func__, counter);
- // The table has a size of 2 registers
+ /* The table has a size of 2 registers */
if (counter % 2)
sw_w32(0, rtl_table_data(r, 0));
else
sw_w32(v, RTL930X_LED_SET0_0_CTRL - i * 8);
}
- // Set LED mode to serial (0x1)
+ /* Set LED mode to serial (0x1) */
sw_w32_mask(0x3, 0x1, RTL930X_LED_GLB_CTRL);
- // Set port type masks
+ /* Set port type masks */
sw_w32(pm, RTL930X_LED_PORT_COPR_MASK_CTRL);
sw_w32(pm, RTL930X_LED_PORT_FIB_MASK_CTRL);
sw_w32(pm, RTL930X_LED_PORT_COMBO_MASK_CTRL);
.l2_ctrl_1 = RTL930X_L2_AGE_CTRL,
.l2_port_aging_out = RTL930X_L2_PORT_AGE_CTRL,
.set_ageing_time = rtl930x_set_ageing_time,
- .smi_poll_ctrl = RTL930X_SMI_POLL_CTRL, // TODO: Difference to RTL9300_SMI_PRVTE_POLLING_CTRL
+ .smi_poll_ctrl = RTL930X_SMI_POLL_CTRL, /* TODO: Difference to RTL9300_SMI_PRVTE_POLLING_CTRL */
.l2_tbl_flush_ctrl = RTL930X_L2_TBL_FLUSH_CTRL,
.exec_tbl0_cmd = rtl930x_exec_tbl0_cmd,
.exec_tbl1_cmd = rtl930x_exec_tbl1_cmd,
*/
#define TEMPLATE_FIELD_VLAN TEMPLATE_FIELD_ITAG
-// Number of fixed templates predefined in the RTL9300 SoC
+/* Number of fixed templates predefined in the RTL9300 SoC */
#define N_FIXED_TEMPLATES 5
-// RTL931x specific predefined templates
+/* RTL931x specific predefined templates */
static enum template_field_id fixed_templates[N_FIXED_TEMPLATES][N_FIXED_FIELDS_RTL931X] =
{
{
static void rtl931x_vlan_tables_read(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v, w, x, y;
- // Read VLAN table (3) via register 0
+ /* Read VLAN table (3) via register 0 */
struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3);
rtl_table_read(r, vlan);
pr_debug("VLAN_READ %d: %08x %08x %08x %08x\n", vlan, v, w, x, y);
info->tagged_ports = ((u64) v) << 25 | (w >> 7);
info->profile_id = (x >> 16) & 0xf;
- info->fid = w & 0x7f; // AKA MSTI depending on context
+ info->fid = w & 0x7f; /* AKA MSTI depending on context */
info->hash_uc_fid = !!(x & BIT(31));
info->hash_mc_fid = !!(x & BIT(30));
info->if_id = (x >> 20) & 0x3ff;
info->tagged_ports, info->profile_id, info->hash_uc_fid, info->hash_mc_fid,
info->if_id);
- // Read UNTAG table via table register 3
+ /* Read UNTAG table via table register 3 */
r = rtl_table_get(RTL9310_TBL_3, 0);
rtl_table_read(r, vlan);
v = ((u64)sw_r32(rtl_table_data(r, 0))) << 25;
static void rtl931x_vlan_set_tagged(u32 vlan, struct rtl838x_vlan_info *info)
{
u32 v, w, x, y;
- // Access VLAN table (1) via register 0
+ /* Access VLAN table (1) via register 0 */
struct table_reg *r = rtl_table_get(RTL9310_TBL_0, 3);
v = info->tagged_ports >> 25;
pr_debug("RTL931X Link change: status: %x, ports %016llx\n", status, ports);
link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS);
- // Must re-read this to get correct status
+ /* Must re-read this to get correct status */
link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS);
pr_debug("RTL931X Link change: status: %x, link status %016llx\n", status, link);
mutex_lock(&smi_lock);
- // Set PHY to access via port-number
+ /* Set PHY to access via port-number */
sw_w32(port << 5, RTL931X_SMI_INDRT_ACCESS_BC_PHYID_CTRL);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
- v = type << 2 | BIT(0); // MMD-access-type | EXEC
+ v = type << 2 | BIT(0); /* MMD-access-type | EXEC */
sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
do {
v = sw_r32(RTL931X_SMI_INDRT_ACCESS_CTRL_0);
} while (v & BIT(0));
- // Check for error condition
+ /* Check for error condition */
if (v & BIT(1))
err = -EIO;
mutex_lock(&smi_lock);
- // Set PHY to access via port-mask
+ /* Set PHY to access via port-mask */
pm = (u64)1 << port;
sw_w32((u32)pm, RTL931X_SMI_INDRT_ACCESS_CTRL_2);
sw_w32((u32)(pm >> 32), RTL931X_SMI_INDRT_ACCESS_CTRL_2 + 4);
- // Set data to write
+ /* Set data to write */
sw_w32_mask(0xffff, val, RTL931X_SMI_INDRT_ACCESS_CTRL_3);
- // Set MMD device number and register to write to
+ /* Set MMD device number and register to write to */
sw_w32(devnum << 16 | mdiobus_c45_regad(regnum), RTL931X_SMI_INDRT_ACCESS_MMD_CTRL);
- v = BIT(4) | type << 2 | BIT(0); // WRITE | MMD-access-type | EXEC
+ v = BIT(4) | type << 2 | BIT(0); /* WRITE | MMD-access-type | EXEC */
sw_w32(v, RTL931X_SMI_INDRT_ACCESS_CTRL_0);
do {
sw_w32_mask(7 << ((port % 10) * 3), value << ((port % 10) * 3), RTL931X_RMA_BPDU_CTRL + ((port / 10) << 2));
break;
case PTP:
- //udp
+ /* udp */
sw_w32_mask(3 << 2, value << 2, RTL931X_RMA_PTP_CTRL + (port << 2));
- //eth2
+ /* eth2 */
sw_w32_mask(3, value, RTL931X_RMA_PTP_CTRL + (port << 2));
break;
case PTP_UDP:
h4 = (seed >> 48) & 0xfff;
k1 = h0 ^ h1 ^ h2 ^ h3 ^ h4;
- // Algorithm choice for block 0
+ /* Algorithm choice for block 0 */
if (sw_r32(RTL931X_L2_CTRL) & BIT(0))
h = k1;
else
e->is_l2_tunnel = !!(r[2] & BIT(31));
e->is_static = !!(r[2] & BIT(13));
e->port = (r[2] >> 19) & 0x3ff;
- // Check for trunk port
+ /* Check for trunk port */
if (r[2] & BIT(29)) {
e->is_trunk = true;
e->stack_dev = (e->port >> 9) & 1;
e->suspended = !!(r[2] & BIT(12));
e->age = (r[2] >> 16) & 3;
- // the UC_VID field in hardware is used for the VID or for the route id
+ /* the UC_VID field in hardware is used for the VID or for the route id */
if (e->next_hop) {
e->nh_route_id = r[2] & 0x7ff;
e->vid = 0;
}
if (e->is_l2_tunnel)
e->l2_tunnel_id = ((r[2] & 0xff) << 4) | (r[3] >> 28);
- // TODO: Implement VLAN conversion
+ /* TODO: Implement VLAN conversion */
} else {
e->type = L2_MULTICAST;
e->is_local_forward = !!(r[2] & BIT(31));
return;
}
- r[2] = BIT(31); // Set valid bit
+ r[2] = BIT(31); /* Set valid bit */
r[0] = ((u32)e->mac[0]) << 24 |
((u32)e->mac[1]) << 16 |
r[2] |= e->block_sa ? BIT(17) : 0;
r[2] |= e->suspended ? BIT(13) : 0;
r[2] |= (e->age & 0x3) << 17;
- // the UC_VID field in hardware is used for the VID or for the route id
+ /* the UC_VID field in hardware is used for the VID or for the route id */
if (e->next_hop)
r[2] |= e->nh_route_id & 0x7ff;
else
r[2] |= e->vid & 0xfff;
- } else { // L2_MULTICAST
+ } else { /* L2_MULTICAST */
r[2] |= (e->mc_portmask_index & 0x3ff) << 16;
r[2] |= e->mc_mac_index & 0x7ff;
}
hash &= 0xffff;
}
- idx = (0 << 14) | (hash << 2) | pos; // Search SRAM, with hash and at pos in bucket
+ idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
rtl_table_read(q, idx);
seed = rtl931x_l2_hash_seed(mac, e->rvid);
pr_debug("%s: mac %016llx, seed %016llx\n", __func__, mac, seed);
- // return vid with concatenated mac as unique id
+ /* return vid with concatenated mac as unique id */
return seed;
}
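/* rtl931x_l2_hash_seed() itself is not shown in this hunk; the comment above
 * describes the seed as the VID concatenated with the MAC.  A sketch of such
 * a seed with the VID placed above the 48 MAC bits (the exact bit positions
 * are an assumption):
 */
static u64 rtl931x_l2_hash_seed_sketch(u64 mac, u32 vid)
{
	return ((u64)vid << 48) | (mac & 0xffffffffffffULL);
}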
{
u32 r[4];
struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0);
- u32 idx = (0 << 14) | (hash << 2) | pos; // Access SRAM, with hash and at pos in bucket
+ u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
int i;
pr_info("%s: hash %d, pos %d\n", __func__, hash, pos);
static void rtl931x_vlan_fwd_on_inner(int port, bool is_set)
{
- // Always set all tag modes to fwd based on either inner or outer tag
+ /* Always set all tag modes to fwd based on either inner or outer tag */
if (is_set)
sw_w32_mask(0, 0xf, RTL931X_VLAN_PORT_FWD + (port << 2));
else
p[0] = sw_r32(RTL931X_VLAN_PROFILE_SET(profile));
- // Enable routing of Ipv4/6 Unicast and IPv4/6 Multicast traffic
- //p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12);
- p[0] |= 0x3 << 11; // COPY2CPU
+ /* Enable routing of IPv4/6 Unicast and IPv4/6 Multicast traffic */
+ /* p[0] |= BIT(17) | BIT(16) | BIT(13) | BIT(12); */
+ p[0] |= 0x3 << 11; /* COPY2CPU */
- p[1] = 0x1FFFFFF; // L2 unknwon MC flooding portmask all ports, including the CPU-port
+ p[1] = 0x1FFFFFF; /* L2 unknown MC flooding portmask all ports, including the CPU-port */
p[2] = 0xFFFFFFFF;
- p[3] = 0x1FFFFFF; // IPv4 unknwon MC flooding portmask
+ p[3] = 0x1FFFFFF; /* IPv4 unknown MC flooding portmask */
p[4] = 0xFFFFFFFF;
- p[5] = 0x1FFFFFF; // IPv6 unknwon MC flooding portmask
+ p[5] = 0x1FFFFFF; /* IPv6 unknown MC flooding portmask */
p[6] = 0xFFFFFFFF;
for (int i = 0; i < 7; i++)
static void rtl931x_l2_learning_setup(void)
{
- // Portmask for flooding broadcast traffic
+ /* Portmask for flooding broadcast traffic */
rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_BC_FLD_PMSK);
- // Portmask for flooding unicast traffic with unknown destination
+ /* Portmask for flooding unicast traffic with unknown destination */
rtl839x_set_port_reg_be(0x1FFFFFFFFFFFFFF, RTL931X_L2_UNKN_UC_FLD_PMSK);
- // Limit learning to maximum: 64k entries, after that just flood (bits 0-2)
+ /* Limit learning to maximum: 64k entries, after that just flood (bits 0-2) */
sw_w32((0xffff << 3) | FORWARD, RTL931X_L2_LRN_CONSTRT_CTRL);
}
static u64 rtl931x_read_mcast_pmask(int idx)
{
u64 portmask;
- // Read MC_PMSK (2) via register RTL9310_TBL_0
+ /* Read MC_PMSK (2) via register RTL9310_TBL_0 */
struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2);
rtl_table_read(q, idx);
{
u64 pm = portmask;
- // Access MC_PMSK (2) via register RTL9310_TBL_0
+ /* Access MC_PMSK (2) via register RTL9310_TBL_0 */
struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 2);
pr_debug("%s: Index idx %d has portmask %016llx\n", __func__, idx, pm);
}
void rtl931x_sw_init(struct rtl838x_switch_priv *priv)
{
-// rtl931x_sds_init(priv);
+/* rtl931x_sds_init(priv); */
}
static void rtl931x_pie_lookup_enable(struct rtl838x_switch_priv *priv, int index)
for (i = 0; i < N_FIXED_FIELDS; i++) {
rtl931x_pie_data_fill(t[i], pr, &data, &data_m);
- // On the RTL9300, the mask fields are not word aligned!
+ /* On the RTL9300, the mask fields are not word aligned! */
if (!(i % 2)) {
r[5 - i / 2] = data;
r[12 - i / 2] |= ((u32)data_m << 8);
pr->mgnt_vlan = r[7] & BIT(31);
if (pr->phase == PHASE_IACL)
pr->dmac_hit_sw = r[7] & BIT(30);
- else // TODO: EACL/VACL phase handling
+ else /* TODO: EACL/VACL phase handling */
pr->content_too_deep = r[7] & BIT(30);
pr->not_first_frag = r[7] & BIT(29);
pr->frame_type_l4 = (r[7] >> 26) & 7;
static void rtl931x_write_pie_action(u32 r[], struct pie_rule *pr)
{
- // Either drop or forward
+ /* Either drop or forward */
if (pr->drop) {
- r[15] |= BIT(11) | BIT(12) | BIT(13); // Do Green, Yellow and Red drops
- // Actually DROP, not PERMIT in Green / Yellow / Red
+ r[15] |= BIT(11) | BIT(12) | BIT(13); /* Do Green, Yellow and Red drops */
+ /* Actually DROP, not PERMIT in Green / Yellow / Red */
r[16] |= BIT(27) | BIT(28) | BIT(29);
} else {
r[15] |= pr->fwd_sel ? BIT(14) : 0;
r[16] |= pr->fwd_act << 24;
- r[16] |= BIT(21); // We overwrite any drop
+ r[16] |= BIT(21); /* We overwrite any drop */
}
if (pr->phase == PHASE_VACL)
r[16] |= pr->fwd_sa_lrn ? BIT(22) : 0;
r[15] |= pr->log_sel ? BIT(26) : 0;
r[16] |= ((u32)(pr->fwd_data & 0xfff)) << 9;
-// r[15] |= pr->log_octets ? BIT(31) : 0;
+/* r[15] |= pr->log_octets ? BIT(31) : 0; */
r[15] |= (u32)(pr->meter_data) >> 2;
r[16] |= (((u32)(pr->meter_data) >> 7) & 0x3) << 29;
static int rtl931x_pie_rule_write(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr)
{
- // Access IACL table (0) via register 1, the table size is 4096
+ /* Access IACL table (0) via register 1, the table size is 4096 */
struct table_reg *q = rtl_table_get(RTL9310_TBL_1, 0);
u32 r[22];
int i;
if (ether_addr_to_u64(pr->dmac) && !rtl931x_pie_templ_has(t, TEMPLATE_FIELD_DMAC0))
return -1;
- // TODO: Check more
+ /* TODO: Check more */
i = find_first_zero_bit(&priv->pie_use_bm[block * 4], PIE_BLOCK_SIZE);
set_bit(idx, priv->pie_use_bm);
pr->valid = true;
- pr->tid = j; // Mapped to template number
+ pr->tid = j; /* Mapped to template number */
pr->tid_m = 0x1;
pr->id = idx;
pr_info("%s: from %d to %d\n", __func__, index_from, index_to);
mutex_lock(&priv->reg_mutex);
- // Write from-to and execute bit into control register
+ /* Write from-to and execute bit into control register */
sw_w32(v, RTL931X_PIE_CLR_CTRL);
- // Wait until command has completed
+ /* Wait until command has completed */
do {
} while (sw_r32(RTL931X_PIE_CLR_CTRL) & BIT(0));
mutex_init(&priv->pie_mutex);
pr_info("%s\n", __func__);
- // Enable ACL lookup on all ports, including CPU_PORT
+ /* Enable ACL lookup on all ports, including CPU_PORT */
for (i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL931X_ACL_PORT_LOOKUP_CTRL(i));
- // Include IPG in metering
+ /* Include IPG in metering */
sw_w32_mask(0, 1, RTL931X_METER_GLB_CTRL);
- // Delete all present rules, block size is 128 on all SoC families
+ /* Delete all present rules, block size is 128 on all SoC families */
rtl931x_pie_rule_del(priv, 0, priv->n_pie_blocks * 128 - 1);
- // Assign first half blocks 0-7 to VACL phase, second half to IACL
- // 3 bits are used for each block, values for PIE blocks are
- // 6: Disabled, 0: VACL, 1: IACL, 2: EACL
- // And for OpenFlow Flow blocks: 3: Ingress Flow table 0,
- // 4: Ingress Flow Table 3, 5: Egress flow table 0
+ /* Assign first half blocks 0-7 to VACL phase, second half to IACL.
+  * 3 bits are used for each block, values for PIE blocks are
+  * 6: Disabled, 0: VACL, 1: IACL, 2: EACL
+  * And for OpenFlow Flow blocks: 3: Ingress Flow table 0,
+  * 4: Ingress Flow Table 3, 5: Egress flow table 0
+  */
for (i = 0; i < priv->n_pie_blocks; i++) {
int pos = (i % 10) * 3;
u32 r = RTL931X_PIE_BLK_PHASE_CTRL + 4 * (i / 10);
sw_w32_mask(0x7 << pos, 1 << pos, r);
}
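/* The phase fields programmed above are 3 bits wide, ten blocks per 32-bit
 * register.  A sketch of setting an arbitrary phase value for one block,
 * using the encoding from the comment above (hypothetical helper):
 */
static void rtl931x_pie_block_set_phase_sketch(int block, u32 phase)
{
	int pos = (block % 10) * 3;
	u32 reg = RTL931X_PIE_BLK_PHASE_CTRL + 4 * (block / 10);

	sw_w32_mask(0x7 << pos, (phase & 0x7) << pos, reg);
}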
- // Enable predefined templates 0, 1 for first quarter of all blocks
+ /* Enable predefined templates 0, 1 for first quarter of all blocks */
template_selectors = 0 | (1 << 4);
for (i = 0; i < priv->n_pie_blocks / 4; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for second quarter of all blocks
+ /* Enable predefined templates 2, 3 for second quarter of all blocks */
template_selectors = 2 | (3 << 4);
for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 0, 1 for third quater of all blocks
+ /* Enable predefined templates 0, 1 for third quarter of all blocks */
template_selectors = 0 | (1 << 4);
for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
- // Enable predefined templates 2, 3 for fourth quater of all blocks
+ /* Enable predefined templates 2, 3 for fourth quarter of all blocks */
template_selectors = 2 | (3 << 4);
for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
if (!priv->ports[i].phy)
continue;
- v = 0x1; // Found on the EdgeCore, but we do not have any HW description
+ v = 0x1; /* Found on the EdgeCore, but we do not have any HW description */
sw_w32_mask(0x3 << pos, v << pos, RTL931X_LED_PORT_NUM_CTRL(i));
if (priv->ports[i].phy_is_integrated)
sw_w32(v, RTL931X_LED_SET0_0_CTRL - i * 8);
}
- // Set LED mode to serial (0x1)
+ /* Set LED mode to serial (0x1) */
sw_w32_mask(0x3, 0x1, RTL931X_LED_GLB_CTRL);
rtl839x_set_port_reg_le(pm_copper, RTL931X_LED_PORT_COPR_MASK_CTRL);
.get_port_reg_le = rtl839x_get_port_reg_le,
.stat_port_rst = RTL931X_STAT_PORT_RST,
.stat_rst = RTL931X_STAT_RST,
- .stat_port_std_mib = 0, // Not defined
+ .stat_port_std_mib = 0, /* Not defined */
.traffic_enable = rtl931x_traffic_enable,
.traffic_disable = rtl931x_traffic_disable,
.traffic_get = rtl931x_traffic_get,
.l2_ctrl_1 = RTL931X_L2_AGE_CTRL,
.l2_port_aging_out = RTL931X_L2_PORT_AGE_CTRL,
.set_ageing_time = rtl931x_set_ageing_time,
- // .smi_poll_ctrl does not exist
+ /* .smi_poll_ctrl does not exist */
.l2_tbl_flush_ctrl = RTL931X_L2_TBL_FLUSH_CTRL,
.exec_tbl0_cmd = rtl931x_exec_tbl0_cmd,
.exec_tbl1_cmd = rtl931x_exec_tbl1_cmd,
.isr_glb_src = RTL931X_ISR_GLB_SRC,
.isr_port_link_sts_chg = RTL931X_ISR_PORT_LINK_STS_CHG,
.imr_port_link_sts_chg = RTL931X_IMR_PORT_LINK_STS_CHG,
- // imr_glb does not exist on RTL931X
+ /* imr_glb does not exist on RTL931X */
.vlan_tables_read = rtl931x_vlan_tables_read,
.vlan_set_tagged = rtl931x_vlan_set_tagged,
.vlan_set_untagged = rtl931x_vlan_set_untagged,
flow_rule_match_vlan(rule, &match);
flow->rule.itag = match.key->vlan_id;
flow->rule.itag_m = match.mask->vlan_id;
- // TODO: What about match.key->vlan_priority ?
+ /* TODO: What about match.key->vlan_priority? */
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
flow->rule.sport_m = match.mask->src;
}
- // TODO: ICMP
+ /* TODO: ICMP */
return 0;
}
case FLOW_ACTION_VLAN_PUSH:
pr_debug("%s: VLAN_PUSH\n", __func__);
-// TODO: act->vlan.proto
+/* TODO: act->vlan.proto */
flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
flow->rule.ivid_sel = true;
flow->rule.ivid_data = htons(act->vlan.vid);
goto out_free;
}
- rtl83xx_add_flow(priv, f, flow); // TODO: check error
+ rtl83xx_add_flow(priv, f, flow); /* TODO: check error */
- // Add log action to flow
+ /* Add log action to flow */
flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
if (flow->rule.packet_cntr >= 0) {
pr_debug("Using packet counter %d\n", flow->rule.packet_cntr);
flow->rule.last_packet_cnt = total_packets;
}
- // TODO: We need a second PIE rule to count the bytes
+ /* TODO: We need a second PIE rule to count the bytes */
flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused,
FLOW_ACTION_HW_STATS_IMMEDIATE);
static void rtl838x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
{
- // cpu_tag[0] is reserved on the RTL83XX SoCs
- h->cpu_tag[1] = 0x0400; // BIT 10: RTL8380_CPU_TAG
- h->cpu_tag[2] = 0x0200; // Set only AS_DPM, to enable DPM settings below
+ /* cpu_tag[0] is reserved on the RTL83XX SoCs */
+ h->cpu_tag[1] = 0x0400; /* BIT 10: RTL8380_CPU_TAG */
+ h->cpu_tag[2] = 0x0200; /* Set only AS_DPM, to enable DPM settings below */
h->cpu_tag[3] = 0x0000;
h->cpu_tag[4] = BIT(dest_port) >> 16;
h->cpu_tag[5] = BIT(dest_port) & 0xffff;
static void rtl839x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
{
- // cpu_tag[0] is reserved on the RTL83XX SoCs
- h->cpu_tag[1] = 0x0100; // RTL8390_CPU_TAG marker
+ /* cpu_tag[0] is reserved on the RTL83XX SoCs */
+ h->cpu_tag[1] = 0x0100; /* RTL8390_CPU_TAG marker */
h->cpu_tag[2] = BIT(4); /* AS_DPM flag */
h->cpu_tag[3] = h->cpu_tag[4] = h->cpu_tag[5] = 0;
- // h->cpu_tag[1] |= BIT(1) | BIT(0); // Bypass filter 1/2
+ /* h->cpu_tag[1] |= BIT(1) | BIT(0); */ /* Bypass filter 1/2 */
if (dest_port >= 32) {
dest_port -= 32;
h->cpu_tag[2] |= (BIT(dest_port) >> 16) & 0xf;
static void rtl930x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
{
- h->cpu_tag[0] = 0x8000; // CPU tag marker
+ h->cpu_tag[0] = 0x8000; /* CPU tag marker */
h->cpu_tag[1] = h->cpu_tag[2] = 0;
h->cpu_tag[3] = 0;
h->cpu_tag[4] = 0;
static void rtl931x_create_tx_header(struct p_hdr *h, unsigned int dest_port, int prio)
{
- h->cpu_tag[0] = 0x8000; // CPU tag marker
+ h->cpu_tag[0] = 0x8000; /* CPU tag marker */
h->cpu_tag[1] = h->cpu_tag[2] = 0;
h->cpu_tag[3] = 0;
h->cpu_tag[4] = h->cpu_tag[5] = h->cpu_tag[6] = h->cpu_tag[7] = 0;
static void rtl93xx_header_vlan_set(struct p_hdr *h, int vlan)
{
- h->cpu_tag[2] |= BIT(4); // Enable VLAN forwarding offload
+ h->cpu_tag[2] |= BIT(4); /* Enable VLAN forwarding offload */
h->cpu_tag[2] |= (vlan >> 8) & 0xf;
h->cpu_tag[3] |= (vlan & 0xff) << 8;
}
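/* Example of how a TX path could request the VLAN forwarding offload set up
 * by the helper above (a sketch only; the skb plumbing around it is assumed,
 * and <linux/if_vlan.h> is needed for the tag accessors):
 */
static void rtl93xx_tx_vlan_example(struct p_hdr *h, struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		rtl93xx_header_vlan_set(h, skb_vlan_tag_get_id(skb));
}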
*/
void rtl838x_update_cntr(int r, int released)
{
- // This feature is not available on RTL838x SoCs
+ /* This feature is not available on RTL838x SoCs */
}
void rtl839x_update_cntr(int r, int released)
{
- // This feature is not available on RTL839x SoCs
+ /* This feature is not available on RTL839x SoCs */
}
void rtl930x_update_cntr(int r, int released)
t->crc_error = t->reason == 13;
pr_debug("Reason: %d\n", t->reason);
- if (t->reason != 6) // NIC_RX_REASON_SPECIAL_TRAP
+ if (t->reason != 6) /* NIC_RX_REASON_SPECIAL_TRAP */
t->l2_offloaded = 1;
else
t->l2_offloaded = 0;
t->crc_error = h->cpu_tag[4] & BIT(6);
pr_debug("Reason: %d\n", t->reason);
- if ((t->reason >= 7 && t->reason <= 13) || // NIC_RX_REASON_RMA
- (t->reason >= 23 && t->reason <= 25)) // NIC_RX_REASON_SPECIAL_TRAP
+ if ((t->reason >= 7 && t->reason <= 13) || /* NIC_RX_REASON_RMA */
+ (t->reason >= 23 && t->reason <= 25)) /* NIC_RX_REASON_SPECIAL_TRAP */
t->l2_offloaded = 0;
else
t->l2_offloaded = 1;
if (t->reason != 63)
pr_info("%s: Reason %d, port %d, queue %d\n", __func__, t->reason, t->port, t->queue);
- if (t->reason >= 19 && t->reason <= 27) // NIC_RX_REASON_RMA
+ if (t->reason >= 19 && t->reason <= 27) /* NIC_RX_REASON_RMA */
t->l2_offloaded = 0;
else
t->l2_offloaded = 1;
/* Setup Head of Line */
if (priv->family_id == RTL8380_FAMILY_ID)
- sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); // Disabled on RTL8380
+ sw_w32(0, RTL838X_DMA_IF_RX_RING_SIZE); /* Disabled on RTL8380 */
if (priv->family_id == RTL8390_FAMILY_ID)
sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
sw_w32(0x0000c808, priv->r->dma_if_ctrl);
/* Enable Notify, RX done, RX overflow and TX done interrupts */
- sw_w32(0x007fffff, priv->r->dma_if_intr_msk); // Notify IRQ!
+ sw_w32(0x007fffff, priv->r->dma_if_intr_msk); /* Notify IRQ! */
/* Enable DMA */
sw_w32_mask(0, RX_EN | TX_EN, priv->r->dma_if_ctrl);
sw_w32_mask(0x0, 0x3 | BIT(3), priv->r->mac_port_ctrl(priv->cpu_port));
/* CPU port joins Lookup Miss Flooding Portmask */
- // TODO: The code below should also work for the RTL838x
+ /* TODO: The code below should also work for the RTL838x */
sw_w32(0x28000, RTL839X_TBL_ACCESS_L2_CTRL);
sw_w32_mask(0, 0x80000000, RTL839X_TBL_ACCESS_L2_DATA(0));
sw_w32(0x38000, RTL839X_TBL_ACCESS_L2_CTRL);
pos = (i % 3) * 10;
sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));
- // Some SoCs have issues with missing underflow protection
+ /* Some SoCs have issues with missing underflow protection */
v = (sw_r32(priv->r->dma_if_rx_ring_cntr(i)) >> pos) & 0x3ff;
sw_w32_mask(0x3ff << pos, v, priv->r->dma_if_rx_ring_cntr(i));
}
sw_w32_mask(0x3ff << 2, 100 << 2, RTL839X_L2_NOTIFICATION_CTRL);
/* Setup notification events */
- sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); // RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN
- sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); // SUSPEND_NOTIFICATION_EN
+ sw_w32_mask(0, 1 << 14, RTL839X_L2_CTRL_0); /* RTL8390_L2_CTRL_0_FLUSH_NOTIFY_EN */
+ sw_w32_mask(0, 1 << 12, RTL839X_L2_NOTIFICATION_CTRL); /* SUSPEND_NOTIFICATION_EN */
/* Enable Notification */
sw_w32_mask(0, 1 << 0, RTL839X_L2_NOTIFICATION_CTRL);
case RTL8390_FAMILY_ID:
rtl839x_hw_en_rxtx(priv);
- // Trap MLD and IGMP messages to CPU_PORT
+ /* Trap MLD and IGMP messages to CPU_PORT */
sw_w32(0x3, RTL839X_SPCL_TRAP_IGMP_CTRL);
/* Flush learned FDB entries on link down of a port */
sw_w32_mask(0, BIT(7), RTL839X_L2_CTRL_0);
rtl93xx_hw_en_rxtx(priv);
/* Flush learned FDB entries on link down of a port */
sw_w32_mask(0, BIT(7), RTL930X_L2_CTRL);
- // Trap MLD and IGMP messages to CPU_PORT
+ /* Trap MLD and IGMP messages to CPU_PORT */
sw_w32((0x2 << 3) | 0x2, RTL930X_VLAN_APP_PKT_CTRL);
break;
case RTL9310_FAMILY_ID:
rtl93xx_hw_en_rxtx(priv);
- // Trap MLD and IGMP messages to CPU_PORT
+ /* Trap MLD and IGMP messages to CPU_PORT */
sw_w32((0x2 << 3) | 0x2, RTL931X_VLAN_APP_PKT_CTRL);
- // Disable External CPU access to switch, clear EXT_CPU_EN
+ /* Disable External CPU access to switch, clear EXT_CPU_EN */
sw_w32_mask(BIT(2), 0, RTL931X_MAC_L2_GLOBAL_CTRL2);
- // Set PCIE_PWR_DOWN
+ /* Set PCIE_PWR_DOWN */
sw_w32_mask(0, BIT(1), RTL931X_PS_SOC_CTRL);
break;
}
u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
int i;
- // Disable RX/TX from/to CPU-port
+ /* Disable RX/TX from/to CPU-port */
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
/* Disable traffic */
sw_w32_mask(RX_EN_93XX | TX_EN_93XX, 0, priv->r->dma_if_ctrl);
else
sw_w32_mask(RX_EN | TX_EN, 0, priv->r->dma_if_ctrl);
- mdelay(200); // Test, whether this is needed
+ mdelay(200); /* Test whether this is needed */
/* Block all ports */
if (priv->family_id == RTL8380_FAMILY_ID) {
do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
}
}
- // TODO: L2 flush register is 64 bit on RTL931X and 930X
+ /* TODO: L2 flush register is 64 bit on RTL931X and 930X */
/* CPU-Port: Link down */
if (priv->family_id == RTL8380_FAMILY_ID || priv->family_id == RTL8390_FAMILY_ID)
int dest_port = -1;
int q = skb_get_queue_mapping(skb) % TXRINGS;
- if (q) // Check for high prio queue
+ if (q) /* Check for high prio queue */
pr_debug("SKB priority: %d\n", skb->priority);
spin_lock_irqsave(&priv->lock, flags);
len -= 4;
}
- len += 4; // Add space for CRC
+ len += 4; /* Add space for CRC */
if (skb_padto(skb, len)) {
ret = NETDEV_TX_OK;
h = &ring->tx_header[q][ring->c_tx[q]];
h->size = len;
h->len = len;
- // On RTL8380 SoCs, small packet lengths being sent need adjustments
+ /* On RTL8380 SoCs, the length of small packets needs to be adjusted before sending */
if (priv->family_id == RTL8380_FAMILY_ID) {
if (len < ETH_ZLEN - 4)
h->len -= 4;
/* Hand over to switch */
ring->tx_r[q][ring->c_tx[q]] |= 1;
- // Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs
+ /* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
if (priv->family_id == RTL8380_FAMILY_ID) {
for (i = 0; i < 10; i++) {
val = sw_r32(priv->r->dma_if_ctrl);
/* Tell switch to send data */
if (priv->family_id == RTL9310_FAMILY_ID || priv->family_id == RTL9300_FAMILY_ID) {
- // Ring ID q == 0: Low priority, Ring ID = 1: High prio queue
+ /* Ring ID q == 0: Low priority, Ring ID = 1: High prio queue */
if (!q)
sw_w32_mask(0, BIT(2), priv->r->dma_if_ctrl);
else
netif_receive_skb_list(&rx_list);
- // Update counters
+ /* Update counters */
priv->r->update_cntr(r, 0);
spin_unlock_irqrestore(&priv->lock, flags);
static int rtl8390_init_mac(struct rtl838x_eth_priv *priv)
{
- // We will need to set-up EEE and the egress-rate limitation
+ /* We will need to set up EEE and the egress-rate limitation */
return 0;
}
/* Enable PHY control via SoC */
sw_w32_mask(0, 1 << 15, RTL838X_SMI_GLB_CTRL);
- // Probably should reset all PHYs here...
+ /* Probably should reset all PHYs here... */
return 0;
}
/* Disable PHY polling via SoC */
sw_w32_mask(1 << 7, 0, RTL839X_SMI_GLB_CTRL);
- // Probably should reset all PHYs here...
+ /* Probably should reset all PHYs here... */
return 0;
}
u32 poll_ctrl = 0;
u32 private_poll_mask = 0;
u32 v;
- bool uses_usxgmii = false; // For the Aquantia PHYs
- bool uses_hisgmii = false; // For the RTL8221/8226
+ bool uses_usxgmii = false; /* For the Aquantia PHYs */
+ bool uses_hisgmii = false; /* For the RTL8221/8226 */
- // Mapping of port to phy-addresses on an SMI bus
+ /* Mapping of port to phy-addresses on an SMI bus */
poll_sel[0] = poll_sel[1] = 0;
for (i = 0; i < RTL930X_CPU_PORT; i++) {
if (priv->smi_bus[i] > 3)
poll_ctrl |= BIT(20 + priv->smi_bus[i]);
}
- // Configure which SMI bus is behind which port number
+ /* Configure which SMI bus is behind which port number */
sw_w32(poll_sel[0], RTL930X_SMI_PORT0_15_POLLING_SEL);
sw_w32(poll_sel[1], RTL930X_SMI_PORT16_27_POLLING_SEL);
- // Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+)
+ /* Disable POLL_SEL for any SMI bus with a normal PHY (not RTL8295R for SFP+) */
sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
- // Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus
+ /* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
for (i = 0; i < 4; i++)
if (priv->smi_bus_isc45[i])
c45_mask |= BIT(i + 16);
pr_info("c45_mask: %08x\n", c45_mask);
sw_w32_mask(0, c45_mask, RTL930X_SMI_GLB_CTRL);
- // Set the MAC type of each port according to the PHY-interface
- // Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0
+ /* Set the MAC type of each port according to the PHY-interface */
+ /* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
v = 0;
for (i = 0; i < RTL930X_CPU_PORT; i++) {
switch (priv->interfaces[i]) {
case PHY_INTERFACE_MODE_10GBASER:
- break; // Serdes: Value = 0
+ break; /* Serdes: Value = 0 */
case PHY_INTERFACE_MODE_HSGMII:
private_poll_mask |= BIT(i);
- // fallthrough
+ /* fallthrough */
case PHY_INTERFACE_MODE_USXGMII:
v |= BIT(mac_type_bit[i]);
uses_usxgmii = true;
}
sw_w32(v, RTL930X_SMI_MAC_TYPE_CTRL);
- // Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones)
+ /* Set the private polling mask for all Realtek PHYs (i.e. not the 10GBit Aquantia ones) */
sw_w32(private_poll_mask, RTL930X_SMI_PRVTE_POLLING_CTRL);
/* The following magic values are found in the port configuration, they seem to
bool mdc_on[4];
pr_info("%s called\n", __func__);
- // Disable port polling for configuration purposes
+ /* Disable port polling for configuration purposes */
sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL);
sw_w32(0, RTL931X_SMI_PORT_POLLING_CTRL + 4);
msleep(100);
mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
- // Mapping of port to phy-addresses on an SMI bus
+ /* Mapping of port to phy-addresses on an SMI bus */
poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
for (i = 0; i < 56; i++) {
pos = (i % 6) * 5;
mdc_on[priv->smi_bus[i]] = true;
}
- // Configure which SMI bus is behind which port number
+ /* Configure which SMI bus is behind which port number */
for (i = 0; i < 4; i++) {
pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
}
- // Configure which SMI busses
+ /* Configure which SMI busses are polled in C45 mode and which get MDC enabled */
pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
for (i = 0; i < 4; i++) {
- // bus is polled in c45
+ /* bus is polled in c45 */
if (priv->smi_bus_isc45[i])
- c45_mask |= 0x2 << (i * 2); // Std. C45, non-standard is 0x3
- // Enable bus access via MDC
+ c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
+ /* Enable bus access via MDC */
if (mdc_on[i])
sw_w32_mask(0, BIT(9 + i), RTL931X_MAC_L2_GLOBAL_CTRL2);
}
{
pr_info("In %s\n", __func__);
- // Initialize Encapsulation memory and wait until finished
+ /* Initialize Encapsulation memory and wait until finished */
sw_w32(0x1, RTL931X_MEM_ENCAP_INIT);
do { } while (sw_r32(RTL931X_MEM_ENCAP_INIT) & 1);
pr_info("%s: init ENCAP done\n", __func__);
- // Initialize Managemen Information Base memory and wait until finished
+ /* Initialize Management Information Base memory and wait until finished */
sw_w32(0x1, RTL931X_MEM_MIB_INIT);
do { } while (sw_r32(RTL931X_MEM_MIB_INIT) & 1);
pr_info("%s: init MIB done\n", __func__);
- // Initialize ACL (PIE) memory and wait until finished
+ /* Initialize ACL (PIE) memory and wait until finished */
sw_w32(0x1, RTL931X_MEM_ACL_INIT);
do { } while (sw_r32(RTL931X_MEM_ACL_INIT) & 1);
pr_info("%s: init ACL done\n", __func__);
- // Initialize ALE memory and wait until finished
+ /* Initialize ALE memory and wait until finished */
sw_w32(0xFFFFFFFF, RTL931X_MEM_ALE_INIT_0);
do { } while (sw_r32(RTL931X_MEM_ALE_INIT_0));
sw_w32(0x7F, RTL931X_MEM_ALE_INIT_1);
do { } while (sw_r32(RTL931X_MEM_ALE_INIT_2) & 0x7ff);
pr_info("%s: init ALE done\n", __func__);
- // Enable ESD auto recovery
+ /* Enable ESD auto recovery */
sw_w32(0x1, RTL931X_MDX_CTRL_RSVD);
- // Init SPI, is this for thermal control or what?
+ /* Init SPI, is this for thermal control or what? */
sw_w32_mask(0x7 << 11, 0x2 << 11, RTL931X_SPI_CTRL0);
return 0;
goto err_free;
}
- // Allocate ring-buffer space at the end of the allocated memory
+ /* Allocate ring-buffer space at the end of the allocated memory */
ring = priv->membase;
ring->rx_space = priv->membase + sizeof(struct ring_b) + sizeof(struct notify_b);
#define RTL930X_MAC_LINK_DUP_STS (0xCB28)
#define RTL931X_MAC_LINK_DUP_STS (0x0ef0)
-// TODO: RTL8390_MAC_LINK_MEDIA_STS_ADDR ???
+/* TODO: RTL8390_MAC_LINK_MEDIA_STS_ADDR??? */
#define RTL838X_MAC_TX_PAUSE_STS (0xa1a0)
#define RTL839X_MAC_TX_PAUSE_STS (0x03b8)
phy_modify(phydev, 0, BIT(15), BIT(15));
}
-// The access registers for SDS_MODE_SEL and the LSB for each SDS within
+/* The access registers for SDS_MODE_SEL and the LSB for each SDS within */
u16 rtl9300_sds_regs[] = { 0x0194, 0x0194, 0x0194, 0x0194, 0x02a0, 0x02a0, 0x02a0, 0x02a0,
0x02A4, 0x02A4, 0x0198, 0x0198 };
u8 rtl9300_sds_lsb[] = { 0, 6, 12, 18, 0, 6, 12, 18, 0, 6, 0, 6};
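/* The two tables above locate the SDS_MODE_SEL field of each SerDes: the
 * register offset and the LSB of the field within it.  A sketch of writing
 * a mode value through them (the 5-bit field width is an assumption):
 */
static void rtl9300_sds_set_sketch(int sds, u32 mode)
{
	u32 lsb = rtl9300_sds_lsb[sds];

	sw_w32_mask(0x1f << lsb, (mode & 0x1f) << lsb, rtl9300_sds_regs[sds]);
}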
int ret = 0, i;
u32 val;
-// TODO: ret = genphy_read_status(phydev);
-// if (ret < 0) {
-// pr_info("%s: genphy_read_status failed\n", __func__);
-// return ret;
-// }
+/* TODO: ret = genphy_read_status(phydev);
+ * if (ret < 0) {
+ * pr_info("%s: genphy_read_status failed\n", __func__);
+ * return ret;
+ * }
+ */
- // Link status must be read twice
+ /* Link status must be read twice */
for (i = 0; i < 2; i++)
val = phy_read_mmd(phydev, MMD_VEND2, 0xA402);
if (!phydev->link)
goto out;
- // Read duplex status
+ /* Read duplex status */
val = phy_read_mmd(phydev, MMD_VEND2, 0xA434);
if (val < 0)
goto out;
phydev->duplex = !!(val & BIT(3));
- // Read speed
+ /* Read speed */
val = phy_read_mmd(phydev, MMD_VEND2, 0xA434);
switch (val & 0x0630) {
case 0x0000:
if (v < 0)
goto out;
- v |= BIT(5); // HD 10M
- v |= BIT(6); // FD 10M
- v |= BIT(7); // HD 100M
- v |= BIT(8); // FD 100M
+ v |= BIT(5); /* HD 10M */
+ v |= BIT(6); /* FD 10M */
+ v |= BIT(7); /* HD 100M */
+ v |= BIT(8); /* FD 100M */
ret = phy_write_mmd(phydev, MMD_AN, 16, v);
- // Allow 1GBit
+ /* Allow 1GBit */
v = phy_read_mmd(phydev, MMD_VEND2, 0xA412);
if (v < 0)
goto out;
- v |= BIT(9); // FD 1000M
+ v |= BIT(9); /* FD 1000M */
ret = phy_write_mmd(phydev, MMD_VEND2, 0xA412, v);
if (ret < 0)
goto out;
- // Allow 2.5G
+ /* Allow 2.5G */
v = phy_read_mmd(phydev, MMD_AN, 32);
if (v < 0)
goto out;
ret = rtl8226_advertise_aneg(phydev);
if (ret)
goto out;
- // AutoNegotiationEnable
+ /* AutoNegotiationEnable */
v = phy_read_mmd(phydev, MMD_AN, 0);
if (v < 0)
goto out;
- v |= BIT(12); // Enable AN
+ v |= BIT(12); /* Enable AN */
ret = phy_write_mmd(phydev, MMD_AN, 0, v);
if (ret < 0)
goto out;
- // RestartAutoNegotiation
+ /* RestartAutoNegotiation */
v = phy_read_mmd(phydev, MMD_VEND2, 0xA400);
if (v < 0)
goto out;
ret = phy_write_mmd(phydev, MMD_VEND2, 0xA400, v);
}
-// TODO: ret = __genphy_config_aneg(phydev, ret);
+/* TODO: ret = __genphy_config_aneg(phydev, ret); */
out:
return ret;
poll_state = disable_polling(port);
- // Remember aneg state
+ /* Remember aneg state */
val = phy_read_mmd(phydev, MMD_AN, 0);
an_enabled = !!(val & BIT(12));
- // Setup 100/1000MBit
+ /* Setup 100/1000MBit */
val = phy_read_mmd(phydev, MMD_AN, 60);
if (e->eee_enabled)
val |= 0x6;
val &= 0x6;
phy_write_mmd(phydev, MMD_AN, 60, val);
- // Setup 2.5GBit
+ /* Setup 2.5GBit */
val = phy_read_mmd(phydev, MMD_AN, 62);
if (e->eee_enabled)
val |= 0x1;
val &= 0x1;
phy_write_mmd(phydev, MMD_AN, 62, val);
- // RestartAutoNegotiation
+ /* RestartAutoNegotiation */
val = phy_read_mmd(phydev, MMD_VEND2, 0xA400);
val |= BIT(9);
phy_write_mmd(phydev, MMD_VEND2, 0xA400, val);
val = phy_read_paged(phydev, 7, 60);
if (e->eee_enabled) {
- // Verify vs MAC-based EEE
+ /* Verify vs MAC-based EEE */
e->eee_enabled = !!(val & BIT(7));
if (!e->eee_enabled) {
val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25);
/* Set GPHY page to copper */
phy_write_paged(phydev, RTL821X_PAGE_GPHY, RTL821XINT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER);
- // Get auto-negotiation status
+ /* Get auto-negotiation status */
val = phy_read(phydev, 0);
an_enabled = val & BIT(12);
pr_info("%s: aneg: %d\n", __func__, an_enabled);
val = phy_read_paged(phydev, RTL821X_PAGE_MAC, 25);
- val &= ~BIT(5); // Use MAC-based EEE
+ val &= ~BIT(5); /* Use MAC-based EEE */
phy_write_paged(phydev, RTL821X_PAGE_MAC, 25, val);
/* Enable 100M (bit 1) / 1000M (bit 2) EEE */
mode = rtl9300_sds_mode_get(sds_num);
pr_info("%s got SDS mode %02x\n", __func__, mode);
- if (mode == 0x1a) { // 10GR mode
+ if (mode == 0x1a) { /* 10GR mode */
status = rtl9300_sds_field_r(sds_num, 0x5, 0, 12, 12);
latch_status = rtl9300_sds_field_r(sds_num, 0x4, 1, 2, 2);
status |= rtl9300_sds_field_r(sds_num, 0x5, 0, 12, 12);
void rtl930x_sds_rx_rst(int sds_num, phy_interface_t phy_if)
{
- int page = 0x2e; // 10GR and USXGMII
+ int page = 0x2e; /* 10GR and USXGMII */
if (phy_if == PHY_INTERFACE_MODE_1000BASEX)
page = 0x24;
case PHY_INTERFACE_MODE_HSGMII:
sds_mode = 0x12;
lc_value = 0x3;
- // Configure LC
+ /* Configure LC */
break;
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
sds_mode = 0x16;
lc_value = 0x3;
- // Configure LC
+ /* Configure LC */
break;
case PHY_INTERFACE_MODE_10GBASER:
break;
case PHY_INTERFACE_MODE_NA:
- // This will disable SerDes
+ /* This will disable SerDes */
sds_mode = 0x1f;
break;
}
pr_info("%s --------------------- serdes %d forcing to %x ...\n", __func__, sds, sds_mode);
- // Power down SerDes
+ /* Power down SerDes */
rtl9300_sds_field_w(sds, 0x20, 0, 7, 6, 0x3);
if (sds == 5) pr_info("%s after %x\n", __func__, rtl930x_read_sds_phy(sds, 0x20, 0));
if (sds == 5) pr_info("%s a %x\n", __func__, rtl930x_read_sds_phy(sds, 0x1f, 9));
- // Force mode enable
+ /* Force mode enable */
rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 0x1);
if (sds == 5) pr_info("%s b %x\n", __func__, rtl930x_read_sds_phy(sds, 0x1f, 9));
return;
if (sds == 5) pr_info("%s c %x\n", __func__, rtl930x_read_sds_phy(sds, 0x20, 18));
- // Enable LC and ring
+ /* Enable LC and ring */
rtl9300_sds_field_w(lane_0, 0x20, 18, 3, 0, 0xf);
if (sds == lane_0)
else
rtl9300_sds_field_w(lane_0, 0x20, 18, 15, 12, lc_value);
- // Force analog LC & ring on
+ /* Force analog LC & ring on */
rtl9300_sds_field_w(lane_0, 0x21, 11, 3, 0, 0xf);
v = lc_on ? 0x3 : 0x1;
else
rtl9300_sds_field_w(lane_0, 0x20, 18, 7, 6, v);
- // Force SerDes mode
+ /* Force SerDes mode */
rtl9300_sds_field_w(sds, 0x1f, 9, 6, 6, 1);
rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, sds_mode);
- // Toggle LC or Ring
+ /* Toggle LC or Ring */
for (i = 0; i < 20; i++) {
mdelay(200);
t = rtl9300_sds_field_r(sds, 0x6, 0x1, 2, 2);
rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, 0x1);
- // Reset FSM
+ /* Reset FSM */
rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1);
mdelay(10);
rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0);
mdelay(10);
- // Need to read this twice
+ /* Need to read this twice */
v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12);
v = rtl9300_sds_field_r(sds, 0x5, 0, 12, 12);
rtl9300_sds_field_w(sds, 0x6, 0x1, 2, 2, t);
- // Reset FSM again
+ /* Reset FSM again */
rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x1);
mdelay(10);
rtl9300_sds_field_w(sds, 0x6, 0x2, 12, 12, 0x0);
rtl930x_sds_rx_rst(sds, phy_if);
- // Re-enable power
+ /* Re-enable power */
rtl9300_sds_field_w(sds, 0x20, 0, 7, 6, 0);
pr_info("%s --------------------- serdes %d forced to %x DONE\n", __func__, sds, sds_mode);
void rtl9300_sds_tx_config(int sds, phy_interface_t phy_if)
{
- // parameters: rtl9303_80G_txParam_s2
+ /* parameters: rtl9303_80G_txParam_s2 */
int impedance = 0x8;
int pre_amp = 0x2;
int main_amp = 0x9;
{
u32 v10, v1;
- v10 = rtl930x_read_sds_phy(sds, 6, 2); // 10GBit, page 6, reg 2
- v1 = rtl930x_read_sds_phy(sds, 0, 0); // 1GBit, page 0, reg 0
+ v10 = rtl930x_read_sds_phy(sds, 6, 2); /* 10GBit, page 6, reg 2 */
+ v1 = rtl930x_read_sds_phy(sds, 0, 0); /* 1GBit, page 0, reg 0 */
pr_info("%s: registers before %08x %08x\n", __func__, v10, v1);
v10 &= ~(BIT(13) | BIT(14));
else
rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31);
- // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1]
+ /* ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1);
- // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x]
+ /* ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] */
rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20);
switch(dcvs_id) {
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x22);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 14, 14);
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x23);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 13, 13);
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x24);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 12, 12);
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x25);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = rtl9300_sds_field_r(sds_num, 0x2e, 0x1e, 11, 11);
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x2c);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x01, 15, 15);
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0x2d);
mdelay(1);
- // ##DCVS0 Read Out
+ /* ##DCVS0 Read Out */
dcvs_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 4);
dcvs_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 3, 0);
dcvs_manual = rtl9300_sds_field_r(sds_num, 0x2e, 0x02, 11, 11);
else
rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31);
- // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1]
+ /* ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1);
- // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[0 1 x x x x]
+ /* ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[0 1 x x x x] */
rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x10);
mdelay(1);
- // ##LEQ Read Out
+ /* ##LEQ Read Out */
leq_gray = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 7, 3);
leq_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x18, 15, 15);
leq_bin = rtl9300_sds_rxcal_gray_to_binary(leq_gray);
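/* rtl9300_sds_rxcal_gray_to_binary() is not included in this hunk; the
 * following is only a generic Gray-code to binary conversion, sketching what
 * such a helper typically computes:
 */
static u32 gray_to_binary_sketch(u32 gray)
{
	u32 bin = gray;

	/* XOR in every higher-order bit to undo the Gray encoding */
	while (gray >>= 1)
		bin ^= gray;

	return bin;
}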
{
u32 vth_manual;
- //##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x002F]; //Lane0
- //##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x0031]; //Lane1
+ /* ##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x002F]; */ /* Lane0 */
+ /* ##Page0x1F, Reg0x02[15 0], REG_DBGO_SEL=[0x0031]; */ /* Lane1 */
if (!(sds_num % 2))
rtl930x_write_sds_phy(sds_num, 0x1f, 0x2, 0x2f);
else
rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31);
- //##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1]
+ /* ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1);
- //##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x]
+ /* ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] */
rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20);
- //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 0 0]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 0 0] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xc);
mdelay(1);
- //##VthP & VthN Read Out
- vth_list[0] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 2, 0); // v_thp set bin
- vth_list[1] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 3); // v_thn set bin
+ /* ##VthP & VthN Read Out */
+ vth_list[0] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 2, 0); /* v_thp set bin */
+ vth_list[1] = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 3); /* v_thn set bin */
pr_info("vth_set_bin = %d", vth_list[0]);
pr_info("vth_set_bin = %d", vth_list[1]);
if (manual) {
switch(tap_id) {
case 0:
- //##REG0_LOAD_IN_INIT[0]=1; REG0_TAP0_INIT[5:0]=Tap0_Value
+ /* ##REG0_LOAD_IN_INIT[0]=1; REG0_TAP0_INIT[5:0]=Tap0_Value */
rtl9300_sds_field_w(sds_num, 0x2e, 0x0f, tap_id + 7, tap_id + 7, 0x1);
rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 5, 5, tap_list[0]);
rtl9300_sds_field_w(sds_num, 0x2f, 0x03, 4, 0, tap_list[1]);
else
rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31);
- //##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1]
+ /* ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1);
- //##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x]
+ /* ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] */
rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20);
if (!tap_id) {
- //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0);
- //##Tap1 Even Read Out
+ /* ##Tap1 Even Read Out */
mdelay(1);
tap0_sign_out = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5);
tap0_coef_bin = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0);
tap_manual = !!rtl9300_sds_field_r(sds_num, 0x2e, 0x0f, 7, 7);
pr_info("tap0 manual = %u",tap_manual);
} else {
- //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 0 0 1] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, tap_id);
mdelay(1);
- //##Tap1 Even Read Out
+ /* ##Tap1 Even Read Out */
tap_sign_out_even = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5);
tap_coef_bin_even = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0);
- //##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 1 1 0]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 0 1 1 0] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, (tap_id + 5));
- //##Tap1 Odd Read Out
+ /* ##Tap1 Odd Read Out */
tap_sign_out_odd = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 5);
tap_coef_bin_odd = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 4, 0);
void rtl9300_do_rx_calibration_1(int sds, phy_interface_t phy_mode)
{
- // From both rtl9300_rxCaliConf_serdes_myParam and rtl9300_rxCaliConf_phy_myParam
- int tap0_init_val = 0x1f; // Initial Decision Fed Equalizer 0 tap
+ /* From both rtl9300_rxCaliConf_serdes_myParam and rtl9300_rxCaliConf_phy_myParam */
+ int tap0_init_val = 0x1f; /* Initial Decision Fed Equalizer 0 tap */
int vth_min = 0x0;
pr_info("start_1.1.1 initial value for sds %d\n", sds);
rtl930x_write_sds_phy(sds, 6, 0, 0);
- // FGCAL
+ /* FGCAL */
rtl9300_sds_field_w(sds, 0x2e, 0x01, 14, 14, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x1c, 10, 5, 0x20);
rtl9300_sds_field_w(sds, 0x2f, 0x02, 0, 0, 0x01);
- // DCVS
+ /* DCVS */
rtl9300_sds_field_w(sds, 0x2e, 0x1e, 14, 11, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x01, 15, 15, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x02, 11, 11, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x04, 6, 6, 0x01);
rtl9300_sds_field_w(sds, 0x2e, 0x04, 7, 7, 0x01);
- // LEQ (Long Term Equivalent signal level)
+ /* LEQ (Long Term Equivalent signal level) */
rtl9300_sds_field_w(sds, 0x2e, 0x16, 14, 8, 0x00);
- // DFE (Decision Fed Equalizer)
+ /* DFE (Decision Fed Equalizer) */
rtl9300_sds_field_w(sds, 0x2f, 0x03, 5, 0, tap0_init_val);
rtl9300_sds_field_w(sds, 0x2e, 0x09, 11, 6, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x09, 5, 0, 0x00);
rtl9300_sds_field_w(sds, 0x2e, 0x06, 5, 0, 0x00);
rtl9300_sds_field_w(sds, 0x2f, 0x01, 5, 0, 0x00);
- // Vth
+ /* Vth */
rtl9300_sds_field_w(sds, 0x2e, 0x13, 5, 3, 0x07);
rtl9300_sds_field_w(sds, 0x2e, 0x13, 2, 0, 0x07);
rtl9300_sds_field_w(sds, 0x2f, 0x0b, 5, 3, vth_min);
pr_info("start_1.1.5 LEQ and DFE setting\n");
- // TODO: make this work for DAC cables of different lengths
- // For a 10GBit serdes wit Fibre, SDS 8 or 9
+ /* TODO: make this work for DAC cables of different lengths */
+ /* For a 10GBit serdes with Fibre, SDS 8 or 9 */
- if (phy_mode == PHY_INTERFACE_MODE_10GBASER || PHY_INTERFACE_MODE_1000BASEX)
+ if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX)
rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x02);
else
pr_err("%s not PHY-based or SerDes, implement DAC!\n", __func__);
- // No serdes, check for Aquantia PHYs
+ /* No serdes, check for Aquantia PHYs */
rtl9300_sds_field_w(sds, 0x2e, 0x16, 3, 2, 0x02);
rtl9300_sds_field_w(sds, 0x2e, 0x0f, 6, 0, 0x5f);
{
pr_info("start_1.2.1 ForegroundOffsetCal_Manual\n");
- // Gray config endis to 1
+ /* Gray config endis to 1 */
rtl9300_sds_field_w(sds_num, 0x2f, 0x02, 2, 2, 0x01);
- // ForegroundOffsetCal_Manual(auto mode)
+ /* ForegroundOffsetCal_Manual(auto mode) */
rtl9300_sds_field_w(sds_num, 0x2e, 0x01, 14, 14, 0x00);
pr_info("end_1.2.1");
void rtl9300_do_rx_calibration_2_2(int sds_num)
{
- //Force Rx-Run = 0
+ /* Force Rx-Run = 0 */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 8, 8, 0x0);
rtl930x_sds_rx_rst(sds_num, PHY_INTERFACE_MODE_10GBASER);
else
rtl930x_write_sds_phy(sds_num - 1, 0x1f, 0x2, 0x31);
- // ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1]
+ /* ##Page0x2E, Reg0x15[9], REG0_RX_EN_TEST=[1] */
rtl9300_sds_field_w(sds_num, 0x2e, 0x15, 9, 9, 0x1);
- // ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x]
+ /* ##Page0x21, Reg0x06[11 6], REG0_RX_DEBUG_SEL=[1 0 x x x x] */
rtl9300_sds_field_w(sds_num, 0x21, 0x06, 11, 6, 0x20);
- // ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 1]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 1] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xf);
- // ##FGCAL read gray
+ /* ##FGCAL read gray */
fgcal_gray = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 0);
- // ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 0]
+ /* ##Page0x2F, Reg0x0C[5 0], REG0_COEF_SEL=[0 0 1 1 1 0] */
rtl9300_sds_field_w(sds_num, 0x2f, 0x0c, 5, 0, 0xe);
- // ##FGCAL read binary
+ /* ##FGCAL read binary */
fgcal_binary = rtl9300_sds_field_r(sds_num, 0x1f, 0x14, 5, 0);
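+ /* Both reads use the same debug register (Page 0x1F, Reg 0x14); COEF_SEL 0xf returns the FGCAL result gray-coded, 0xe returns it in binary */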
pr_info("%s: fgcal_gray: %d, fgcal_binary %d\n",
{
pr_info("start_1.3.1");
- // ##1.3.1
+ /* ##1.3.1 */
if (phy_mode != PHY_INTERFACE_MODE_10GBASER && phy_mode != PHY_INTERFACE_MODE_1000BASEX)
rtl9300_sds_field_w(sds_num, 0x2e, 0xc, 8, 8, 0);
int i;
if (phy_mode == PHY_INTERFACE_MODE_10GBASER || phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
- // rtl9300_rxCaliConf_serdes_myParam
+ /* rtl9300_rxCaliConf_serdes_myParam */
dac_long_cable_offset = 3;
eq_hold_enabled = true;
} else {
- // rtl9300_rxCaliConf_phy_myParam
+ /* rtl9300_rxCaliConf_phy_myParam */
dac_long_cable_offset = 0;
eq_hold_enabled = false;
}
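+ /* dac_long_cable_offset and eq_hold_enabled mirror the vendor's serdes vs. PHY calibration parameter sets named above */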
pr_info("start_1.4.1");
- // ##1.4.1
+ /* ##1.4.1 */
rtl9300_sds_rxcal_vth_manual(sds_num, false, vth_list);
rtl9300_sds_rxcal_tap_manual(sds_num, 0, false, tap0_list);
mdelay(200);
void rtl9300_do_rx_calibration_5(u32 sds_num, phy_interface_t phy_mode)
{
- if (phy_mode == PHY_INTERFACE_MODE_10GBASER) // dfeTap1_4Enable true
+ if (phy_mode == PHY_INTERFACE_MODE_10GBASER) /* dfeTap1_4Enable true */
rtl9300_do_rx_calibration_5_2(sds_num);
}
rtl9300_do_rx_calibration_5(sds, phy_mode);
mdelay(20);
- // Do this only for 10GR mode, SDS active in mode 0x1a
+ /* Do this only for 10GR mode, SDS active in mode 0x1a */
if (rtl9300_sds_field_r(sds, 0x1f, 9, 11, 7) == 0x1a) {
pr_info("%s: SDS enabled\n", __func__);
latch_sts = rtl9300_sds_field_r(sds, 0x4, 1, 2, 2);
break;
case PHY_INTERFACE_MODE_10GBASER:
- // Read twice to clear
+ /* Read twice to clear */
rtl930x_read_sds_phy(sds_num, 5, 1);
rtl930x_read_sds_phy(sds_num, 5, 1);
break;
rtl9300_sds_sym_err_reset(sds_num, phy_mode);
rtl9300_sds_sym_err_reset(sds_num, phy_mode);
- // Count errors during 1ms
+ /* Count errors during 1ms */
errors1 = rtl9300_sds_sym_err_get(sds_num, phy_mode);
mdelay(1);
errors2 = rtl9300_sds_sym_err_get(sds_num, phy_mode);
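+ /* Two counter snapshots taken 1 ms apart; presumably the caller compares them to decide whether the calibrated link is error-free */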
{
u32 v;
- // Enable 1GBit PHY
+ /* Enable 1GBit PHY */
v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG);
pr_info("%s 1gbit phy: %08x\n", __func__, v);
v &= ~BIT(PHY_POWER_BIT);
rtl930x_write_sds_phy(sds_num, PHY_PAGE_2, PHY_CTRL_REG, v);
pr_info("%s 1gbit phy enabled: %08x\n", __func__, v);
- // Enable 10GBit PHY
+ /* Enable 10GBit PHY */
v = rtl930x_read_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG);
pr_info("%s 10gbit phy: %08x\n", __func__, v);
v &= ~BIT(PHY_POWER_BIT);
rtl930x_write_sds_phy(sds_num, PHY_PAGE_4, PHY_CTRL_REG, v);
pr_info("%s 10gbit phy after: %08x\n", __func__, v);
- // dal_longan_construct_mac_default_10gmedia_fiber
+ /* dal_longan_construct_mac_default_10gmedia_fiber */
v = rtl930x_read_sds_phy(sds_num, 0x1f, 11);
pr_info("%s set medium: %08x\n", __func__, v);
v |= BIT(1);
}
#define RTL930X_MAC_FORCE_MODE_CTRL (0xCA1C)
-// phy_mode = PHY_INTERFACE_MODE_10GBASER, sds_mode = 0x1a
+/* phy_mode = PHY_INTERFACE_MODE_10GBASER, sds_mode = 0x1a */
int rtl9300_serdes_setup(int sds_num, phy_interface_t phy_mode)
{
int sds_mode;
return -EINVAL;
}
- // Maybe use dal_longan_sds_init
+ /* Maybe use dal_longan_sds_init */
- // dal_longan_construct_serdesConfig_init // Serdes Construct
+ /* dal_longan_construct_serdesConfig_init (Serdes Construct) */
rtl9300_phy_enable_10g_1g(sds_num);
- // Set Serdes Mode
- rtl9300_sds_set(sds_num, 0x1a); // 0x1b: RTK_MII_10GR1000BX_AUTO
+ /* Set Serdes Mode */
+ rtl9300_sds_set(sds_num, 0x1a); /* 0x1b: RTK_MII_10GR1000BX_AUTO */
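+ /* 0x1a is the plain 10GR serdes mode (see the sds_mode note above this function); 0x1b apparently adds 1000BX autodetection */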
- // Do RX calibration
+ /* Do RX calibration */
do {
rtl9300_do_rx_calibration(sds_num, phy_mode);
calib_tries++;
u32 en;
u32 cmu_band;
-// page = rtl9300_sds_cmu_page_get(sds);
- page = 0x25; // 10GR and 1000BX
+/* page = rtl9300_sds_cmu_page_get(sds); */
+ page = 0x25; /* 10GR and 1000BX */
sds = (sds % 2) ? (sds - 1) : (sds);
rtl9300_sds_field_w(sds, page, 0x1c, 15, 15, 1);
rtl9300_sds_field_w(sds + 1, page, 0x1c, 15, 15, 1);
en = rtl9300_sds_field_r(sds, page, 27, 1, 1);
- if(!en) { // Auto mode
+ if (!en) { /* Auto mode */
rtl930x_write_sds_phy(sds, 0x1f, 0x02, 31);
cmu_band = rtl9300_sds_field_r(sds, 0x1f, 0x15, 5, 1);
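+ /* In auto mode, 31 is written to Page 0x1F, Reg 0x02 above (presumably a read-out select) before the calibrated band is read from Page 0x1F, Reg 0x15[5:1] */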
if (sds_num < 0)
return 0;
- if (phy_mode != PHY_INTERFACE_MODE_10GBASER) // TODO: for now we only patch 10GR SerDes
+ if (phy_mode != PHY_INTERFACE_MODE_10GBASER) /* TODO: for now we only patch 10GR SerDes */
return 0;
switch (phy_mode) {
pr_info("%s CMU BAND is %d\n", __func__, rtl9300_sds_cmu_band_get(sds_num));
- // Turn Off Serdes
+ /* Turn Off Serdes */
rtl9300_sds_rst(sds_num, 0x1f);
pr_info("%s PATCHING SerDes %d\n", __func__, sds_num);
rtl9300_phy_enable_10g_1g(sds_num);
- // Disable MAC
+ /* Disable MAC */
sw_w32_mask(0, 1, RTL930X_MAC_FORCE_MODE_CTRL);
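+ /* sw_w32_mask(clear, set, reg): the write above sets bit 0 to disable the MAC; it is cleared again at the "Re-Enable MAC" step below */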
mdelay(20);
- // ----> dal_longan_sds_mode_set
+ /* ----> dal_longan_sds_mode_set */
pr_info("%s: Configuring RTL9300 SERDES %d, mode %02x\n", __func__, sds_num, sds_mode);
- // Configure link to MAC
- rtl9300_serdes_mac_link_config(sds_num, true, true); // MAC Construct
+ /* Configure link to MAC */
+ rtl9300_serdes_mac_link_config(sds_num, true, true); /* MAC Construct */
- // Disable MAC
+ /* Disable MAC */
sw_w32_mask(0, 1, RTL930X_MAC_FORCE_MODE_CTRL);
mdelay(20);
rtl9300_force_sds_mode(sds_num, PHY_INTERFACE_MODE_NA);
- // Re-Enable MAC
+ /* Re-Enable MAC */
sw_w32_mask(1, 0, RTL930X_MAC_FORCE_MODE_CTRL);
rtl9300_force_sds_mode(sds_num, phy_mode);
- // Do RX calibration
+ /* Do RX calibration */
do {
rtl9300_do_rx_calibration(sds_num, phy_mode);
calib_tries++;
rtl9300_sds_tx_config(sds_num, phy_mode);
- // The clock needs only to be configured on the FPGA implementation
+ /* The clock needs only to be configured on the FPGA implementation */
return 0;
}
u32 o, v, o_mode;
int shift = ((sds & 0x3) << 3);
- // TODO: We need to lock this!
+ /* TODO: We need to lock this! */
o = sw_r32(RTL931X_PS_SERDES_OFF_MODE_CTRL_ADDR);
v = o | BIT(sds);
{
switch (mode) {
case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX: // MII_1000BX_FIBER / 100BX_FIBER / 1000BX100BX_AUTO
+ case PHY_INTERFACE_MODE_1000BASEX: /* MII_1000BX_FIBER / 100BX_FIBER / 1000BX100BX_AUTO */
return 0x24;
case PHY_INTERFACE_MODE_HSGMII:
- case PHY_INTERFACE_MODE_2500BASEX: // MII_2500Base_X:
+ case PHY_INTERFACE_MODE_2500BASEX: /* MII_2500Base_X: */
return 0x28;
-// case MII_HISGMII_5G:
-// return 0x2a;
+/* case MII_HISGMII_5G: */
+/* return 0x2a; */
case PHY_INTERFACE_MODE_QSGMII:
- return 0x2a; // Code also has 0x34
- case PHY_INTERFACE_MODE_XAUI: // MII_RXAUI_LITE:
+ return 0x2a; /* Code also has 0x34 */
+ case PHY_INTERFACE_MODE_XAUI: /* MII_RXAUI_LITE: */
return 0x2c;
- case PHY_INTERFACE_MODE_XGMII: // MII_XSGMII
+ case PHY_INTERFACE_MODE_XGMII: /* MII_XSGMII */
case PHY_INTERFACE_MODE_10GKR:
- case PHY_INTERFACE_MODE_10GBASER: // MII_10GR
+ case PHY_INTERFACE_MODE_10GBASER: /* MII_10GR */
return 0x2e;
default:
return -1;
static void rtl931x_cmu_type_set(u32 asds, phy_interface_t mode, int chiptype)
{
- int cmu_type = 0; // Clock Management Unit
+ int cmu_type = 0; /* Clock Management Unit */
u32 cmu_page = 0;
u32 frc_cmu_spd;
u32 evenSds;
val = 0x6;
break;
case PHY_INTERFACE_MODE_XGMII:
- val = 0x10; // serdes mode XSGMII
+ val = 0x10; /* serdes mode XSGMII */
break;
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_NA:
break;
- case PHY_INTERFACE_MODE_XGMII: // MII_XSGMII
+ case PHY_INTERFACE_MODE_XGMII: /* MII_XSGMII */
if (chiptype) {
u32 xsg_sdsid_1;
xsg_sdsid_1 = dSds + 1;
- //fifo inv clk
+ /* fifo inv clk */
rtl9310_sds_field_w(dSds, 0x1, 0x1, 7, 4, 0xf);
rtl9310_sds_field_w(dSds, 0x1, 0x1, 3, 0, 0xf);
rtl9310_sds_field_w(dSds + 1, 0x0, 0xE, 12, 12, 1);
break;
- case PHY_INTERFACE_MODE_USXGMII: // MII_USXGMII_10GSXGMII/10GDXGMII/10GQXGMII:
+ case PHY_INTERFACE_MODE_USXGMII: /* MII_USXGMII_10GSXGMII/10GDXGMII/10GQXGMII: */
u32 i, evenSds;
u32 op_code = 0x6003;
}
break;
- case PHY_INTERFACE_MODE_10GBASER: // MII_10GR / MII_10GR1000BX_AUTO:
- // configure 10GR fiber mode=1
+ case PHY_INTERFACE_MODE_10GBASER: /* MII_10GR / MII_10GR1000BX_AUTO: */
+ /* configure 10GR fiber mode=1 */
rtl9310_sds_field_w(asds, 0x1f, 0xb, 1, 1, 1);
- // init fiber_1g
+ /* init fiber_1g */
rtl9310_sds_field_w(dSds, 0x3, 0x13, 15, 14, 0);
rtl9310_sds_field_w(dSds, 0x2, 0x0, 12, 12, 1);
rtl9310_sds_field_w(dSds, 0x2, 0x0, 6, 6, 1);
rtl9310_sds_field_w(dSds, 0x2, 0x0, 13, 13, 0);
- // init auto
+ /* init auto */
rtl9310_sds_field_w(asds, 0x1f, 13, 15, 0, 0x109e);
rtl9310_sds_field_w(asds, 0x1f, 0x6, 14, 10, 0x8);
rtl9310_sds_field_w(asds, 0x1f, 0x7, 10, 4, 0x7f);
rtl9310_sds_field_w(dSds, 0x1, 0x14, 8, 8, 1);
break;
- case PHY_INTERFACE_MODE_1000BASEX: // MII_1000BX_FIBER
+ case PHY_INTERFACE_MODE_1000BASEX: /* MII_1000BX_FIBER */
rtl9310_sds_field_w(dSds, 0x3, 0x13, 15, 14, 0);
rtl9310_sds_field_w(dSds, 0x2, 0x0, 12, 12, 1);
val = 0xa0000;
sw_w32(val, RTL931X_CHIP_INFO_ADDR);
val = sw_r32(RTL931X_CHIP_INFO_ADDR);
- if (val & BIT(28)) // consider 9311 etc. RTL9313_CHIP_ID == HWP_CHIP_ID(unit))
+ if (val & BIT(28)) /* consider 9311 etc. RTL9313_CHIP_ID == HWP_CHIP_ID(unit)) */
{
rtl931x_write_sds_phy(asds, 0x2E, 0x1, board_sds_tx2[sds - 2]);
} else {
struct rtl83xx_shared_private *shared = phydev->shared->priv;
shared->name = "RTL8218D";
/* Configuration must be done while patching still possible */
-// TODO: return configure_rtl8218d(phydev);
+/* TODO: return configure_rtl8218d(phydev); */
}
return 0;
struct part parts[10];
};
-// TODO: fixed path?
+/* TODO: fixed path? */
#define FIRMWARE_838X_8380_1 "rtl838x_phy/rtl838x_8380.fw"
#define FIRMWARE_838X_8214FC_1 "rtl838x_phy/rtl838x_8214fc.fw"
#define FIRMWARE_838X_8218b_1 "rtl838x_phy/rtl838x_8218b.fw"
#define PHY_ID_RTL8393_I 0x001c8393
#define PHY_ID_RTL9300_I 0x70d03106
-// PHY MMD devices
+/* PHY MMD devices */
#define MMD_AN 7
#define MMD_VEND2 31
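+/* Clause 45 MMD device addresses: 7 is Auto-Negotiation, 31 is Vendor Specific 2 */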