void rtcl_ccu_log_early(void)
{
- int clk_idx;
char meminfo[80], clkinfo[255], msg[255] = "rtl83xx-clk: initialized";
sprintf(meminfo, " (%d Bit DDR%d)", rtcl_ccu->dram.buswidth, rtcl_ccu->dram.type);
- for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
+ for (int clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
sprintf(clkinfo, ", %s %lu MHz", rtcl_clk_info[clk_idx].display_name,
rtcl_ccu->clks[clk_idx].startup / 1000000);
if (clk_idx == CLK_MEM)
void rtcl_ccu_log_late(void)
{
- int clk_idx;
struct rtcl_clk *rclk;
bool overclock = false;
char clkinfo[80], msg[255] = "rate setting enabled";
- for (clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
+ for (int clk_idx = 0; clk_idx < CLK_COUNT; clk_idx++) {
rclk = &rtcl_ccu->clks[clk_idx];
overclock |= rclk->max > rclk->startup;
sprintf(clkinfo, ", %s %lu-%lu MHz", rtcl_clk_info[clk_idx].display_name,
static int i2c_read(void __iomem *r0, u8 *buf, int len)
{
- int i;
- u32 v;
-
if (len > 16)
return -EIO;
- for (i = 0; i < len; i++) {
+ for (int i = 0; i < len; i++) {
+ u32 v;
+
if (i % 4 == 0)
v = readl(r0 + i);
buf[i] = v;
static int i2c_write(void __iomem *r0, u8 *buf, int len)
{
- u32 v;
- int i;
-
if (len > 16)
return -EIO;
- for (i = 0; i < len; i++) {
+ for (int i = 0; i < len; i++) {
+ u32 v;
+
if (! (i % 4))
v = 0;
v <<= 8;
void rtl_table_init(void)
{
- int i;
-
- for (i = 0; i < RTL_TBL_END; i++)
+ for (int i = 0; i < RTL_TBL_END; i++)
mutex_init(&rtl838x_tbl_regs[i].lock);
}
struct rtl838x_l2_entry e;
u64 seed = priv->r->l2_hash_seed(nh->mac, nh->rvid);
u32 key = priv->r->l2_hash_key(priv, seed);
- int i, idx = -1;
+ int idx = -1;
u64 entry;
pr_debug("%s searching for %08llx vid %d with key %d, seed: %016llx\n",
e.port = nh->port;
/* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
- for (i = 0; i < priv->l2_bucket_size; i++) {
+ for (int i = 0; i < priv->l2_bucket_size; i++) {
entry = priv->r->read_l2_entry_using_hash(key, i, &e);
if (!e.valid || ((entry & 0x0fffffffffffffffULL) == seed)) {
*/
int rtl83xx_port_is_under(const struct net_device * dev, struct rtl838x_switch_priv *priv)
{
- int i;
-
/* TODO: On 5.12:
* if(!dsa_slave_dev_check(dev)) {
* netdev_info(dev, "%s: not a DSA device.\n", __func__);
* }
*/
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (!priv->ports[i].dp)
continue;
if (priv->ports[i].dp->slave == dev)
*/
static int rtl83xx_alloc_router_mac(struct rtl838x_switch_priv *priv, u64 mac)
{
- int i, free_mac = -1;
+ int free_mac = -1;
struct rtl93xx_rt_mac m;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MAX_ROUTER_MACS; i++) {
+ for (int i = 0; i < MAX_ROUTER_MACS; i++) {
priv->r->get_l3_router_mac(i, &m);
if (free_mac < 0 && !m.valid) {
free_mac = i;
static int rtl83xx_alloc_egress_intf(struct rtl838x_switch_priv *priv, u64 mac, int vlan)
{
- int i, free_mac = -1;
+ int free_mac = -1;
struct rtl838x_l3_intf intf;
u64 m;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MAX_SMACS; i++) {
+ for (int i = 0; i < MAX_SMACS; i++) {
m = priv->r->get_l3_egress_mac(L3_EGRESS_DMACS + i);
if (free_mac < 0 && !m) {
free_mac = i;
static int __init rtl83xx_sw_probe(struct platform_device *pdev)
{
- int err = 0, i;
+ int err = 0;
struct rtl838x_switch_priv *priv;
struct device *dev = &pdev->dev;
u64 bpdu_mask;
* dsa_switch_tree, the tree is built when the switch
* is registered by dsa_register_switch
*/
- for (i = 0; i <= priv->cpu_port; i++)
+ for (int i = 0; i <= priv->cpu_port; i++)
priv->ports[i].dp = dsa_to_port(priv->ds, i);
/* Enable link and media change interrupts. Are the SERDES masks needed? */
priv->r->l3_setup(priv);
/* Clear all destination ports for mirror groups */
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
priv->mirror_group_ports[i] = -1;
/* Register netdevice event callback to catch changes in link aggregation groups */
loff_t *ppos)
{
struct rtl838x_switch_priv *priv = filp->private_data;
- int i;
const char **d;
u32 v;
char *buf;
if (!buf)
return -ENOMEM;
- for (i = 0; i < num; i++) {
+ for (int i = 0; i < num; i++) {
v = sw_r32(offset + (i << 2)) & 0xffff;
n += sprintf(buf + n, "%s: %d\n", d[i], v);
}
struct rtl838x_l2_entry *e)
{
u64 portmask;
- int i;
if (e->type == L2_UNICAST) {
seq_puts(m, "L2_UNICAST\n");
portmask = priv->r->read_mcast_pmask(e->mc_portmask_index);
seq_printf(m, " index %u ports", e->mc_portmask_index);
- for (i = 0; i < 64; i++) {
+ for (int i = 0; i < 64; i++) {
if (portmask & BIT_ULL(i))
seq_printf(m, " %d", i);
}
{
struct rtl838x_switch_priv *priv = m->private;
struct rtl838x_l2_entry e;
- int i, bucket, index;
+ int bucket, index;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < priv->fib_entries; i++) {
+ for (int i = 0; i < priv->fib_entries; i++) {
bucket = i >> 2;
index = i & 0x3;
priv->r->read_l2_entry_using_hash(bucket, index, &e);
cond_resched();
}
- for (i = 0; i < 64; i++) {
+ for (int i = 0; i < 64; i++) {
priv->r->read_cam(i, &e);
if (!e.valid)
static int rtl838x_dbgfs_leds(struct dentry *parent, struct rtl838x_switch_priv *priv)
{
struct dentry *led_dir;
- int p;
- char led_sw_p_ctrl_name[20];
- char port_led_name[20];
led_dir = debugfs_create_dir("led", parent);
(u32 *)(RTL838X_SW_BASE + RTL8380_LED1_SW_P_EN_CTRL));
debugfs_create_x32("led2_sw_p_en_ctrl", 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8380_LED2_SW_P_EN_CTRL));
- for (p = 0; p < 28; p++) {
+ for (int p = 0; p < 28; p++) {
+ char led_sw_p_ctrl_name[20];
+
snprintf(led_sw_p_ctrl_name, sizeof(led_sw_p_ctrl_name),
"led_sw_p_ctrl.%02d", p);
debugfs_create_x32(led_sw_p_ctrl_name, 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8380_LED_SW_P_CTRL(p)));
}
} else if (priv->family_id == RTL8390_FAMILY_ID) {
+ char port_led_name[20];
+
debugfs_create_x32("led_glb_ctrl", 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_GLB_CTRL));
debugfs_create_x32("led_set_2_3", 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_2_3_CTRL));
debugfs_create_x32("led_set_0_1", 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_SET_0_1_CTRL));
- for (p = 0; p < 4; p++) {
+ for (int p = 0; p < 4; p++) {
snprintf(port_led_name, sizeof(port_led_name), "led_copr_set_sel.%1d", p);
debugfs_create_x32(port_led_name, 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_COPR_SET_SEL_CTRL(p << 4)));
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_COMBO_CTRL(32)));
debugfs_create_x32("led_sw_ctrl", 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_CTRL));
- for (p = 0; p < 5; p++) {
+ for (int p = 0; p < 5; p++) {
snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_en_ctrl.%1d", p);
debugfs_create_x32(port_led_name, 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_EN_CTRL(p * 10)));
}
- for (p = 0; p < 28; p++) {
+ for (int p = 0; p < 28; p++) {
snprintf(port_led_name, sizeof(port_led_name), "led_sw_p_ctrl.%02d", p);
debugfs_create_x32(port_led_name, 0644, led_dir,
(u32 *)(RTL838X_SW_BASE + RTL8390_LED_SW_P_CTRL(p)));
struct dentry *port_dir;
struct dentry *mirror_dir;
struct debugfs_regset32 *port_ctrl_regset;
- int ret, i;
+ int ret;
char lag_name[10];
char mirror_name[10];
(u32 *)(RTL838X_SW_BASE + RTL838X_MODEL_NAME_INFO));
/* Create one directory per port */
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
ret = rtl838x_dbgfs_port_init(rtl838x_dir, priv, i);
if (ret)
debugfs_create_u8("id", 0444, port_dir, &priv->cpu_port);
/* Create entries for LAGs */
- for (i = 0; i < priv->n_lags; i++) {
+ for (int i = 0; i < priv->n_lags; i++) {
snprintf(lag_name, sizeof(lag_name), "lag.%02d", i);
if (priv->family_id == RTL8380_FAMILY_ID)
debugfs_create_x32(lag_name, 0644, rtl838x_dir,
}
/* Create directories for mirror groups */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
snprintf(mirror_name, sizeof(mirror_name), "mirror.%1d", i);
mirror_dir = debugfs_create_dir(mirror_name, rtl838x_dir);
if (priv->family_id == RTL8380_FAMILY_ID) {
static void rtl83xx_enable_phy_polling(struct rtl838x_switch_priv *priv)
{
- int i;
u64 v = 0;
msleep(1000);
/* Enable all ports with a PHY, including the SFP-ports */
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
v |= BIT_ULL(i);
}
static void rtl83xx_vlan_setup(struct rtl838x_switch_priv *priv)
{
struct rtl838x_vlan_info info;
- int i;
pr_info("In %s\n", __func__);
}
/* Initialize all vlans 0-4095 */
- for (i = 0; i < MAX_VLANS; i ++)
+ for (int i = 0; i < MAX_VLANS; i++)
priv->r->vlan_set_tagged(i, &info);
/* reset PVIDs; defaults to 1 on reset */
- for (i = 0; i <= priv->ds->num_ports; i++) {
+ for (int i = 0; i <= priv->ds->num_ports; i++) {
priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_INNER, 0);
priv->r->vlan_port_pvid_set(i, PBVLAN_TYPE_OUTER, 0);
priv->r->vlan_port_pvidmode_set(i, PBVLAN_TYPE_INNER, PBVLAN_MODE_UNTAG_AND_PRITAG);
}
/* Set forwarding action based on inner VLAN tag */
- for (i = 0; i < priv->cpu_port; i++)
+ for (int i = 0; i < priv->cpu_port; i++)
priv->r->vlan_fwd_on_inner(i, true);
}
static void rtl83xx_setup_bpdu_traps(struct rtl838x_switch_priv *priv)
{
- int i;
-
- for (i = 0; i < priv->cpu_port; i++)
+ for (int i = 0; i < priv->cpu_port; i++)
priv->r->set_receive_management_action(i, BPDU, COPY2CPU);
}
static int rtl83xx_setup(struct dsa_switch *ds)
{
- int i;
struct rtl838x_switch_priv *priv = ds->priv;
u64 port_bitmap = BIT_ULL(priv->cpu_port);
/* Disable MAC polling the PHY so that we can start configuration */
priv->r->set_port_reg_le(0ULL, priv->r->smi_poll_ctrl);
- for (i = 0; i < ds->num_ports; i++)
+ for (int i = 0; i < ds->num_ports; i++)
priv->ports[i].enable = false;
priv->ports[priv->cpu_port].enable = true;
/* Setting bit j in register RTL838X_PORT_ISO_CTRL(i) allows
* traffic from source port i to destination port j
*/
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
priv->r->set_port_reg_be(BIT_ULL(priv->cpu_port) | BIT_ULL(i),
priv->r->port_iso_ctrl(i));
static int rtl93xx_setup(struct dsa_switch *ds)
{
- int i;
struct rtl838x_switch_priv *priv = ds->priv;
u32 port_bitmap = BIT(priv->cpu_port);
}
/* Disable all ports except CPU port */
- for (i = 0; i < ds->num_ports; i++)
+ for (int i = 0; i < ds->num_ports; i++)
priv->ports[i].enable = false;
priv->ports[priv->cpu_port].enable = true;
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
priv->r->traffic_set(i, BIT_ULL(priv->cpu_port) | BIT_ULL(i));
port_bitmap |= BIT_ULL(i);
static void rtl83xx_get_strings(struct dsa_switch *ds,
int port, u32 stringset, u8 *data)
{
- int i;
-
if (stringset != ETH_SS_STATS)
return;
- for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++)
+ for (int i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++)
strncpy(data + i * ETH_GSTRING_LEN, rtl83xx_mib[i].name,
ETH_GSTRING_LEN);
}
{
struct rtl838x_switch_priv *priv = ds->priv;
const struct rtl83xx_mib_desc *mib;
- int i;
u64 h;
- for (i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++) {
+ for (int i = 0; i < ARRAY_SIZE(rtl83xx_mib); i++) {
mib = &rtl83xx_mib[i];
data[i] = sw_r32(priv->r->stat_port_std_mib + (port << 8) + 252 - mib->offset);
static void store_mcgroups(struct rtl838x_switch_priv *priv, int port)
{
- int mc_group;
-
- for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ for (int mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
u64 portmask = priv->r->read_mcast_pmask(mc_group);
if (portmask & BIT_ULL(port)) {
priv->mc_group_saves[mc_group] = port;
static void load_mcgroups(struct rtl838x_switch_priv *priv, int port)
{
- int mc_group;
-
- for (mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
+ for (int mc_group = 0; mc_group < MAX_MC_GROUPS; mc_group++) {
if (priv->mc_group_saves[mc_group] == port) {
rtl83xx_mc_group_add_port(priv, mc_group, port);
priv->mc_group_saves[mc_group] = -1;
{
struct rtl838x_switch_priv *priv = ds->priv;
u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
- int i;
pr_debug("%s %x: %d %llx", __func__, (u32)priv, port, port_bitmap);
}
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < ds->num_ports; i++) {
+ for (int i = 0; i < ds->num_ports; i++) {
/* Add this port to the port matrix of the other ports in the
* same bridge. If the port is disabled, port matrix is kept
* and not being setup until the port becomes enabled.
{
struct rtl838x_switch_priv *priv = ds->priv;
u64 port_bitmap = BIT_ULL(priv->cpu_port), v;
- int i;
pr_debug("%s %x: %d", __func__, (u32)priv, port);
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < ds->num_ports; i++) {
+ for (int i = 0; i < ds->num_ports; i++) {
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
static int rtl83xx_find_l2_hash_entry(struct rtl838x_switch_priv *priv, u64 seed,
bool must_exist, struct rtl838x_l2_entry *e)
{
- int i, idx = -1;
+ int idx = -1;
u32 key = priv->r->l2_hash_key(priv, seed);
u64 entry;
pr_debug("%s: using key %x, for seed %016llx\n", __func__, key, seed);
/* Loop over all entries in the hash-bucket and over the second block on 93xx SoCs */
- for (i = 0; i < priv->l2_bucket_size; i++) {
+ for (int i = 0; i < priv->l2_bucket_size; i++) {
entry = priv->r->read_l2_entry_using_hash(key, i, e);
pr_debug("valid %d, mac %016llx\n", e->valid, ether_addr_to_u64(&e->mac[0]));
if (must_exist && !e->valid)
static int rtl83xx_find_l2_cam_entry(struct rtl838x_switch_priv *priv, u64 seed,
bool must_exist, struct rtl838x_l2_entry *e)
{
- int i, idx = -1;
+ int idx = -1;
u64 entry;
- for (i = 0; i < 64; i++) {
+ for (int i = 0; i < 64; i++) {
entry = priv->r->read_cam(i, e);
if (!must_exist && !e->valid) {
if (idx < 0) /* First empty entry? */
{
struct rtl838x_l2_entry e;
struct rtl838x_switch_priv *priv = ds->priv;
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < priv->fib_entries; i++) {
+ for (int i = 0; i < priv->fib_entries; i++) {
priv->r->read_l2_entry_using_hash(i >> 2, i & 0x3, &e);
if (!e.valid)
cond_resched();
}
- for (i = 0; i < 64; i++) {
+ for (int i = 0; i < 64; i++) {
priv->r->read_cam(i, &e);
if (!e.valid)
static void rtl838x_rate_control_init(struct rtl838x_switch_priv *priv)
{
- int i;
-
pr_info("Enabling Storm control\n");
/* TICK_PERIOD_PPS */
if (priv->id == 0x8380)
/* Enable storm control on all ports with a PHY and limit rates,
* for UC and MC for both known and unknown addresses
*/
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy) {
sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_UC(i));
sw_w32((1 << 18) | 0x8000, RTL838X_STORM_CTRL_PORT_MC(i));
static void rtl839x_rate_control_init(struct rtl838x_switch_priv *priv)
{
- int p, q;
-
pr_info("%s: enabling rate control\n", __func__);
/* Tick length and token size settings for SoC with 250MHz,
* RTL8350 family would use 50MHz
* for UC, MC and BC
* For 1G port, the minimum burst rate is 1700, maximum 65535,
* For 10G ports it is 2650 and 1048575 respectively */
- for (p = 0; p < priv->cpu_port; p++) {
+ for (int p = 0; p < priv->cpu_port; p++) {
if (priv->ports[p].phy && !priv->ports[p].is10G) {
sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_UC_1(p));
sw_w32_mask(0xffff, 0x8000, RTL839X_STORM_CTRL_PORT_MC_1(p));
}
/* Setup ingress/egress per-port rate control */
- for (p = 0; p < priv->cpu_port; p++) {
+ for (int p = 0; p < priv->cpu_port; p++) {
if (!priv->ports[p].phy)
continue;
rtl839x_set_egress_rate(priv, p, 62500); /* 1GB/s */
/* Setup queues: all RTL83XX SoCs have 8 queues, maximum rate */
- for (q = 0; q < 8; q++)
+ for (int q = 0; q < 8; q++)
rtl839x_egress_rate_queue_limit(priv, p, q, 0xfffff);
if (priv->ports[p].is10G) {
void rtl838x_setup_prio2queue_matrix(int *min_queues)
{
- int i;
u32 v;
pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL838X_QM_INTPRI2QID_CTRL));
- for (i = 0; i < MAX_PRIOS; i++)
+ for (int i = 0; i < MAX_PRIOS; i++)
v |= i << (min_queues[i] * 3);
sw_w32(v, RTL838X_QM_INTPRI2QID_CTRL);
}
void rtl839x_setup_prio2queue_matrix(int *min_queues)
{
- int i, q;
-
pr_info("Current Intprio2queue setting: %08x\n", sw_r32(RTL839X_QM_INTPRI2QID_CTRL(0)));
- for (i = 0; i < MAX_PRIOS; i++) {
- q = min_queues[i];
+ for (int i = 0; i < MAX_PRIOS; i++) {
+ int q = min_queues[i];
sw_w32(i << (q * 3), RTL839X_QM_INTPRI2QID_CTRL(q));
}
}
{
int reg = soc_info.family == RTL8380_FAMILY_ID ? RTL838X_QM_PKT2CPU_INTPRI_MAP
: RTL839X_QM_PKT2CPU_INTPRI_MAP;
- int i;
u32 v;
pr_info("QM_PKT2CPU_INTPRI_MAP: %08x\n", sw_r32(reg));
- for (i = 0; i < MAX_PRIOS; i++)
+ for (int i = 0; i < MAX_PRIOS; i++)
v |= max_queues[i] << (i * 3);
sw_w32(v, reg);
}
void rtl839x_set_scheduling_queue_weights(struct rtl838x_switch_priv *priv, int port,
int *queue_weights)
{
- int i, lsb, low_byte, start_bit, high_mask;
-
mutex_lock(&priv->reg_mutex);
rtl839x_read_scheduling_table(port);
- for (i = 0; i < 8; i++) {
- lsb = 48 + i * 8;
- low_byte = 8 - (lsb >> 5);
- start_bit = lsb - (low_byte << 5);
- high_mask = 0x3ff >> (32 - start_bit);
+ for (int i = 0; i < 8; i++) {
+ int lsb = 48 + i * 8;
+ int low_byte = 8 - (lsb >> 5);
+ int start_bit = lsb - (low_byte << 5);
+ int high_mask = 0x3ff >> (32 - start_bit);
+
sw_w32_mask(0x3ff << start_bit, (queue_weights[i] & 0x3ff) << start_bit,
RTL839X_TBL_ACCESS_DATA_2(low_byte));
if (high_mask)
void rtl838x_config_qos(void)
{
- int i, p;
u32 v;
pr_info("Setting up RTL838X QoS\n");
/* Set the inner and outer priority one-to-one to re-marked outer dot1p priority */
v = 0;
- for (p = 0; p < 8; p++)
+ for (int p = 0; p < 8; p++)
v |= p << (3 * p);
sw_w32(v, RTL838X_RMK_OPRI_CTRL);
sw_w32(v, RTL838X_RMK_IPRI_CTRL);
v = 0;
- for (p = 0; p < 8; p++)
+ for (int p = 0; p < 8; p++)
v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
sw_w32(v, RTL838X_PRI_SEL_IPRI_REMAP);
/* On all ports set scheduler type to WFQ */
- for (i = 0; i <= soc_info.cpu_port; i++)
+ for (int i = 0; i <= soc_info.cpu_port; i++)
sw_w32(0, RTL838X_SCHED_P_TYPE_CTRL(i));
/* Enable egress scheduler for CPU-Port */
void rtl839x_config_qos(void)
{
- int port, p, q;
u32 v;
struct rtl838x_switch_priv *priv = switch_priv;
pr_info("RTL839X_PRI_SEL_TBL_CTRL(i): %08x\n", sw_r32(RTL839X_PRI_SEL_TBL_CTRL(0)));
rtl83xx_setup_default_prio2queue();
- for (port = 0; port < soc_info.cpu_port; port++)
+ for (int port = 0; port < soc_info.cpu_port; port++)
sw_w32(7, RTL839X_QM_PORT_QNUM(port));
/* CPU-port gets queue number 7 */
sw_w32(7, RTL839X_QM_PORT_QNUM(soc_info.cpu_port));
- for (port = 0; port <= soc_info.cpu_port; port++) {
+ for (int port = 0; port <= soc_info.cpu_port; port++) {
rtl83xx_set_ingress_priority(port, 0);
rtl839x_set_scheduling_algorithm(priv, port, WEIGHTED_FAIR_QUEUE);
rtl839x_set_scheduling_queue_weights(priv, port, default_queue_weights);
/* Remap dot1p priorities to internal priority, for this the outer tag needs be re-marked */
v = 0;
- for (p = 0; p < 8; p++)
+ for (int p = 0; p < 8; p++)
v |= (dot1p_priority_remapping[p] & 0x7) << (p * 3);
sw_w32(v, RTL839X_PRI_SEL_IPRI_REMAP);
/* Set queue-based congestion avoidance properties, register fields are as
* for forward RTL839X_WRED_PORT_THR_CTRL
*/
- for (q = 0; q < 8; q++) {
+ for (int q = 0; q < 8; q++) {
sw_w32(255 << 24 | 78 << 12 | 68, RTL839X_WRED_QUEUE_THR_CTRL(q, 0));
sw_w32(255 << 24 | 74 << 12 | 64, RTL839X_WRED_QUEUE_THR_CTRL(q, 0));
sw_w32(255 << 24 | 70 << 12 | 60, RTL839X_WRED_QUEUE_THR_CTRL(q, 0));
void rtl838x_print_matrix(void)
{
unsigned volatile int *ptr8;
- int i;
ptr8 = RTL838X_SW_BASE + RTL838X_PORT_ISO_CTRL(0);
- for (i = 0; i < 28; i += 8)
+ for (int i = 0; i < 28; i += 8)
pr_debug("> %8x %8x %8x %8x %8x %8x %8x %8x\n",
ptr8[i + 0], ptr8[i + 1], ptr8[i + 2], ptr8[i + 3],
ptr8[i + 4], ptr8[i + 5], ptr8[i + 6], ptr8[i + 7]);
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0); /* Access L2 Table 0 */
u32 idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
- int i;
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 0);
- int i;
u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
rtl838x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); /* Access L2 Table 1 */
- int i;
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8380_TBL_L2, 1); /* Access L2 Table 1 */
- int i;
rtl838x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
static void rtl838x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 15 | /* Execute cmd */
1 << 14 | /* Read */
2 << 12 | /* Table type 0b10 */
(msti & 0xfff);
priv->r->exec_tbl0_cmd(cmd);
- for (i = 0; i < 2; i++)
+ for (int i = 0; i < 2; i++)
port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
}
static void rtl838x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 15 | /* Execute cmd */
0 << 14 | /* Write */
2 << 12 | /* Table type 0b10 */
(msti & 0xfff);
- for (i = 0; i < 2; i++)
+ for (int i = 0; i < 2; i++)
sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
priv->r->exec_tbl0_cmd(cmd);
}
static void rtl838x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
{
- int i;
-
pr_info("Setting up EEE, state: %d\n", enable);
sw_w32_mask(0x4, 0, RTL838X_SMI_GLB_CTRL);
sw_w32(0x5001417, RTL838X_EEE_TX_TIMER_GELITE_CTRL);
/* Enable EEE MAC support on ports */
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl838x_port_eee_set(priv, i, enable);
}
int block_from = index_from / PIE_BLOCK_SIZE;
int block_to = index_to / PIE_BLOCK_SIZE;
u32 v = (index_from << 1)| (index_to << 12 ) | BIT(0);
- int block;
u32 block_state;
pr_debug("%s: from %d to %d\n", __func__, index_from, index_to);
block_state = sw_r32(RTL838X_ACL_BLK_LOOKUP_CTRL);
/* Make sure rule-lookup is disabled in the relevant blocks */
- for (block = block_from; block <= block_to; block++) {
+ for (int block = block_from; block <= block_to; block++) {
if (block_state & BIT(block))
sw_w32(block_state & (~BIT(block)), RTL838X_ACL_BLK_LOOKUP_CTRL);
}
} while (sw_r32(RTL838X_ACL_CLR_CTRL) & BIT(0));
/* Re-enable rule lookup */
- for (block = block_from; block <= block_to; block++) {
+ for (int block = block_from; block <= block_to; block++) {
if (!(block_state & BIT(block)))
sw_w32(block_state | BIT(block), RTL838X_ACL_BLK_LOOKUP_CTRL);
}
*/
static void rtl838x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- enum template_field_id field_type;
- u16 data, data_m;
-
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- field_type = t[i];
- data = data_m = 0;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id field_type = t[i];
+ u16 data = 0, data_m = 0;
switch (field_type) {
case TEMPLATE_FIELD_SPM0:
*/
static void rtl838x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- enum template_field_id field_type;
- u16 data, data_m;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id field_type = t[i];
+ u16 data, data_m;
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- field_type = t[i];
if (!(i % 2)) {
data = r[5 - i / 2];
/* Read IACL table (1) via register 0 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
u32 r[18];
- int i;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
memset(pr, 0, sizeof(*pr));
rtl_table_read(q, idx);
- for (i = 0; i < 18; i++)
+ for (int i = 0; i < 18; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
/* Access IACL table (1) via register 0 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 1);
u32 r[18];
- int i, err = 0;
+ int err = 0;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block));
pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
- for (i = 0; i < 18; i++)
+ for (int i = 0; i < 18; i++)
r[i] = 0;
if (!pr->valid)
/* rtl838x_pie_rule_dump_raw(r); */
- for (i = 0; i < 18; i++)
+ for (int i = 0; i < 18; i++)
sw_w32(r[i], rtl_table_data(q, i));
err_out:
static bool rtl838x_pie_templ_has(int t, enum template_field_id field_type)
{
- int i;
- enum template_field_id ft;
- for (i = 0; i < N_FIXED_FIELDS; i++) {
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
- ft = fixed_templates[t][i];
+ enum template_field_id ft = fixed_templates[t][i];
if (field_type == ft)
return true;
static int rtl838x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
{
- int idx, block, j, t;
+ int idx, block, j;
pr_debug("In %s\n", __func__);
for (block = 0; block < priv->n_pie_blocks; block++) {
for (j = 0; j < 3; j++) {
- t = (sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
+ int t = (sw_r32(RTL838X_ACL_BLK_TMPLTE_CTRL(block)) >> (j * 3)) & 0x7;
pr_debug("Testing block %d, template %d, template id %d\n", block, j, t);
idx = rtl838x_pie_verify_template(priv, pr, t, block);
if (idx >= 0)
*/
static void rtl838x_pie_init(struct rtl838x_switch_priv *priv)
{
- int i;
u32 template_selectors;
mutex_init(&priv->pie_mutex);
/* Enable ACL lookup on all ports, including CPU_PORT */
- for (i = 0; i <= priv->cpu_port; i++)
+ for (int i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL838X_ACL_PORT_LOOKUP_CTRL(i));
/* Power on all PIE blocks */
- for (i = 0; i < priv->n_pie_blocks; i++)
+ for (int i = 0; i < priv->n_pie_blocks; i++)
sw_w32_mask(0, BIT(i), RTL838X_ACL_BLK_PWR_CTRL);
/* Include IPG in metering */
/* Enable predefined templates 0, 1 and 2 for even blocks */
template_selectors = 0 | (1 << 3) | (2 << 6);
- for (i = 0; i < 6; i += 2)
+ for (int i = 0; i < 6; i += 2)
sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 0, 3 and 4 (IPv6 support) for odd blocks */
template_selectors = 0 | (3 << 3) | (4 << 6);
- for (i = 1; i < priv->n_pie_blocks; i += 2)
+ for (int i = 1; i < priv->n_pie_blocks; i += 2)
sw_w32(template_selectors, RTL838X_ACL_BLK_TMPLTE_CTRL(i));
/* Group each pair of physical blocks together to a logical block */
u32 status = sw_r32(RTL838X_ISR_GLB_SRC);
u32 ports = sw_r32(RTL838X_ISR_PORT_LINK_STS_CHG);
u32 link;
- int i;
/* Clear status */
sw_w32(ports, RTL838X_ISR_PORT_LINK_STS_CHG);
pr_info("RTL8380 Link change: status: %x, ports %x\n", status, ports);
- for (i = 0; i < 28; i++) {
+ for (int i = 0; i < 28; i++) {
if (ports & BIT(i)) {
link = sw_r32(RTL838X_MAC_LINK_STS);
if (link & BIT(i))
void rtl839x_print_matrix(void)
{
volatile u64 *ptr9;
- int i;
ptr9 = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0);
- for (i = 0; i < 52; i += 4)
+ for (int i = 0; i < 52; i += 4)
pr_debug("> %16llx %16llx %16llx %16llx\n",
ptr9[i + 0], ptr9[i + 1], ptr9[i + 2], ptr9[i + 3]);
pr_debug("CPU_PORT> %16llx\n", ptr9[52]);
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
u32 idx = (0 << 14) | (hash << 2) | pos; /* Search SRAM, with hash and at pos in bucket */
- int i;
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 0);
- int i;
u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
rtl839x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); /* Access L2 Table 1 */
- int i;
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL8390_TBL_L2, 1); /* Access L2 Table 1 */
- int i;
rtl839x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
u32 status = sw_r32(RTL839X_ISR_GLB_SRC);
u64 ports = rtl839x_get_port_reg_le(RTL839X_ISR_PORT_LINK_STS_CHG);
u64 link;
- int i;
/* Clear status */
rtl839x_set_port_reg_le(ports, RTL839X_ISR_PORT_LINK_STS_CHG);
pr_debug("RTL8390 Link change: status: %x, ports %llx\n", status, ports);
- for (i = 0; i < RTL839X_CPU_PORT; i++) {
+ for (int i = 0; i < RTL839X_CPU_PORT; i++) {
if (ports & BIT_ULL(i)) {
link = rtl839x_get_port_reg_le(RTL839X_MAC_LINK_STS);
if (link & BIT_ULL(i))
static void rtl839x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 16 | /* Execute cmd */
0 << 15 | /* Read */
5 << 12 | /* Table type 0b101 */
(msti & 0xfff);
priv->r->exec_tbl0_cmd(cmd);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
}
static void rtl839x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 16 | /* Execute cmd */
1 << 15 | /* Write */
5 << 12 | /* Table type 0b101 */
(msti & 0xfff);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
priv->r->exec_tbl0_cmd(cmd);
}
static void rtl839x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
{
- int i;
-
pr_info("Setting up EEE, state: %d\n", enable);
/* Set wake timer for TX and pause timer both to 0x21 */
sw_w32_mask(0xff << 20, 0x11 << 20, RTL839X_EEE_TX_TIMER_10G_CTRL);
/* Setup EEE on all ports */
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl839x_port_eee_set(priv, i, enable);
}
*/
static void rtl839x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- enum template_field_id field_type;
- u16 data, data_m;
-
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- field_type = t[i];
- data = data_m = 0;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id field_type = t[i];
+ u16 data = 0, data_m = 0;
switch (field_type) {
case TEMPLATE_FIELD_SPM0:
*/
void rtl839x_read_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- enum template_field_id field_type;
- u16 data, data_m;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id field_type = t[i];
+ u16 data, data_m;
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- field_type = t[i];
if (!(i % 2)) {
data = r[5 - i / 2];
data_m = r[12 - i / 2];
/* Read IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL8380_TBL_0, 2);
u32 r[17];
- int i;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
memset(pr, 0, sizeof(*pr));
rtl_table_read(q, idx);
- for (i = 0; i < 17; i++)
+ for (int i = 0; i < 17; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
/* Access IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL8390_TBL_0, 2);
u32 r[17];
- int i;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL839X_ACL_BLK_TMPLTE_CTRL(block));
pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
- for (i = 0; i < 17; i++)
+ for (int i = 0; i < 17; i++)
r[i] = 0;
if (!pr->valid) {
/* rtl839x_pie_rule_dump_raw(r); */
- for (i = 0; i < 17; i++)
+ for (int i = 0; i < 17; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
static bool rtl839x_pie_templ_has(int t, enum template_field_id field_type)
{
- int i;
- enum template_field_id ft;
-
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- ft = fixed_templates[t][i];
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id ft = fixed_templates[t][i];
if (field_type == ft)
return true;
}
static void rtl839x_pie_init(struct rtl838x_switch_priv *priv)
{
- int i;
u32 template_selectors;
mutex_init(&priv->pie_mutex);
/* Power on all PIE blocks */
- for (i = 0; i < priv->n_pie_blocks; i++)
+ for (int i = 0; i < priv->n_pie_blocks; i++)
sw_w32_mask(0, BIT(i), RTL839X_PS_ACL_PWR_CTRL);
/* Set ingress and egress ACL blocks to 50/50: first Egress block is 9 */
/* Enable predefined templates 0, 1 for blocks 0-2 */
template_selectors = 0 | (1 << 3);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for blocks 3-5 */
template_selectors = 2 | (3 << 3);
- for (i = 3; i < 6; i++)
+ for (int i = 3; i < 6; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 1, 4 for blocks 6-8 */
template_selectors = 2 | (3 << 3);
- for (i = 6; i < 9; i++)
+ for (int i = 6; i < 9; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 0, 1 for blocks 9-11 */
template_selectors = 0 | (1 << 3);
- for (i = 9; i < 12; i++)
+ for (int i = 9; i < 12; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for blocks 12-14 */
template_selectors = 2 | (3 << 3);
- for (i = 12; i < 15; i++)
+ for (int i = 12; i < 15; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 1, 4 for blocks 15-17 */
template_selectors = 2 | (3 << 3);
- for (i = 15; i < 18; i++)
+ for (int i = 15; i < 18; i++)
sw_w32(template_selectors, RTL839X_ACL_BLK_TMPLTE_CTRL(i));
}
/* Configure the switch's own MAC addresses used when routing packets */
static void rtl839x_setup_port_macs(struct rtl838x_switch_priv *priv)
{
- int i;
struct net_device *dev;
u64 mac;
dev = priv->ports[priv->cpu_port].dp->slave;
mac = ether_addr_to_u64(dev->dev_addr);
- for (i = 0; i < 15; i++) {
+ for (int i = 0; i < 15; i++) {
mac++; /* BUG: VRRP for testing */
sw_w32(mac >> 32, RTL839X_ROUTING_SA_CTRL + i * 8);
sw_w32(mac, RTL839X_ROUTING_SA_CTRL + i * 8 + 4);
void rtl930x_print_matrix(void)
{
- int i;
struct table_reg *r = rtl_table_get(RTL9300_TBL_0, 6);
- for (i = 0; i < 29; i++) {
+ for (int i = 0; i < 29; i++) {
rtl_table_read(r, i);
pr_debug("> %08x\n", sw_r32(rtl_table_data(r, 0)));
}
static void rtl930x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 17 | /* Execute cmd */
0 << 16 | /* Read */
4 << 12 | /* Table type 0b10 */
(msti & 0xfff);
priv->r->exec_tbl0_cmd(cmd);
- for (i = 0; i < 2; i++)
+ for (int i = 0; i < 2; i++)
port_state[i] = sw_r32(RTL930X_TBL_ACCESS_DATA_0(i));
pr_debug("MSTI: %d STATE: %08x, %08x\n", msti, port_state[0], port_state[1]);
}
static void rtl930x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 17 | /* Execute cmd */
1 << 16 | /* Write */
4 << 12 | /* Table type 4 */
(msti & 0xfff);
- for (i = 0; i < 2; i++)
+ for (int i = 0; i < 2; i++)
sw_w32(port_state[i], RTL930X_TBL_ACCESS_DATA_0(i));
priv->r->exec_tbl0_cmd(cmd);
}
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
u32 idx;
- int i;
u64 mac;
u64 seed;
pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 0);
u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
- int i;
pr_debug("%s: hash %d, pos %d\n", __func__, hash, pos);
pr_debug("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx,
rtl930x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1);
- int i;
rtl_table_read(q, idx);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
{
u32 r[3];
struct table_reg *q = rtl_table_get(RTL9300_TBL_L2, 1); /* Access L2 Table 1 */
- int i;
rtl930x_fill_l2_row(r, e);
- for (i = 0; i < 3; i++)
+ for (int i = 0; i < 3; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
void rtl9300_dump_debug(void)
{
- int i;
u16 r = RTL930X_STAT_PRVTE_DROP_COUNTER0;
- for (i = 0; i < 10; i ++) {
+ for (int i = 0; i < 10; i++) {
pr_info("# %d %08x %08x %08x %08x %08x %08x %08x %08x\n", i * 8,
sw_r32(r), sw_r32(r + 4), sw_r32(r + 8), sw_r32(r + 12),
sw_r32(r + 16), sw_r32(r + 20), sw_r32(r + 24), sw_r32(r + 28));
struct dsa_switch *ds = dev_id;
u32 ports = sw_r32(RTL930X_ISR_PORT_LINK_STS_CHG);
u32 link;
- int i;
/* Clear status */
sw_w32(ports, RTL930X_ISR_PORT_LINK_STS_CHG);
- for (i = 0; i < 28; i++) {
+ for (int i = 0; i < 28; i++) {
if (ports & BIT(i)) {
/* Read the register twice because of issues with latency at least
* with the external RTL8226 PHY on the XGS1210
static void rtl930x_init_eee(struct rtl838x_switch_priv *priv, bool enable)
{
- int i;
-
pr_info("Setting up EEE, state: %d\n", enable);
/* Setup EEE on all ports */
- for (i = 0; i < priv->cpu_port; i++) {
+ for (int i = 0; i < priv->cpu_port; i++) {
if (priv->ports[i].phy)
rtl930x_port_eee_set(priv, i, enable);
}
{
u32 ip4_m, v;
struct in6_addr ip6_m;
- int i;
if (rt->attr.type == 1 || rt->attr.type == 3) /* Hardware only supports UC routes */
return -1;
sw_w32_mask(0x3 << 19, rt->attr.type, RTL930X_L3_HW_LU_KEY_CTRL);
if (rt->attr.type) { /* IPv6 */
rtl930x_net6_mask(rt->prefix_len, &ip6_m);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
sw_w32(rt->dst_ip6.s6_addr32[0] & ip6_m.s6_addr32[0],
RTL930X_L3_HW_LU_KEY_IP_CTRL + (i << 2));
} else { /* IPv4 */
static int rtl930x_find_l3_slot(struct rtl83xx_route *rt, bool must_exist)
{
- int t, s, slot_width, algorithm, addr, idx;
+ int slot_width, algorithm, addr, idx;
u32 hash;
struct rtl83xx_route route_entry;
/* IPv6 entries take up 3 slots */
slot_width = (rt->attr.type == 0) || (rt->attr.type == 2) ? 1 : 3;
- for (t = 0; t < 2; t++) {
+ for (int t = 0; t < 2; t++) {
algorithm = (sw_r32(RTL930X_L3_HOST_TBL_CTRL) >> (2 + t)) & 0x1;
hash = rtl930x_l3_hash4(rt->dst_ip, algorithm, false);
pr_debug("%s: table %d, algorithm %d, hash %04x\n", __func__, t, algorithm, hash);
- for (s = 0; s < 6; s += slot_width) {
+ for (int s = 0; s < 6; s += slot_width) {
addr = (t << 12) | ((hash & 0x1ff) << 3) | s;
pr_debug("%s physical address %d\n", __func__, addr);
idx = ((addr / 8) * 6) + (addr % 8);
*/
static void rtl930x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- enum template_field_id field_type;
- u16 data, data_m;
-
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- field_type = t[i];
- data = data_m = 0;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id field_type = t[i];
+ u16 data = 0, data_m = 0;
switch (field_type) {
case TEMPLATE_FIELD_SPM0:
/* Access IACL table (2) via register 0 */
struct table_reg *q = rtl_table_get(RTL9300_TBL_0, 2);
u32 r[19];
- int i;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL930X_PIE_BLK_TMPLTE_CTRL(block));
pr_debug("%s: %d, t_select: %08x\n", __func__, idx, t_select);
- for (i = 0; i < 19; i++)
+ for (int i = 0; i < 19; i++)
r[i] = 0;
if (!pr->valid) {
/* rtl930x_pie_rule_dump_raw(r); */
- for (i = 0; i < 19; i++)
+ for (int i = 0; i < 19; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
static bool rtl930x_pie_templ_has(int t, enum template_field_id field_type)
{
- int i;
- enum template_field_id ft;
-
- for (i = 0; i < N_FIXED_FIELDS; i++) {
- ft = fixed_templates[t][i];
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ enum template_field_id ft = fixed_templates[t][i];
if (field_type == ft)
return true;
}
static void rtl930x_pie_init(struct rtl838x_switch_priv *priv)
{
- int i;
u32 template_selectors;
mutex_init(&priv->pie_mutex);
pr_info("%s\n", __func__);
/* Enable ACL lookup on all ports, including CPU_PORT */
- for (i = 0; i <= priv->cpu_port; i++)
+ for (int i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL930X_ACL_PORT_LOOKUP_CTRL(i));
/* Include IPG in metering */
/* Enable predefined templates 0, 1 for first quarter of all blocks */
template_selectors = 0 | (1 << 4);
- for (i = 0; i < priv->n_pie_blocks / 4; i++)
+ for (int i = 0; i < priv->n_pie_blocks / 4; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for second quarter of all blocks */
template_selectors = 2 | (3 << 4);
- for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
+ for (int i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 0, 1 for third half of all blocks */
template_selectors = 0 | (1 << 4);
- for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
+ for (int i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for fourth quater of all blocks */
template_selectors = 2 | (3 << 4);
- for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
+ for (int i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
sw_w32(template_selectors, RTL930X_PIE_BLK_TMPLTE_CTRL(i));
}
*/
int rtl930x_l3_setup(struct rtl838x_switch_priv *priv)
{
- int i;
-
/* Setup MTU with id 0 for default interface */
- for (i = 0; i < MAX_INTF_MTUS; i++)
+ for (int i = 0; i < MAX_INTF_MTUS; i++)
priv->intf_mtu_count[i] = priv->intf_mtus[i] = 0;
priv->intf_mtu_count[0] = 0; /* Needs to stay forever */
sw_w32_mask(0xffff0000, DEFAULT_MTU << 16, RTL930X_L3_IP6_MTU_CTRL(1));
/* Clear all source port MACs */
- for (i = 0; i < MAX_SMACS; i++)
+ for (int i = 0; i < MAX_SMACS; i++)
rtl930x_set_l3_egress_mac(L3_EGRESS_DMACS + i, 0ULL);
/* Configure the default L3 hash algorithm */
static void rtl930x_led_init(struct rtl838x_switch_priv *priv)
{
- int i, pos;
- u32 v, pm = 0, set;
- u32 setlen;
- const __be32 *led_set;
- char set_name[9];
struct device_node *node;
+ u32 pm = 0;
pr_info("%s called\n", __func__);
node = of_find_compatible_node(NULL, NULL, "realtek,rtl9300-leds");
return;
}
- for (i = 0; i < priv->cpu_port; i++) {
- pos = (i << 1) % 32;
+ for (int i = 0; i < priv->cpu_port; i++) {
+ int pos = (i << 1) % 32;
+ u32 set;
+ u32 v;
+
sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i));
sw_w32_mask(0x3 << pos, 0, RTL930X_LED_PORT_COPR_SET_SEL_CTRL(i));
sw_w32_mask(0, set << pos, RTL930X_LED_PORT_FIB_SET_SEL_CTRL(i));
}
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
+ const __be32 *led_set;
+ char set_name[9];
+ u32 setlen;
+ u32 v;
+
sprintf(set_name, "led_set%d", i);
led_set = of_get_property(node, set_name, &setlen);
if (!led_set || setlen != 16)
sw_w32(pm, RTL930X_LED_PORT_FIB_MASK_CTRL);
sw_w32(pm, RTL930X_LED_PORT_COMBO_MASK_CTRL);
- for (i = 0; i < 24; i++)
+ for (int i = 0; i < 24; i++)
pr_info("%s %08x: %08x\n",__func__, 0xbb00cc00 + i * 4, sw_r32(0xcc00 + i * 4));
}
static void rtl931x_stp_get(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 20 | /* Execute cmd */
0 << 19 | /* Read */
5 << 15 | /* Table type 0b101 */
(msti & 0x3fff);
priv->r->exec_tbl0_cmd(cmd);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
port_state[i] = sw_r32(priv->r->tbl_access_data_0(i));
}
static void rtl931x_stp_set(struct rtl838x_switch_priv *priv, u16 msti, u32 port_state[])
{
- int i;
u32 cmd = 1 << 20 | /* Execute cmd */
1 << 19 | /* Write */
5 << 15 | /* Table type 0b101 */
(msti & 0x3fff);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
sw_w32(port_state[i], priv->r->tbl_access_data_0(i));
priv->r->exec_tbl0_cmd(cmd);
}
u32 status = sw_r32(RTL931X_ISR_GLB_SRC);
u64 ports = rtl839x_get_port_reg_le(RTL931X_ISR_PORT_LINK_STS_CHG);
u64 link;
- int i;
/* Clear status */
rtl839x_set_port_reg_le(ports, RTL931X_ISR_PORT_LINK_STS_CHG);
link = rtl839x_get_port_reg_le(RTL931X_MAC_LINK_STS);
pr_debug("RTL931X Link change: status: %x, link status %016llx\n", status, link);
- for (i = 0; i < 56; i++) {
+ for (int i = 0; i < 56; i++) {
if (ports & BIT_ULL(i)) {
if (link & BIT_ULL(i)) {
pr_info("%s port %d up\n", __func__, i);
void rtl931x_print_matrix(void)
{
volatile u64 *ptr = RTL838X_SW_BASE + RTL839X_PORT_ISO_CTRL(0);
- int i;
- for (i = 0; i < 52; i += 4)
+ for (int i = 0; i < 52; i += 4)
pr_info("> %16llx %16llx %16llx %16llx\n",
ptr[i + 0], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
pr_info("CPU_PORT> %16llx\n", ptr[52]);
u32 r[4];
struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0);
u32 idx;
- int i;
u64 mac;
u64 seed;
pr_debug("%s: NOW hash %08x, pos: %d\n", __func__, hash, pos);
rtl_table_read(q, idx);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
r[i] = sw_r32(rtl_table_data(q, i));
rtl_table_release(q);
u32 r[4];
struct table_reg *q = rtl_table_get(RTL9310_TBL_0, 0);
u32 idx = (0 << 14) | (hash << 2) | pos; /* Access SRAM, with hash and at pos in bucket */
- int i;
pr_info("%s: hash %d, pos %d\n", __func__, hash, pos);
pr_info("%s: index %d -> mac %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, idx,
rtl931x_fill_l2_row(r, e);
pr_info("%s: %d: %08x %08x %08x\n", __func__, idx, r[0], r[1], r[2]);
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
static void rtl931x_vlan_profile_setup(int profile)
{
u32 p[7];
- int i;
pr_info("In %s\n", __func__);
*/
static void rtl931x_write_pie_templated(u32 r[], struct pie_rule *pr, enum template_field_id t[])
{
- int i;
- u16 data, data_m;
+ for (int i = 0; i < N_FIXED_FIELDS; i++) {
+ u16 data, data_m;
- for (i = 0; i < N_FIXED_FIELDS; i++) {
rtl931x_pie_data_fill(t[i], pr, &data, &data_m);
/* On the RTL9300, the mask fields are not word aligned! */
/* Access IACL table (0) via register 1, the table size is 4096 */
struct table_reg *q = rtl_table_get(RTL9310_TBL_1, 0);
u32 r[22];
- int i;
int block = idx / PIE_BLOCK_SIZE;
u32 t_select = sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block));
pr_info("%s: %d, t_select: %08x\n", __func__, idx, t_select);
- for (i = 0; i < 22; i++)
+ for (int i = 0; i < 22; i++)
r[i] = 0;
if (!pr->valid) {
rtl931x_pie_rule_dump_raw(r);
- for (i = 0; i < 22; i++)
+ for (int i = 0; i < 22; i++)
sw_w32(r[i], rtl_table_data(q, i));
rtl_table_write(q, idx);
static bool rtl931x_pie_templ_has(int t, enum template_field_id field_type)
{
- int i;
- enum template_field_id ft;
-
- for (i = 0; i < N_FIXED_FIELDS_RTL931X; i++) {
- ft = fixed_templates[t][i];
+ for (int i = 0; i < N_FIXED_FIELDS_RTL931X; i++) {
+ enum template_field_id ft = fixed_templates[t][i];
if (field_type == ft)
return true;
}
static int rtl931x_pie_rule_add(struct rtl838x_switch_priv *priv, struct pie_rule *pr)
{
- int idx, block, j, t;
+ int idx, block, j;
int min_block = 0;
int max_block = priv->n_pie_blocks / 2;
for (block = min_block; block < max_block; block++) {
for (j = 0; j < 2; j++) {
- t = (sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf;
+ int t = (sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)) >> (j * 4)) & 0xf;
pr_info("Testing block %d, template %d, template id %d\n", block, j, t);
pr_info("%s: %08x\n",
__func__, sw_r32(RTL931X_PIE_BLK_TMPLTE_CTRL(block)));
static void rtl931x_pie_init(struct rtl838x_switch_priv *priv)
{
- int i;
u32 template_selectors;
mutex_init(&priv->pie_mutex);
pr_info("%s\n", __func__);
/* Enable ACL lookup on all ports, including CPU_PORT */
- for (i = 0; i <= priv->cpu_port; i++)
+ for (int i = 0; i <= priv->cpu_port; i++)
sw_w32(1, RTL931X_ACL_PORT_LOOKUP_CTRL(i));
/* Include IPG in metering */
/* 6: Disabled, 0: VACL, 1: IACL, 2: EACL */
/* And for OpenFlow Flow blocks: 3: Ingress Flow table 0, */
/* 4: Ingress Flow Table 3, 5: Egress flow table 0 */
- for (i = 0; i < priv->n_pie_blocks; i++) {
+ for (int i = 0; i < priv->n_pie_blocks; i++) {
int pos = (i % 10) * 3;
u32 r = RTL931X_PIE_BLK_PHASE_CTRL + 4 * (i / 10);
/* Enable predefined templates 0, 1 for first quarter of all blocks */
template_selectors = 0 | (1 << 4);
- for (i = 0; i < priv->n_pie_blocks / 4; i++)
+ for (int i = 0; i < priv->n_pie_blocks / 4; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for second quarter of all blocks */
template_selectors = 2 | (3 << 4);
- for (i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
+ for (int i = priv->n_pie_blocks / 4; i < priv->n_pie_blocks / 2; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 0, 1 for third quater of all blocks */
template_selectors = 0 | (1 << 4);
- for (i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
+ for (int i = priv->n_pie_blocks / 2; i < priv->n_pie_blocks * 3 / 4; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
/* Enable predefined templates 2, 3 for fourth quater of all blocks */
template_selectors = 2 | (3 << 4);
- for (i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
+ for (int i = priv->n_pie_blocks * 3 / 4; i < priv->n_pie_blocks; i++)
sw_w32(template_selectors, RTL931X_PIE_BLK_TMPLTE_CTRL(i));
}
static void rtl931x_led_init(struct rtl838x_switch_priv *priv)
{
- int i, pos;
- u32 v, set;
u64 pm_copper = 0, pm_fiber = 0;
- u32 setlen;
- const __be32 *led_set;
- char set_name[9];
struct device_node *node;
pr_info("%s called\n", __func__);
return;
}
- for (i = 0; i < priv->cpu_port; i++) {
- pos = (i << 1) % 32;
+ for (int i = 0; i < priv->cpu_port; i++) {
+ int pos = (i << 1) % 32;
+ u32 set;
+ u32 v;
+
sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i));
sw_w32_mask(0x3 << pos, 0, RTL931X_LED_PORT_COPR_SET_SEL_CTRL(i));
sw_w32_mask(0, set << pos, RTL931X_LED_PORT_FIB_SET_SEL_CTRL(i));
}
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
+ const __be32 *led_set;
+ char set_name[9];
+ u32 setlen;
+ u32 v;
+
sprintf(set_name, "led_set%d", i);
pr_info(">%s<\n", set_name);
led_set = of_get_property(node, set_name, &setlen);
rtl839x_set_port_reg_le(pm_fiber, RTL931X_LED_PORT_FIB_MASK_CTRL);
rtl839x_set_port_reg_le(pm_copper | pm_fiber, RTL931X_LED_PORT_COMBO_MASK_CTRL);
- for (i = 0; i < 32; i++)
+ for (int i = 0; i < 32; i++)
pr_info("%s %08x: %08x\n",__func__, 0xbb000600 + i * 4, sw_r32(0x0600 + i * 4));
}
*/
static void rtl838x_rb_cleanup(struct rtl838x_eth_priv *priv, int status)
{
- int r;
- u32 *last;
- struct p_hdr *h;
- struct ring_b *ring = priv->membase;
+ for (int r = 0; r < priv->rxrings; r++) {
+ struct ring_b *ring = priv->membase;
+ struct p_hdr *h;
+ u32 *last;
+
- for (r = 0; r < priv->rxrings; r++) {
pr_debug("In %s working on r: %d\n", __func__, r);
last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
do {
void rtl838x_fdb_sync(struct work_struct *work)
{
- const struct fdb_update_work *uw =
- container_of(work, struct fdb_update_work, work);
- struct switchdev_notifier_fdb_info info;
- u8 addr[ETH_ALEN];
- int i = 0;
- int action;
+ const struct fdb_update_work *uw = container_of(work, struct fdb_update_work, work);
- while (uw->macs[i]) {
- action = (uw->macs[i] & (1ULL << 63)) ? SWITCHDEV_FDB_ADD_TO_BRIDGE
- : SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ for (int i = 0; uw->macs[i]; i++) {
+ struct switchdev_notifier_fdb_info info;
+ u8 addr[ETH_ALEN];
+ int action;
+
+ action = (uw->macs[i] & (1ULL << 63)) ?
+ SWITCHDEV_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_FDB_DEL_TO_BRIDGE;
u64_to_ether_addr(uw->macs[i] & 0xffffffffffffULL, addr);
info.addr = &addr[0];
info.vid = 0;
info.offloaded = 1;
pr_debug("FDB entry %d: %llx, action %d\n", i, uw->macs[0], action);
call_switchdev_notifiers(action, uw->ndev, &info.info, NULL);
- i++;
}
kfree(work);
}
static void rtl839x_l2_notification_handler(struct rtl838x_eth_priv *priv)
{
struct notify_b *nb = priv->membase + sizeof(struct ring_b);
- u32 e = priv->lastEvent;
- struct n_event *event;
- int i;
- u64 mac;
- struct fdb_update_work *w;
+ u32 e = priv->lastEvent;
while (!(nb->ring[e] & 1)) {
+ struct fdb_update_work *w;
+ struct n_event *event;
+ u64 mac;
+ int i;
+
w = kzalloc(sizeof(*w), GFP_ATOMIC);
if (!w) {
pr_err("Out of memory: %s", __func__);
struct net_device *dev = dev_id;
struct rtl838x_eth_priv *priv = netdev_priv(dev);
u32 status = sw_r32(priv->r->dma_if_intr_sts);
- int i;
pr_debug("IRQ: %08x\n", status);
/* ACK and disable RX interrupt for this ring */
sw_w32_mask(0xff00 & status, 0, priv->r->dma_if_intr_msk);
sw_w32(0x0000ff00 & status, priv->r->dma_if_intr_sts);
- for (i = 0; i < priv->rxrings; i++) {
+ for (int i = 0; i < priv->rxrings; i++) {
if (status & BIT(i + 8)) {
pr_debug("Scheduling queue: %d\n", i);
napi_schedule(&priv->rx_qs[i].napi);
u32 status_rx_r = sw_r32(priv->r->dma_if_intr_rx_runout_sts);
u32 status_rx = sw_r32(priv->r->dma_if_intr_rx_done_sts);
u32 status_tx = sw_r32(priv->r->dma_if_intr_tx_done_sts);
- int i;
pr_debug("In %s, status_tx: %08x, status_rx: %08x, status_rx_r: %08x\n",
__func__, status_tx, status_rx, status_rx_r);
/* ACK and disable RX interrupt for given rings */
sw_w32(status_rx, priv->r->dma_if_intr_rx_done_sts);
sw_w32_mask(status_rx, 0, priv->r->dma_if_intr_rx_done_msk);
- for (i = 0; i < priv->rxrings; i++) {
+ for (int i = 0; i < priv->rxrings; i++) {
if (status_rx & BIT(i)) {
pr_debug("Scheduling queue: %d\n", i);
napi_schedule(&priv->rx_qs[i].napi);
{
u32 int_saved, nbuf;
u32 reset_mask;
- int i, pos;
pr_info("RESETTING %x, CPU_PORT %d\n", priv->family_id, priv->cpu_port);
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
if (priv->family_id == RTL8390_FAMILY_ID)
sw_w32(0xffffffff, RTL839X_DMA_IF_RX_RING_CNTR);
if (priv->family_id == RTL9300_FAMILY_ID || priv->family_id == RTL9310_FAMILY_ID) {
- for (i = 0; i < priv->rxrings; i++) {
- pos = (i % 3) * 10;
+ for (int i = 0; i < priv->rxrings; i++) {
+ int pos = (i % 3) * 10;
+
sw_w32_mask(0x3ff << pos, 0, priv->r->dma_if_rx_ring_size(i));
sw_w32_mask(0x3ff << pos, priv->rxringlen,
priv->r->dma_if_rx_ring_cntr(i));
static void rtl838x_hw_ring_setup(struct rtl838x_eth_priv *priv)
{
- int i;
struct ring_b *ring = priv->membase;
- for (i = 0; i < priv->rxrings; i++)
+ for (int i = 0; i < priv->rxrings; i++)
sw_w32(KSEG1ADDR(&ring->rx_r[i]), priv->r->dma_rx_base + i * 4);
- for (i = 0; i < TXRINGS; i++)
+ for (int i = 0; i < TXRINGS; i++)
sw_w32(KSEG1ADDR(&ring->tx_r[i]), priv->r->dma_tx_base + i * 4);
}
static void rtl93xx_hw_en_rxtx(struct rtl838x_eth_priv *priv)
{
- int i, pos;
- u32 v;
-
/* Setup CPU-Port: RX Buffer truncated at 1600 Bytes */
sw_w32(0x06400040, priv->r->dma_if_ctrl);
- for (i = 0; i < priv->rxrings; i++) {
- pos = (i % 3) * 10;
+ for (int i = 0; i < priv->rxrings; i++) {
+ int pos = (i % 3) * 10;
+ u32 v;
+
sw_w32_mask(0x3ff << pos, priv->rxringlen << pos, priv->r->dma_if_rx_ring_size(i));
/* Some SoCs have issues with missing underflow protection */
static void rtl838x_setup_ring_buffer(struct rtl838x_eth_priv *priv, struct ring_b *ring)
{
- int i, j;
- struct p_hdr *h;
+ for (int i = 0; i < priv->rxrings; i++) {
+ struct p_hdr *h;
+ int j;
+
- for (i = 0; i < priv->rxrings; i++) {
for (j = 0; j < priv->rxringlen; j++) {
h = &ring->rx_header[i][j];
memset(h, 0, sizeof(struct p_hdr));
ring->c_rx[i] = 0;
}
- for (i = 0; i < TXRINGS; i++) {
+ for (int i = 0; i < TXRINGS; i++) {
+ struct p_hdr *h;
+ int j;
+
for (j = 0; j < TXRINGLEN; j++) {
h = &ring->tx_header[i][j];
memset(h, 0, sizeof(struct p_hdr));
static void rtl839x_setup_notify_ring_buffer(struct rtl838x_eth_priv *priv)
{
- int i;
struct notify_b *b = priv->membase + sizeof(struct ring_b);
- for (i = 0; i < NOTIFY_BLOCKS; i++)
+ for (int i = 0; i < NOTIFY_BLOCKS; i++)
b->ring[i] = KSEG1ADDR(&b->blocks[i]) | 1 | (i == (NOTIFY_BLOCKS - 1) ? WRAP : 0);
sw_w32((u32) b->ring, RTL839X_DMA_IF_NBUF_BASE_DESC_ADDR_CTRL);
unsigned long flags;
struct rtl838x_eth_priv *priv = netdev_priv(ndev);
struct ring_b *ring = priv->membase;
- int i;
pr_debug("%s called: RX rings %d(length %d), TX rings %d(length %d)\n",
__func__, priv->rxrings, priv->rxringlen, TXRINGS, TXRINGLEN);
rtl838x_hw_ring_setup(priv);
phylink_start(priv->phylink);
- for (i = 0; i < priv->rxrings; i++)
+ for (int i = 0; i < priv->rxrings; i++)
napi_enable(&priv->rx_qs[i].napi);
switch (priv->family_id) {
{
u32 force_mac = priv->family_id == RTL8380_FAMILY_ID ? 0x6192C : 0x75;
u32 clear_irq = priv->family_id == RTL8380_FAMILY_ID ? 0x000fffff : 0x007fffff;
- int i;
/* Disable RX/TX from/to CPU-port */
sw_w32_mask(0x3, 0, priv->r->mac_port_ctrl(priv->cpu_port));
/* Flush L2 address cache */
if (priv->family_id == RTL8380_FAMILY_ID) {
- for (i = 0; i <= priv->cpu_port; i++) {
+ for (int i = 0; i <= priv->cpu_port; i++) {
sw_w32(1 << 26 | 1 << 23 | i << 5, priv->r->l2_tbl_flush_ctrl);
do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 26));
}
} else if (priv->family_id == RTL8390_FAMILY_ID) {
- for (i = 0; i <= priv->cpu_port; i++) {
+ for (int i = 0; i <= priv->cpu_port; i++) {
sw_w32(1 << 28 | 1 << 25 | i << 5, priv->r->l2_tbl_flush_ctrl);
do { } while (sw_r32(priv->r->l2_tbl_flush_ctrl) & (1 << 28));
}
static int rtl838x_eth_stop(struct net_device *ndev)
{
- unsigned long flags;
- int i;
struct rtl838x_eth_priv *priv = netdev_priv(ndev);
pr_info("in %s\n", __func__);
phylink_stop(priv->phylink);
rtl838x_hw_stop(priv);
- for (i = 0; i < priv->rxrings; i++)
+ for (int i = 0; i < priv->rxrings; i++)
napi_disable(&priv->rx_qs[i].napi);
netif_tx_stop_all_queues(ndev);
static int rtl838x_eth_tx(struct sk_buff *skb, struct net_device *dev)
{
- int len, i;
+ int len;
struct rtl838x_eth_priv *priv = netdev_priv(dev);
struct ring_b *ring = priv->membase;
- uint32_t val;
int ret;
unsigned long flags;
struct p_hdr *h;
/* Before starting TX, prevent a Lextra bus bug on RTL8380 SoCs */
if (priv->family_id == RTL8380_FAMILY_ID) {
- for (i = 0; i < 10; i++) {
- val = sw_r32(priv->r->dma_if_ctrl);
+ for (int i = 0; i < 10; i++) {
+ u32 val = sw_r32(priv->r->dma_if_ctrl);
+
if ((val & 0xc) == 0xc)
break;
}
{
struct rtl838x_eth_priv *priv = netdev_priv(dev);
struct ring_b *ring = priv->membase;
- struct sk_buff *skb;
LIST_HEAD(rx_list);
unsigned long flags;
- int i, len, work_done = 0;
- u8 *data, *skb_data;
- unsigned int val;
+ int work_done = 0;
u32 *last;
- struct p_hdr *h;
bool dsa = netdev_uses_dsa(dev);
- struct dsa_tag tag;
pr_debug("---------------------------------------------------------- RX - %d\n", r);
spin_lock_irqsave(&priv->lock, flags);
last = (u32 *)KSEG1ADDR(sw_r32(priv->r->dma_if_rx_cur + r * 4));
do {
+ struct sk_buff *skb;
+ struct dsa_tag tag;
+ struct p_hdr *h;
+ u8 *skb_data;
+ u8 *data;
+ int len;
+
if ((ring->rx_r[r][ring->c_rx[r]] & 0x1)) {
if (&ring->rx_r[r][ring->c_rx[r]] != last) {
netdev_warn(dev, "Ring contention: r: %x, last %x, cur %x\n",
/* BUG: Prevent bug on RTL838x SoCs */
if (priv->family_id == RTL8380_FAMILY_ID) {
sw_w32(0xffffffff, priv->r->dma_if_rx_ring_size(0));
- for (i = 0; i < priv->rxrings; i++) {
+ for (int i = 0; i < priv->rxrings; i++) {
+ unsigned int val;
+
/* Update each ring cnt */
val = sw_r32(priv->r->dma_if_rx_ring_cntr(i));
sw_w32(val, priv->r->dma_if_rx_ring_cntr(i));
static int rtl8380_init_mac(struct rtl838x_eth_priv *priv)
{
- int i;
-
if (priv->family_id == 0x8390)
return rtl8390_init_mac(priv);
/* Init VLAN. TODO: Understand what is being done, here */
if (priv->id == 0x8382) {
- for (i = 0; i <= 28; i++)
+ for (int i = 0; i <= 28; i++)
sw_w32(0, 0xd57c + i * 0x80);
}
if (priv->id == 0x8380) {
- for (i = 8; i <= 28; i++)
+ for (int i = 8; i <= 28; i++)
sw_w32(0, 0xd57c + i * 0x80);
}
static int rtl930x_mdio_reset(struct mii_bus *bus)
{
- int i;
- int pos;
struct rtl838x_eth_priv *priv = bus->priv;
u32 c45_mask = 0;
u32 poll_sel[2];
/* Mapping of port to phy-addresses on an SMI bus */
poll_sel[0] = poll_sel[1] = 0;
- for (i = 0; i < RTL930X_CPU_PORT; i++) {
+ for (int i = 0; i < RTL930X_CPU_PORT; i++) {
+ int pos;
+
if (priv->smi_bus[i] > 3)
continue;
pos = (i % 6) * 5;
sw_w32_mask(poll_ctrl, 0, RTL930X_SMI_GLB_CTRL);
/* Configure which SMI busses are polled in c45 based on a c45 PHY being on that bus */
- for (i = 0; i < 4; i++)
+ for (int i = 0; i < 4; i++)
if (priv->smi_bus_isc45[i])
c45_mask |= BIT(i + 16);
/* Set the MAC type of each port according to the PHY-interface */
/* Values are FE: 2, GE: 3, XGE/2.5G: 0(SERDES) or 1(otherwise), SXGE: 0 */
v = 0;
- for (i = 0; i < RTL930X_CPU_PORT; i++) {
+ for (int i = 0; i < RTL930X_CPU_PORT; i++) {
switch (priv->interfaces[i]) {
case PHY_INTERFACE_MODE_10GBASER:
break; /* Serdes: Value = 0 */
static int rtl931x_mdio_reset(struct mii_bus *bus)
{
- int i;
- int pos;
struct rtl838x_eth_priv *priv = bus->priv;
u32 c45_mask = 0;
u32 poll_sel[4];
mdc_on[0] = mdc_on[1] = mdc_on[2] = mdc_on[3] = false;
/* Mapping of port to phy-addresses on an SMI bus */
poll_sel[0] = poll_sel[1] = poll_sel[2] = poll_sel[3] = 0;
- for (i = 0; i < 56; i++) {
+ for (int i = 0; i < 56; i++) {
+ u32 pos;
+
pos = (i % 6) * 5;
sw_w32_mask(0x1f << pos, priv->smi_addr[i] << pos, RTL931X_SMI_PORT_ADDR + (i / 6) * 4);
pos = (i * 2) % 32;
}
/* Configure which SMI bus is behind which port number */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
pr_info("poll sel %d, %08x\n", i, poll_sel[i]);
sw_w32(poll_sel[i], RTL931X_SMI_PORT_POLLING_SEL + (i * 4));
}
/* Configure which SMI busses */
pr_info("%s: WAS RTL931X_MAC_L2_GLOBAL_CTRL2 %08x\n", __func__, sw_r32(RTL931X_MAC_L2_GLOBAL_CTRL2));
pr_info("c45_mask: %08x, RTL931X_SMI_GLB_CTRL0 was %X", c45_mask, sw_r32(RTL931X_SMI_GLB_CTRL0));
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
/* bus is polled in c45 */
if (priv->smi_bus_isc45[i])
c45_mask |= 0x2 << (i * 2); /* Std. C45, non-standard is 0x3 */
struct resource *res, *mem;
phy_interface_t phy_mode;
struct phylink *phylink;
- int err = 0, i, rxrings, rxringlen;
+ int err = 0, rxrings, rxringlen;
struct ring_b *ring;
pr_info("Probing RTL838X eth device pdev: %x, dev: %x\n",
if (err)
goto err_free;
- for (i = 0; i < priv->rxrings; i++) {
+ for (int i = 0; i < priv->rxrings; i++) {
priv->rx_qs[i].id = i;
priv->rx_qs[i].priv = priv;
netif_napi_add(dev, &priv->rx_qs[i].napi, rtl838x_poll_rx, 64);
{
struct net_device *dev = platform_get_drvdata(pdev);
struct rtl838x_eth_priv *priv = netdev_priv(dev);
- int i;
if (dev) {
pr_info("Removing platform driver for rtl838x-eth\n");
netif_tx_stop_all_queues(dev);
- for (i = 0; i < priv->rxrings; i++)
+ for (int i = 0; i < priv->rxrings; i++)
netif_napi_del(&priv->rx_qs[i].napi);
unregister_netdev(dev);
static int rtl8226_read_status(struct phy_device *phydev)
{
- int ret = 0, i;
+ int ret = 0;
u32 val;
/* TODO: ret = genphy_read_status(phydev);
*/
/* Link status must be read twice */
- for (i = 0; i < 2; i++)
+ for (int i = 0; i < 2; i++)
val = phy_read_mmd(phydev, MMD_VEND2, 0xA402);
phydev->link = val & BIT(2) ? 1 : 0;
static int rtl8380_configure_int_rtl8218b(struct phy_device *phydev)
{
u32 val, phy_id;
- int i, p, ipd_flag;
int mac = phydev->mdio.addr;
struct fw_header *h;
u32 *rtl838x_6275B_intPhy_perport;
rtl838x_6275B_intPhy_perport = (void *)h + sizeof(struct fw_header) + h->parts[8].start;
rtl8218b_6276B_hwEsd_perport = (void *)h + sizeof(struct fw_header) + h->parts[9].start;
- if (sw_r32(RTL838X_DMY_REG31) == 0x1)
- ipd_flag = 1;
+ if (sw_r32(RTL838X_DMY_REG31) == 0x1) {
+ int ipd_flag = 1;
+ }
val = phy_read(phydev, 0);
if (val & BIT(11))
msleep(100);
/* Ready PHY for patch */
- for (p = 0; p < 8; p++) {
+ for (int p = 0; p < 8; p++) {
phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH);
phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW, 0x10, 0x0010);
}
msleep(500);
- for (p = 0; p < 8; p++) {
+ for (int p = 0; p < 8; p++) {
+ int i;
+
for (i = 0; i < 100 ; i++) {
val = phy_package_port_read_paged(phydev, p, RTL821X_PAGE_STATE, 0x10);
if (val & 0x40)
return -1;
}
}
- for (p = 0; p < 8; p++) {
+ for (int p = 0; p < 8; p++) {
+ int i;
+
i = 0;
while (rtl838x_6275B_intPhy_perport[i * 2]) {
phy_package_port_write_paged(phydev, p, RTL83XX_PAGE_RAW,
static int rtl8380_configure_ext_rtl8218b(struct phy_device *phydev)
{
u32 val, ipd, phy_id;
- int i, l;
int mac = phydev->mdio.addr;
struct fw_header *h;
u32 *rtl8380_rtl8218b_perchip;
phydev_info(phydev, "Detected chip revision %04x\n", val);
- i = 0;
- while (rtl8380_rtl8218b_perchip[i * 3] &&
- rtl8380_rtl8218b_perchip[i * 3 + 1]) {
+ for (int i = 0; rtl8380_rtl8218b_perchip[i * 3] &&
+ rtl8380_rtl8218b_perchip[i * 3 + 1]; i++) {
phy_package_port_write_paged(phydev, rtl8380_rtl8218b_perchip[i * 3],
RTL83XX_PAGE_RAW, rtl8380_rtl8218b_perchip[i * 3 + 1],
rtl8380_rtl8218b_perchip[i * 3 + 2]);
- i++;
}
/* Enable PHY */
- for (i = 0; i < 8; i++) {
+ for (int i = 0; i < 8; i++) {
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN);
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x00, 0x1140);
}
mdelay(100);
/* Request patch */
- for (i = 0; i < 8; i++) {
+ for (int i = 0; i < 8; i++) {
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH);
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x10, 0x0010);
}
mdelay(300);
/* Verify patch readiness */
- for (i = 0; i < 8; i++) {
+ for (int i = 0; i < 8; i++) {
+ int l;
+
for (l = 0; l < 100; l++) {
val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_STATE, 0x10);
if (val & 0x40)
phy_write_paged(phydev, 0, 30, 0);
ipd = (ipd >> 4) & 0xf; /* unused ? */
- i = 0;
- while (rtl8218B_6276B_rtl8380_perport[i * 2]) {
+ for (int i = 0; rtl8218B_6276B_rtl8380_perport[i * 2]; i++) {
phy_write_paged(phydev, RTL83XX_PAGE_RAW, rtl8218B_6276B_rtl8380_perport[i * 2],
rtl8218B_6276B_rtl8380_perport[i * 2 + 1]);
- i++;
}
/* Disable broadcast ID */
static int rtl8380_configure_rtl8214fc(struct phy_device *phydev)
{
- u32 phy_id, val, page = 0;
- int i, l;
int mac = phydev->mdio.addr;
struct fw_header *h;
u32 *rtl8380_rtl8214fc_perchip;
u32 *rtl8380_rtl8214fc_perport;
+ u32 phy_id;
+ u32 val;
+ u32 page = 0;
val = phy_read(phydev, 2);
phy_id = val << 16;
msleep(100);
phy_write_paged(phydev, 0, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER);
- i = 0;
- while (rtl8380_rtl8214fc_perchip[i * 3] &&
- rtl8380_rtl8214fc_perchip[i * 3 + 1]) {
+ for (int i = 0; rtl8380_rtl8214fc_perchip[i * 3] &&
+ rtl8380_rtl8214fc_perchip[i * 3 + 1]; i++) {
if (rtl8380_rtl8214fc_perchip[i * 3 + 1] == 0x1f)
page = rtl8380_rtl8214fc_perchip[i * 3 + 2];
if (rtl8380_rtl8214fc_perchip[i * 3 + 1] == 0x13 && page == 0x260) {
rtl8380_rtl8214fc_perchip[i * 3 + 1],
rtl8380_rtl8214fc_perchip[i * 3 + 2]);
}
- i++;
}
/* Force copper medium */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN);
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_COPPER);
}
/* Enable PHY */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN);
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x00, 0x1140);
}
mdelay(100);
/* Disable Autosensing */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
+ int l;
+
for (l = 0; l < 100; l++) {
val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_GPHY, 0x10);
if ((val & 0x7) >= 3)
}
/* Request patch */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL821X_PAGE_PATCH);
phy_package_port_write_paged(phydev, i, RTL83XX_PAGE_RAW, 0x10, 0x0010);
}
mdelay(300);
/* Verify patch readiness */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
+ int l;
+
for (l = 0; l < 100; l++) {
val = phy_package_port_read_paged(phydev, i, RTL821X_PAGE_STATE, 0x10);
if (val & 0x40)
/* Use Broadcast ID method for patching */
rtl821x_phy_setup_package_broadcast(phydev, true);
- i = 0;
- while (rtl8380_rtl8214fc_perport[i * 2]) {
+ for (int i = 0; rtl8380_rtl8214fc_perport[i * 2]; i++) {
phy_write_paged(phydev, RTL83XX_PAGE_RAW, rtl8380_rtl8214fc_perport[i * 2],
rtl8380_rtl8214fc_perport[i * 2 + 1]);
- i++;
}
/* Disable broadcast ID */
rtl821x_phy_setup_package_broadcast(phydev, false);
/* Auto medium selection */
- for (i = 0; i < 4; i++) {
+ for (int i = 0; i < 4; i++) {
phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL8XXX_PAGE_SELECT, RTL8XXX_PAGE_MAIN);
phy_write_paged(phydev, RTL83XX_PAGE_RAW, RTL821XEXT_MEDIA_PAGE_SELECT, RTL821X_MEDIA_PAGE_AUTO);
}
*/
void rtl9300_force_sds_mode(int sds, phy_interface_t phy_if)
{
+ int lc_value;
int sds_mode;
bool lc_on;
- int i, lc_value;
int lane_0 = (sds % 2) ? sds - 1 : sds;
- u32 v, cr_0, cr_1, cr_2;
- u32 m_bit, l_bit;
+ u32 v;
pr_info("%s: SDS: %d, mode %d\n", __func__, sds, phy_if);
switch (phy_if) {
rtl9300_sds_field_w(sds, 0x1f, 9, 11, 7, sds_mode);
/* Toggle LC or Ring */
- for (i = 0; i < 20; i++) {
+ for (int i = 0; i < 20; i++) {
+ u32 cr_0, cr_1, cr_2;
+ u32 m_bit, l_bit;
+
mdelay(200);
rtl930x_write_sds_phy(lane_0, 0x1f, 2, 53);
int rtl9300_configure_serdes(struct phy_device *phydev)
{
+ int phy_mode = PHY_INTERFACE_MODE_10GBASER;
struct device *dev = &phydev->mdio.dev;
- int phy_addr = phydev->mdio.addr;
- struct device_node *dn;
+ int calib_tries = 0;
u32 sds_num = 0;
- int sds_mode, calib_tries = 0, phy_mode = PHY_INTERFACE_MODE_10GBASER, i;
+ int sds_mode;
if (dev->of_node) {
- dn = dev->of_node;
+ struct device_node *dn = dev->of_node;
+ int phy_addr = phydev->mdio.addr;
+
if (of_property_read_u32(dn, "sds", &sds_num))
sds_num = -1;
pr_info("%s PATCHING SerDes %d\n", __func__, sds_num);
if (sds_num % 2) {
- for (i = 0; i < sizeof(rtl9300_a_sds_10gr_lane1) / sizeof(sds_config); ++i) {
+ for (int i = 0; i < sizeof(rtl9300_a_sds_10gr_lane1) / sizeof(sds_config); ++i) {
rtl930x_write_sds_phy(sds_num, rtl9300_a_sds_10gr_lane1[i].page,
rtl9300_a_sds_10gr_lane1[i].reg,
rtl9300_a_sds_10gr_lane1[i].data);
}
} else {
- for (i = 0; i < sizeof(rtl9300_a_sds_10gr_lane0) / sizeof(sds_config); ++i) {
+ for (int i = 0; i < sizeof(rtl9300_a_sds_10gr_lane0) / sizeof(sds_config); ++i) {
rtl930x_write_sds_phy(sds_num, rtl9300_a_sds_10gr_lane0[i].page,
rtl9300_a_sds_10gr_lane0[i].reg,
rtl9300_a_sds_10gr_lane0[i].data);
static void rtl931x_symerr_clear(u32 sds, phy_interface_t mode)
{
- u32 i;
- u32 xsg_sdsid_0, xsg_sdsid_1;
switch (mode) {
case PHY_INTERFACE_MODE_NA:
break;
case PHY_INTERFACE_MODE_XGMII:
+ u32 xsg_sdsid_0, xsg_sdsid_1;
+
if (sds < 2)
xsg_sdsid_0 = sds;
else
xsg_sdsid_0 = (sds - 1) * 2;
xsg_sdsid_1 = xsg_sdsid_0 + 1;
- for (i = 0; i < 4; ++i) {
+ for (int i = 0; i < 4; ++i) {
rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 24, 2, 0, i);
rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 3, 15, 8, 0x0);
rtl9310_sds_field_w(xsg_sdsid_0, 0x1, 2, 15, 0, 0x0);
}
- for (i = 0; i < 4; ++i) {
+ for (int i = 0; i < 4; ++i) {
rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 24, 2, 0, i);
rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 3, 15, 8, 0x0);
rtl9310_sds_field_w(xsg_sdsid_1, 0x1, 2, 15, 0, 0x0);
break;
case PHY_INTERFACE_MODE_USXGMII: /* MII_USXGMII_10GSXGMII/10GDXGMII/10GQXGMII: */
- u32 i, evenSds;
u32 op_code = 0x6003;
+ u32 evenSds;
if (chiptype) {
rtl9310_sds_field_w(asds, 0x6, 0x2, 12, 12, 1);
- for (i = 0; i < sizeof(sds_config_10p3125g_type1) / sizeof(sds_config); ++i) {
+ for (int i = 0; i < sizeof(sds_config_10p3125g_type1) / sizeof(sds_config); ++i) {
rtl931x_write_sds_phy(asds, sds_config_10p3125g_type1[i].page - 0x4, sds_config_10p3125g_type1[i].reg, sds_config_10p3125g_type1[i].data);
}
evenSds = asds - (asds % 2);
- for (i = 0; i < sizeof(sds_config_10p3125g_cmu_type1) / sizeof(sds_config); ++i) {
+ for (int i = 0; i < sizeof(sds_config_10p3125g_cmu_type1) / sizeof(sds_config); ++i) {
rtl931x_write_sds_phy(evenSds,
sds_config_10p3125g_cmu_type1[i].page - 0x4, sds_config_10p3125g_cmu_type1[i].reg, sds_config_10p3125g_cmu_type1[i].data);
}