esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
esw->mode = mode;
+ mlx5_lag_update(esw->dev);
+
if (mode == SRIOV_LEGACY) {
err = esw_create_legacy_fdb_table(esw);
} else {
@@ ... @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
+ mlx5_lag_update(esw->dev);
+
if (old_mode == SRIOV_OFFLOADS)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
}
@@ ... @@ mlx5_eswitch_mode
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+
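+/* LAG is allowed only while both devices' E-Switches are inactive
+ * (SRIOV_NONE); any other mode on either port disallows bonding.
+ */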
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
+{
+	return dev0->priv.eswitch->mode == SRIOV_NONE &&
+	       dev1->priv.eswitch->mode == SRIOV_NONE;
+}
@@ ... @@
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
@@ ... @@
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
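+/* Without E-Switch support there is no E-Switch constraint on LAG. */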
+static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
@@ ... @@
struct lag_tracker tracker;
struct delayed_work bond_work;
struct notifier_block nb;
-
- /* Admin state. Allow lag only if allowed is true
- * even if network conditions for lag were met
- */
- bool allowed;
};
/* General purpose, use for short periods of time.
@@ ... @@
err);
}
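+/* Both ports must be present and their E-Switch prerequisites met
+ * before LAG is allowed.
+ */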
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+	return ldev->pf[0].dev && ldev->pf[1].dev &&
+	       mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+}
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
- do_bond = tracker.is_bonded && ldev->allowed;
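+	/* Bond only when the netdev events indicate bonding and the
+	 * device-side prerequisites (E-Switch modes) allow it.
+	 */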
+ do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
if (do_bond && !mlx5_lag_is_bonded(ldev)) {
@@ ... @@
if (!sriov_enabled)
return NOTIFY_DONE;
}
@@ ... @@
-static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
-{
- if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
- (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
- return false;
- else
- return true;
-}
-
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
struct mlx5_lag *ldev;
return NULL;
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
- ldev->allowed = mlx5_lag_check_prereq(ldev);
return ldev;
}
@@ ... @@
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
dev->priv.lag = ldev;
mutex_unlock(&lag_mutex);
@@ ... @@
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
mutex_unlock(&lag_mutex);
}
@@ ... @@
}
EXPORT_SYMBOL(mlx5_lag_is_active);
-static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
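+/* Re-check LAG prerequisites and update the bonding state accordingly.
+ * Called when a device's E-Switch mode changes.
+ */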
+void mlx5_lag_update(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- int ret = 0;
- bool lag_active;
mlx5_dev_list_lock();
-
ldev = mlx5_lag_dev_get(dev);
- if (!ldev) {
- ret = -ENODEV;
- goto unlock;
- }
- lag_active = mlx5_lag_is_bonded(ldev);
- if (!mlx5_lag_check_prereq(ldev) && allow) {
- ret = -EINVAL;
- goto unlock;
- }
- if (ldev->allowed == allow)
+ if (!ldev)
goto unlock;
- ldev->allowed = allow;
- if ((lag_active && !allow) || allow)
- mlx5_do_bond(ldev);
-unlock:
- mlx5_dev_list_unlock();
- return ret;
-}
-int mlx5_lag_forbid(struct mlx5_core_dev *dev)
-{
- return mlx5_lag_set_state(dev, false);
-}
-int mlx5_lag_allow(struct mlx5_core_dev *dev)
-{
-	return mlx5_lag_set_state(dev, true);
+	mlx5_do_bond(ldev);
+
+unlock:
+	mlx5_dev_list_unlock();
}
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)