for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_tunnel_info *tun;
+
+ tun = &hwfn->cdev->tunnel;
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
if (rc)
return rc;
+
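+ /* On an SR-IOV PF, mirror the updated tunnel UDP ports into every
+ * VF bulletin board and schedule a bulletin update so the VFs are
+ * notified of the change.
+ */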
+ if (IS_PF_SRIOV(hwfn)) {
+ u16 vxlan_port, geneve_port;
+ int j;
+
+ vxlan_port = tun->vxlan_port.port;
+ geneve_port = tun->geneve_port.port;
+
+ qed_for_each_vf(hwfn, j) {
+ qed_iov_bulletin_set_udp_ports(hwfn, j,
+ vxlan_port,
+ geneve_port);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
}
return 0;
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
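+ /* Ports are delivered to the VF on the next bulletin board update */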
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
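+
+/**
+ * @brief Set the vxlan and geneve tunnel UDP ports in the VF bulletin board
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param vxlan_port
+ * @param geneve_port
+ */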
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
/**
* @brief Read sriov related information and allocated resources
*  reads from configuration space, shmem, etc.
return MAX_NUM_VFS;
}

+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port)
+{
+}
+
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
return true;
}
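
+/* Read the tunnel UDP ports that the PF published in the bulletin board */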
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+ struct qed_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
void *cookie = hwfn->cdev->ops_cookie;
+ u16 vxlan_port, geneve_port;
+
+ qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && cookie)
ops->force_mac(cookie, mac, !!is_mac_forced);
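+
+ /* Let the protocol driver know about the bulletin's tunnel UDP ports */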
+ ops->ports_update(cookie, vxlan_port, geneve_port);
+
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
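+ /* Tunnel UDP destination ports published by the PF */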
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
}
#endif
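
+/* qed ->ports_update callback: the PF reported new tunnel UDP ports
+ * through the VF bulletin board.
+ */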
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+ struct qede_dev *edev = dev;
+
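+ /* Clear the locally cached port if it no longer matches the
+ * port reported by the PF.
+ */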
+ if (edev->vxlan_dst_port != vxlan_port)
+ edev->vxlan_dst_port = 0;
+
+ if (edev->geneve_dst_port != geneve_port)
+ edev->geneve_dst_port = 0;
+}
+
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
.link_update = qede_link_update,
},
.force_mac = qede_force_mac,
+ .ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac, bool forced);
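+ /* Invoked when the PF changes the tunnel UDP ports published in the
+ * VF bulletin board.
+ */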
+ void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port);
};
#define QED_MAX_PHC_DRIFT_PPB 291666666