* Switch the data path from the synthetic interface to the VF
* interface.
*/
-void netvsc_switch_datapath(struct netvsc_device *nv_dev, bool vf)
+void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
- struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
- struct net_device *ndev = nv_dev->ndev;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nv_dev = net_device_ctx->nvdev;
+ struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
memset(init_pkt, 0, sizeof(struct nvsp_message));
init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
atomic_set(&net_device->vf_use_cnt, 0);
- net_device->ndev = ndev;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
node = cpu_to_node(device->channel->target_cpu);
net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
+ struct net_device *ndev = hv_get_drvdata(device);
int ret;
unsigned long t;
/* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
- init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
- ETH_HLEN;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
- struct net_device *ndev;
u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
int i, num_ver = 4; /* number of different NVSP versions */
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
}
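/*
 * Illustrative sketch, not part of this patch: the recurring change above
 * replaces the removed net_device->ndev field by recovering the synthetic
 * net_device from the hv_device driver data (assumed to be set at probe
 * time via hv_set_drvdata() in netvsc_probe()). The helper name below is
 * hypothetical.
 */
static struct net_device *example_ndev_from_hv_device(struct hv_device *device)
{
	return hv_get_drvdata(device);
}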
static inline int netvsc_send_pkt(
+ struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
struct hv_page_buffer **pb,
struct nvsp_message nvmsg;
u16 q_idx = packet->q_idx;
struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
- struct net_device *ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
}
if (msd_send) {
- m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
+ m_ret = netvsc_send_pkt(device, msd_send, net_device,
+ NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
send_now:
if (cur_send)
- ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
+ ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, section_index);
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
- struct net_device *ndev;
-
- ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
void *data;
- ndev = net_device->ndev;
-
/*
* All inbound packets other than send completion should be xfer page
* packet
struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(hdev);
int i;
u32 count, *tab;
nvscdev = get_outbound_net_device(hdev);
if (!nvscdev)
return;
- ndev = nvscdev->ndev;
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
net_device = get_inbound_net_device(device);
if (!net_device)
return;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
do {
{
struct net_device_context *ndevctx =
container_of(w, struct net_device_context, work);
- struct netvsc_device *nvdev;
+ struct hv_device *device_obj = ndevctx->device_ctx;
+ struct net_device *ndev = hv_get_drvdata(device_obj);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
struct rndis_device *rdev;
- nvdev = ndevctx->nvdev;
- if (nvdev == NULL || nvdev->ndev == NULL)
+ if (!nvdev)
return;
rdev = nvdev->extension;
if (rdev == NULL)
return;
- if (nvdev->ndev->flags & IFF_PROMISC)
+ if (ndev->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
*/
static void netvsc_link_change(struct work_struct *w)
{
- struct net_device_context *ndev_ctx;
- struct net_device *net;
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, dwork.work);
+ struct hv_device *device_obj = ndev_ctx->device_ctx;
+ struct net_device *net = hv_get_drvdata(device_obj);
struct netvsc_device *net_device;
struct rndis_device *rdev;
struct netvsc_reconfig *event = NULL;
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- ndev_ctx = container_of(w, struct net_device_context, dwork.work);
-
rtnl_lock();
if (ndev_ctx->start_remove)
goto out_unlock;
net_device = ndev_ctx->nvdev;
rdev = net_device->extension;
- net = net_device->ndev;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
if (time_is_after_jiffies(next_reconfig)) {
atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
}
-static struct netvsc_device *get_netvsc_device(char *mac)
+static struct net_device *get_netvsc_net_device(char *mac)
{
- struct net_device *dev;
- struct net_device_context *netvsc_ctx = NULL;
+ struct net_device *dev, *found = NULL;
int rtnl_locked;
rtnl_locked = rtnl_trylock();
if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
if (dev->netdev_ops != &device_ops)
continue;
- netvsc_ctx = netdev_priv(dev);
+ found = dev;
break;
}
}
if (rtnl_locked)
rtnl_unlock();
- if (netvsc_ctx == NULL)
- return NULL;
-
- return netvsc_ctx->nvdev;
+ return found;
}
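/*
 * Illustrative caller sketch, not part of this patch: the VF notifier
 * handlers below now follow this lookup pattern, since the helper returns
 * the synthetic net_device and the netvsc_device is reached through
 * netdev_priv(). The function name is hypothetical.
 */
static struct netvsc_device *example_nvdev_by_mac(char *mac)
{
	struct net_device *ndev = get_netvsc_net_device(mac);
	struct net_device_context *net_device_ctx;

	if (!ndev)
		return NULL;

	net_device_ctx = netdev_priv(ndev);
	return net_device_ctx->nvdev;
}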
static int netvsc_register_vf(struct net_device *vf_netdev)
{
+ struct net_device *ndev;
+ struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
* associate with the VF interface. If we don't find a matching
* synthetic interface, move on.
*/
- netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
return NOTIFY_DONE;
- netdev_info(netvsc_dev->ndev, "VF registering: %s\n", vf_netdev->name);
+ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
/*
* Take a reference on the module.
*/
static int netvsc_vf_up(struct net_device *vf_netdev)
{
+ struct net_device *ndev;
struct netvsc_device *netvsc_dev;
const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
 if (eth_ops == &ethtool_ops)
return NOTIFY_DONE;
- netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
return NOTIFY_DONE;
- netdev_info(netvsc_dev->ndev, "VF up: %s\n", vf_netdev->name);
- net_device_ctx = netdev_priv(netvsc_dev->ndev);
+ netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = true;
/*
/*
* notify the host to switch the data path.
*/
- netvsc_switch_datapath(netvsc_dev, true);
- netdev_info(netvsc_dev->ndev, "Data path switched to VF: %s\n",
- vf_netdev->name);
+ netvsc_switch_datapath(ndev, true);
+ netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
- netif_carrier_off(netvsc_dev->ndev);
+ netif_carrier_off(ndev);
/*
* Now notify peers. We are scheduling work to
static int netvsc_vf_down(struct net_device *vf_netdev)
{
+ struct net_device *ndev;
struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
 if (eth_ops == &ethtool_ops)
return NOTIFY_DONE;
- netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
return NOTIFY_DONE;
- netdev_info(netvsc_dev->ndev, "VF down: %s\n", vf_netdev->name);
- net_device_ctx = netdev_priv(netvsc_dev->ndev);
+ netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = false;
/*
* Wait for currently active users to
while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
udelay(50);
- netvsc_switch_datapath(netvsc_dev, false);
- netdev_info(netvsc_dev->ndev, "Data path switched from VF: %s\n",
- vf_netdev->name);
+ netvsc_switch_datapath(ndev, false);
+ netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(net_device_ctx->device_ctx);
- netif_carrier_on(netvsc_dev->ndev);
+ netif_carrier_on(ndev);
/*
* Notify peers.
*/
atomic_inc(&netvsc_dev->vf_use_cnt);
- net_device_ctx->gwrk.netdev = netvsc_dev->ndev;
+ net_device_ctx->gwrk.netdev = ndev;
net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
schedule_work(&net_device_ctx->gwrk.dwrk);
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
+ struct net_device *ndev;
struct netvsc_device *netvsc_dev;
const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+ struct net_device_context *net_device_ctx;
 if (eth_ops == &ethtool_ops)
return NOTIFY_DONE;
- netvsc_dev = get_netvsc_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
return NOTIFY_DONE;
- netdev_info(netvsc_dev->ndev, "VF unregistering: %s\n",
- vf_netdev->name);
+ netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
netvsc_dev->vf_netdev = NULL;
module_put(THIS_MODULE);