--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ ... @@
int netvsc_send(struct net_device_context *ndc,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
- struct hv_page_buffer **page_buffer,
+ struct hv_page_buffer *page_buffer,
struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
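This header change is the whole patch in miniature: every function on the send path now takes the page-buffer array by plain pointer instead of pointer-to-pointer. None of the callees ever reassigns the caller's pointer, so the extra level of indirection bought nothing. A minimal user-space sketch of the same transformation; buf_elem and the sum_lens_* functions are hypothetical stand-ins, not driver code:

#include <stddef.h>
#include <stdio.h>

struct buf_elem {                 /* stand-in for struct hv_page_buffer */
	unsigned int len;
};

/* Old shape: double indirection, though the callee only reads. */
static unsigned int sum_lens_old(struct buf_elem **pb, size_t n)
{
	unsigned int total = 0;

	for (size_t i = 0; i < n; i++)
		total += (*pb)[i].len;    /* extra dereference per access */
	return total;
}

/* New shape: one level is enough for read-only, indexed access. */
static unsigned int sum_lens_new(const struct buf_elem *pb, size_t n)
{
	unsigned int total = 0;

	for (size_t i = 0; i < n; i++)
		total += pb[i].len;
	return total;
}

int main(void)
{
	struct buf_elem buf[3] = { { 10 }, { 20 }, { 30 } };
	struct buf_elem *pb = buf;

	/* Both report 60; the double pointer adds nothing here. */
	printf("%u %u\n", sum_lens_old(&pb, 3), sum_lens_new(buf, 3));
	return 0;
}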
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ ... @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
u32 pend_size,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
- struct hv_page_buffer **pb,
+ struct hv_page_buffer *pb,
struct sk_buff *skb)
{
char *start = net_device->send_buf;
@@ ... @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}

for (i = 0; i < page_count; i++) {
- char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
- u32 offset = (*pb)[i].offset;
- u32 len = (*pb)[i].len;
+ char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
+ u32 offset = pb[i].offset;
+ u32 len = pb[i].len;
memcpy(dest, (src + offset), len);
msg_size += len;
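With the parameter flattened, the copy loop indexes the vector directly as pb[i] instead of (*pb)[i]. Each element describes one fragment by page frame number, offset, and length, and the loop gathers the fragments into the contiguous send buffer. A simplified user-space model of that gather; a flat mem array stands in for the kernel's phys_to_virt() mapping, and all names are hypothetical:

#include <string.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct page_buffer {          /* stand-in for struct hv_page_buffer */
	unsigned long pfn;    /* page frame number */
	unsigned int offset;  /* byte offset within the page */
	unsigned int len;     /* bytes to copy from this fragment */
};

/* Gather scattered fragments into one contiguous destination buffer,
 * the way netvsc_copy_to_send_buf() fills a send-buffer section. */
static unsigned int gather(char *dest, const char *mem,
			   const struct page_buffer *pb, int page_count)
{
	unsigned int msg_size = 0;

	for (int i = 0; i < page_count; i++) {
		const char *src = mem + (pb[i].pfn << PAGE_SHIFT);

		memcpy(dest, src + pb[i].offset, pb[i].len);
		msg_size += pb[i].len;
		dest += pb[i].len;
	}
	return msg_size;
}

int main(void)
{
	static char mem[2 << PAGE_SHIFT];   /* two fake "pages" */
	char out[16];
	struct page_buffer pb[2] = {
		{ .pfn = 0, .offset = 5, .len = 5 },
		{ .pfn = 1, .offset = 0, .len = 5 },
	};

	memcpy(mem + 5, "hello", 5);
	memcpy(mem + (1ul << PAGE_SHIFT), "world", 5);

	printf("%u bytes: %.10s\n", gather(out, mem, pb, 2), out);
	return 0;
}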
@@ ... @@ static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
- struct hv_page_buffer **pb,
+ struct hv_page_buffer *pb,
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
@@ ... @@ static inline int netvsc_send_pkt(
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
- struct hv_page_buffer *pgbuf;
u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
@@ ... @@ static inline int netvsc_send_pkt(
return -ENODEV;

if (packet->page_buf_cnt) {
- pgbuf = packet->cp_partial ? (*pb) +
- packet->rmsg_pgcnt : (*pb);
+ if (packet->cp_partial)
+ pb += packet->rmsg_pgcnt;
+
ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
- pgbuf,
- packet->page_buf_cnt,
+ pb, packet->page_buf_cnt,
&nvmsg,
sizeof(struct nvsp_message),
req_id,
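The cp_partial case (the first rmsg_pgcnt entries were already copied into the send buffer, so the page-buffer part of the send must skip them) now reduces to a conditional pointer bump instead of a ternary through the double pointer, and the pgbuf temporary disappears. A small sketch showing the two forms land on the same element; the names are hypothetical:

#include <assert.h>
#include <stdio.h>

struct buf_elem { unsigned int len; };  /* stand-in for struct hv_page_buffer */

int main(void)
{
	struct buf_elem page_buf[4] = { { 1 }, { 2 }, { 3 }, { 4 } };
	struct buf_elem *pb = page_buf;
	int cp_partial = 1, rmsg_pgcnt = 2;

	/* Old form: compute the start element via a temporary. */
	struct buf_elem *pgbuf = cp_partial ? pb + rmsg_pgcnt : pb;

	/* New form: bump the pointer itself.  In netvsc_send_pkt() this is
	 * a by-value parameter, so the caller's array is untouched. */
	if (cp_partial)
		pb += rmsg_pgcnt;

	assert(pgbuf == pb);                       /* same starting element */
	printf("first fragment after skip: len=%u\n", pb->len);   /* 3 */
	return 0;
}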
@@ ... @@
int netvsc_send(struct net_device_context *ndev_ctx,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
- struct hv_page_buffer **pb,
+ struct hv_page_buffer *pb,
struct sk_buff *skb)
{
struct netvsc_device *net_device
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ ... @@
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
- struct hv_page_buffer **page_buf)
+ struct hv_page_buffer *pb)
{
- struct hv_page_buffer *pb = *page_buf;
u32 slots_used = 0;
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
@@ ... @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
u32 hash;
- struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
- struct hv_page_buffer *pb = page_buf;
+ struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
/* We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If skb is scattered around
@@ ... @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
- skb, packet, &pb);
+ skb, packet, pb);
@@ ... @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* timestamp packet in software */
skb_tx_timestamp(skb);
- ret = netvsc_send(net_device_ctx, packet, rndis_msg, &pb, skb);
+ ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
if (likely(ret == 0))
return NETDEV_TX_OK;
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ ... @@
static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
- int ret;
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+ int ret;
/* Setup the packet to send it */
packet = &req->pkt;
@@ ... @@ static int rndis_filter_send_request(struct rndis_device *dev,
}

rcu_read_lock_bh();
- ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL);
+ ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
rcu_read_unlock_bh();
return ret;
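For contrast, pointer-to-pointer would only be warranted if some callee had to hand a different buffer back to its caller, and nothing on this send path does, which is what makes the flattening safe. A sketch of the one pattern that genuinely needs the extra level; pick_buffer is hypothetical:

#include <stdio.h>

struct buf_elem { unsigned int len; };

static struct buf_elem small[1] = { { 16 } };
static struct buf_elem large[1] = { { 4096 } };

/* Reassigning the caller's pointer is the only reason to take T **. */
static void pick_buffer(struct buf_elem **pb, int want_large)
{
	*pb = want_large ? large : small;
}

int main(void)
{
	struct buf_elem *pb = small;

	pick_buffer(&pb, 1);
	printf("picked len=%u\n", pb->len);   /* 4096 */
	return 0;
}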