#define MAX_PENDING_REQS 256
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return page_to_pfn(netbk->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
- unsigned int idx)
+ u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
sizeof(struct iphdr) + MAX_IPOPTLEN + \
sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
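+/* The pending_idx of an in-flight request is stashed in the frag's
+ * page_offset field (it was previously cast into frag->page) until the
+ * real page, offset and size are installed; these helpers make that
+ * aliasing explicit.
+ */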
+static u16 frag_get_pending_idx(skb_frag_t *frag)
+{
+ return (u16)frag->page_offset;
+}
+
+static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
+{
+ frag->page_offset = pending_idx;
+}
+
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
- skb_shinfo(skb)->frags[i].page,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].size,
skb_shinfo(skb)->frags[i].page_offset,
&head);
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
struct sk_buff *skb,
- unsigned long pending_idx)
+ u16 pending_idx)
{
struct page *page;
page = alloc_page(GFP_KERNEL|__GFP_COLD);
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
- unsigned long pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < shinfo->nr_frags; i++, txp++) {
struct page *page;
memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
xenvif_get(vif);
pending_tx_info[pending_idx].vif = vif;
- frags[i].page = (void *)pending_idx;
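+		/* Only the pending_idx is recorded here; the real page is
+		 * installed by xen_netbk_fill_frags() once the grant copies
+		 * have been checked.
+		 */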
+ frag_set_pending_idx(&frags[i], pending_idx);
}
return gop;
struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
- int pending_idx = *((u16 *)skb->data);
+ u16 pending_idx = *((u16 *)skb->data);
struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
struct xenvif *vif = pending_tx_info[pending_idx].vif;
struct xen_netif_tx_request *txp;
}
/* Skip first skb fragment if it is on same page as header fragment. */
- start = ((unsigned long)shinfo->frags[0].page == pending_idx);
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_ring_idx_t index;
- pending_idx = (unsigned long)shinfo->frags[i].page;
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = (++gop)->status;
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = (unsigned long)shinfo->frags[i].page;
+			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
- unsigned long pending_idx;
+ struct page *page;
+ u16 pending_idx;
- pending_idx = (unsigned long)frag->page;
+ pending_idx = frag_get_pending_idx(frag);
txp = &netbk->pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
- frag->size = txp->size;
- frag->page_offset = txp->offset;
-
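+		/* Replace the pending_idx carried in the frag with the real
+		 * backing page, using the offset and size from the original
+		 * request.
+		 */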
+ page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
- skb_shinfo(skb)->frags[0].page =
- (void *)(unsigned long)pending_idx;
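+		/* The remainder of this request is handled as the first
+		 * frag, so it shares the header's pending_idx.
+		 */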
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ pending_idx);
} else {
- /* Discriminate from any valid pending_idx value. */
- skb_shinfo(skb)->frags[0].page = (void *)~0UL;
+ frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
+ INVALID_PENDING_IDX);
}
__skb_queue_tail(&netbk->tx_queue, skb);