void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
				    struct igb_tx_buffer *buffer_info)
{
	if (buffer_info->dma) {
-		if (buffer_info->mapped_as_page)
+		if (buffer_info->tx_flags & IGB_TX_FLAGS_MAPPED_AS_PAGE)
			dma_unmap_page(tx_ring->dev,
				       buffer_info->dma,
				       buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = NULL;
-	buffer_info->mapped_as_page = false;
}
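
The struct half of this change is not in the excerpt. Presumably igb.h drops the mapped_as_page field from struct igb_tx_buffer and repurposes tx_flags (which previously held a copy of skb_shinfo(skb)->tx_flags, see below) to carry the IGB_TX_FLAGS_* bits; a minimal sketch, not the literal hunk:

struct igb_tx_buffer {
	...
	u32 tx_flags;	/* IGB_TX_FLAGS_* bits; replaces the mapped_as_page
			 * bool and the old SKBTX_* copy (assumed layout) */
};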
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT 16
-
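
These defines are not simply deleted; for igb_unmap_and_free_tx_resource() above to test IGB_TX_FLAGS_MAPPED_AS_PAGE they must stay visible outside igb_main.c, so presumably they move into igb.h and gain a bit for the page-mapping state. The added side is missing from the excerpt; it likely reads along these lines (the 0x00000020 value is an assumption, the next free bit):

+#define IGB_TX_FLAGS_CSUM 0x00000001
+#define IGB_TX_FLAGS_VLAN 0x00000002
+#define IGB_TX_FLAGS_TSO 0x00000004
+#define IGB_TX_FLAGS_IPV4 0x00000008
+#define IGB_TX_FLAGS_TSTAMP 0x00000010
+#define IGB_TX_FLAGS_MAPPED_AS_PAGE 0x00000020
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16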
void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
		     u32 type_tucmd, u32 mss_l4len_idx)
{
	...
}

#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
-			     struct igb_tx_buffer *first)
+			     struct igb_tx_buffer *first, u32 tx_flags)
{
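	/* tx_flags carries the caller's IGB_TX_FLAGS_* bits; every buffer
	 * mapped below records them in buffer_info->tx_flags, so unmap and
	 * timestamp handling can key off the buffer alone
	 */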
	struct igb_tx_buffer *buffer_info;
	struct device *dev = tx_ring->dev;
	...

	buffer_info = &tx_ring->tx_buffer_info[i];
	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = hlen;
+	buffer_info->tx_flags = tx_flags;
	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_info->dma))
		goto dma_error;
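
	/* everything mapped in the loop below comes from a frag page; set the
	 * page bit once here so each frag buffer records it, while the head
	 * buffer above keeps it clear and is unmapped with dma_unmap_single()
	 */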
+	tx_flags |= IGB_TX_FLAGS_MAPPED_AS_PAGE;
+
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
		unsigned int len = frag->size;
		...

		buffer_info = &tx_ring->tx_buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
-		buffer_info->mapped_as_page = true;
+		buffer_info->tx_flags = tx_flags;
		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buffer_info->dma))
			goto dma_error;
	}

	buffer_info->skb = skb;
-	buffer_info->tx_flags = skb_shinfo(skb)->tx_flags;
	/* multiply data chunks by size of headers */
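	/* e.g. a TSO skb with skb->len = 3000, hlen = 66 and gso_segs = 3
	 * counts (3 - 1) * 66 + 3000 = 3132 bytes on the wire
	 */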
	buffer_info->bytecount = ((gso_segs - 1) * hlen) + skb->len;
	buffer_info->gso_segs = gso_segs;
	...

dma_error:
	dev_err(dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
-	buffer_info->mapped_as_page = false;
	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}

	...
	/*
	 * count reflects descriptors mapped, if 0 or less then mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
-	count = igb_tx_map(tx_ring, skb, first);
+	count = igb_tx_map(tx_ring, skb, first, tx_flags);
	if (!count) {
		dev_kfree_skb_any(skb);
		first->time_stamp = 0;
		...
	}

static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
			    struct igb_tx_buffer *buffer_info)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	...
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
+	if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;
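
The producer side of IGB_TX_FLAGS_TSTAMP is outside the excerpt; presumably the transmit path now translates the skb's SKBTX_HW_TSTAMP request into the ring flag before calling igb_tx_map(), roughly (a sketch, not the literal hunk):

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}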