skb->data,
skb_tailroom(skb),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev,
- np->put_rx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
skb->data,
skb_tailroom(skb),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev,
- np->put_rx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_rx_ctx->dma))) {
kfree_skb(skb);
goto packet_dropped;
}
np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data + offset, bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev,
- np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
frag, offset,
bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
skb->data + offset, bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev,
- np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* on DMA mapping error - drop the packet */
dev_kfree_skb_any(skb);
u64_stats_update_begin(&np->swstats_tx_syncp);
bcnt,
DMA_TO_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ np->put_tx_ctx->dma))) {
/* Unwind the mapped fragments */
do {
test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
skb_tailroom(tx_skb),
DMA_FROM_DEVICE);
- if (dma_mapping_error(&np->pci_dev->dev,
- test_dma_addr)) {
+ if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+ test_dma_addr))) {
dev_kfree_skb_any(tx_skb);
goto out;
}
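
A minimal sketch of the pattern these hunks apply, shown outside the driver for clarity: map an skb buffer for DMA and annotate the mapping-error check with unlikely() so the drop/unwind path is treated as cold. The struct and function names below (demo_priv, demo_map_rx_skb) are illustrative only and not part of forcedeth; only dma_map_single(), dma_mapping_error(), unlikely(), skb_tailroom() and kfree_skb() are real kernel APIs.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

struct demo_priv {
	struct pci_dev *pci_dev;
};

static int demo_map_rx_skb(struct demo_priv *np, struct sk_buff *skb,
			   dma_addr_t *dma)
{
	/* Map the packet buffer for device-to-memory (RX) DMA. */
	*dma = dma_map_single(&np->pci_dev->dev, skb->data,
			      skb_tailroom(skb), DMA_FROM_DEVICE);

	/*
	 * Mapping failures are rare, so the check is wrapped in
	 * unlikely() to keep the error handling off the hot path,
	 * mirroring the hunks above.
	 */
	if (unlikely(dma_mapping_error(&np->pci_dev->dev, *dma))) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	return 0;
}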