}
else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)__va(bdp->cbd_bufaddr),
- pkt_len-4, 0);
+ pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
}
else {
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)__va(bdp->cbd_bufaddr),
- pkt_len, 0);
+ pkt_len);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
}
else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
cep->rx_vaddr[bdp - cep->rx_bd_base],
- pkt_len-4, 0);
+ pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
fep->stats.rx_dropped++;
} else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb, data, pkt_len-4, 0);
+ skb_copy_to_linear_data(skb, data, pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
if (skb != NULL) {
skb_reserve(skb, 2); /* 16 byte alignment */
skb_put(skb,totlen);
- eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
skb_put (skb, pkt_size);
skb_reserve (skb, 2); /* 16 byte align */
skb_put (skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans (skb, dev);
netif_rx (skb);
dev->last_rx = jiffies;
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0);
+ skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
skb->protocol=eth_type_trans(skb,dev);
#if 0
printk(KERN_DEBUG "RX pkt type 0x%04x from ",
skb_reserve(skb, 2);
dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
length, DMA_FROM_DEVICE);
- eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0);
+ skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, dev);
continue;
}
skb_reserve(skb, 2); /* 16 byte IP header align */
- eth_copy_and_sum(skb,
- (unsigned char *)pDB->vaddr, frmlen, 0);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
skb_put(skb, frmlen);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb); /* pass the packet to upper layers */
PCI_DMA_FROMDEVICE);
/* 16 byte align the IP header */
skb_reserve (skb, 2);
- eth_copy_and_sum (skb,
+ skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc->fraginfo &
#if 1 || USE_IP_CSUM
/* Packet is in one chunk -- we can copy + cksum. */
- eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
skb_copy_from_linear_data(sp->rx_skbuff[entry],
ep->rx_ring[entry].bufaddr,
ep->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(ep->pci_dev,
ep->rx_ring[entry].bufaddr,
/* Call copy + cksum if available. */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb,
- np->cur_rx->skbuff->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->cur_rx->skbuff->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
fep->stats.rx_dropped++;
} else {
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb, data, pkt_len-4, 0);
+ skb_copy_to_linear_data(skb, data, pkt_len-4);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
PCI_DMA_FROMDEVICE);
/* Call copy + cksum if available. */
#if 1 || USE_IP_COPYSUM
- eth_copy_and_sum(skb,
- hmp->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ hmp->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
skb = dev_alloc_skb(desc->pkt_length + 2);
if (likely(skb != NULL)) {
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, buf, desc->pkt_length, 0);
+ skb_copy_to_linear_data(skb, buf, desc->pkt_length);
skb_put(skb, desc->pkt_length);
skb->protocol = eth_type_trans(skb, nds[desc->channel]);
}
skb_reserve(skb,2); /* 16 byte align */
skb_put(skb,pkt_len); /* Make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
- pkt_len,0);
+ pkt_len);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
np->rx_dma[entry],
buflen,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
- np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb,
+ np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_dma[entry],
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
dev->last_rx = jiffies;
#ifdef RCV_VIA_SKB
if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
}
else {
struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
}
#else
skb_put(skb,len);
- eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
p->stats.rx_packets++;
p->stats.rx_bytes += len;
if (skb) {
skb_reserve (skb, 2); /* 16 byte align the IP fields. */
- eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0);
+ skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
skb_put (skb, pkt_size);
skb->protocol = eth_type_trans (skb, dev);
lp->rx_dma_addr[entry],
pkt_len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
- pkt_len, 0);
+ pkt_len);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
pkt_len,
lp->stats.rx_packets++;
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *) pData,
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
skb_put(skb, len);
/* Copy out of kseg1 to avoid silly cache flush. */
- eth_copy_and_sum(skb, pkt_pointer + 2, len, 0);
+ skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
skb->protocol = eth_type_trans(skb, dev);
/* We don't want to receive our own packets */
skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
if (skb) {
skb_reserve(skb, NET_IP_ALIGN);
- eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+ skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
*sk_buff = skb;
sis190_give_to_asic(desc, rx_buf_sz);
ret = 0;
pci_dma_sync_single_for_cpu(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
np->rx_info[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
{
skb_reserve(skb,2);
skb_put(skb,totlen);
- eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0);
+ skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
p->stats.rx_packets++;
skb_reserve( skb, 2 ); /* 16 byte align */
skb_put( skb, pkt_len ); /* Make room */
-// skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
PKTBUF_ADDR(head),
- pkt_len, 0);
+ pkt_len);
skb->protocol = eth_type_trans( skb, dev );
netif_rx( skb );
sbus_dma_sync_single_for_cpu(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
- eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
+ skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
sbus_dma_sync_single_for_device(bp->bigmac_sdev,
this->rx_addr, len,
SBUS_DMA_FROMDEVICE);
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,
desc->frag[0].addr,
np->rx_buf_sz,
skb_reserve(skb, 2); /* 16 byte align */
skb_put(skb, len); /* make room */
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
(unsigned char *)&(ib->rx_buf [entry][0]),
- len, 0);
+ len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
- len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
qep->dev->last_rx = jiffies;
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
tp->rx_buffers[entry].mapping,
pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
goto out;
}
skb_reserve(skb, 2);
- eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0);
+ skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
#if ! defined(__alpha__)
- eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
- pkt_len, 0);
+ skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
+ pkt_len);
skb_put(skb, pkt_len);
#else
memcpy(skb_put(skb, pkt_len),
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr,
PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
if (!(skb = dev_alloc_skb(pkt_len)))
return;
- eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0);
+ skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, catc->netdev);
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0);
+ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len);
skb_put(skb, pkt_len);
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
- eth_copy_and_sum(skb,
+ skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
- pkt_len, 0);
+ pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(rp->pdev,
rp->rx_skbuff_dma[entry],
} else {
skb->dev = dev;
skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
- eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0);
+ skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
wl3501_receive(this, skb->data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header */
- eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
+ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
skb_put(skb, pkt_len);
pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
yp->rx_buf_sz,
struct hh_cache *hh);
extern struct net_device *alloc_etherdev(int sizeof_priv);
-static inline void eth_copy_and_sum (struct sk_buff *dest,
- const unsigned char *src,
- int len, int base)
-{
- memcpy (dest->data, src, len);
-}
/**
* is_zero_ether_addr - Determine if given Ethernet address is all zeros.
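
For reference, this is a pure rename at every call site: eth_copy_and_sum() never summed anything. As the removed definition above shows, it ignored its "base" argument and performed a plain memcpy() into the head of the skb. The replacement, skb_copy_to_linear_data() from include/linux/skbuff.h (as defined at the time of this patch), does the identical copy, which is why each caller simply drops the trailing 0:

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

A sketch of the conversion pattern in a typical receive path (rx_buf here is an illustrative placeholder, not a name taken from the patch):

	skb_reserve(skb, 2);		/* 16 byte align the IP header */
	skb_put(skb, pkt_len);		/* make room */
	/* was: eth_copy_and_sum(skb, rx_buf, pkt_len, 0); */
	skb_copy_to_linear_data(skb, rx_buf, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

Note that in the "#if ! defined(__alpha__)" hunks above, only the non-Alpha arm is touched: on Alpha those drivers already bypass the helper and copy with memcpy(skb_put(skb, pkt_len), ...) directly, so the #else path has no call to convert.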