}
prepare_request(req, io_req,
(unsigned long long) req->sector << 9,
- sg->offset, sg->length, sg->page);
+ sg->offset, sg->length, sg_page(sg));
last_sectors = sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
if (pad_buf) {
struct scatterlist *psg = &qc->pad_sgent;
- void *addr = kmap_atomic(psg->page, KM_IRQ0);
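+ /* ->page is gone from struct scatterlist; sg_page() pulls the page
+  * pointer out of sg->page_link, which also carries the chain and
+  * end-of-list marker bits. */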
+ void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(addr + psg->offset, pad_buf, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
* data in this function or read data in ata_sg_clean.
*/
offset = lsg->offset + lsg->length - qc->pad_len;
- psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+ sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
psg->offset = offset_in_page(offset);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- void *addr = kmap_atomic(psg->page, KM_IRQ0);
+ void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
memcpy(pad_buf, addr + psg->offset, qc->pad_len);
kunmap_atomic(addr, KM_IRQ0);
}
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
- page = qc->cursg->page;
+ page = sg_page(qc->cursg);
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
sg = qc->cursg;
- page = sg->page;
+ page = sg_page(sg);
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
struct scatterlist *sg = scsi_sglist(cmd);
if (sg) {
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
buflen = sg->length;
} else {
buf = NULL;
Command->V1.ScatterGatherList =
(DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
+ sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
} else {
Command->cmd_sglist = Command->V2.ScatterList;
Command->V2.ScatterGatherList =
Command->V2.RequestSense =
(DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
Command->V2.RequestSenseDMA = RequestSenseDMA;
+ sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
}
}
return true;
(int)creq->nr_sectors);
#endif /* CCISS_DEBUG */
- memset(tmp_sg, 0, sizeof(tmp_sg));
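+ /* sg_init_table() zeroes the table as memset() did, and additionally
+  * marks the last entry as end-of-list for the sg helpers. */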
+ sg_init_table(tmp_sg, MAXSGENTRIES);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* get the DMA records for the setup */
for (i = 0; i < seg; i++) {
c->SG[i].Len = tmp_sg[i].length;
- temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
+ temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
c->SG[i].Addr.lower = temp64.val32.lower;
DBGPX(
printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
);
+ sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
/* Now do all the DMA Mappings */
{
c->req.sg[i].size = tmp_sg[i].length;
c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
- tmp_sg[i].page,
+ sg_page(&tmp_sg[i]),
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
#include <linux/crypto.h>
#include <linux/blkdev.h>
#include <linux/loop.h>
+#include <linux/scatterlist.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
.tfm = tfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
- struct scatterlist sg_out = { NULL, };
- struct scatterlist sg_in = { NULL, };
+ struct scatterlist sg_out;
+ struct scatterlist sg_in;
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
int err;
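+ /* Even one-entry tables must be initialized now so the end-of-list
+  * marker is set before the crypto layer walks them. */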
+ sg_init_table(&sg_out, 1);
+ sg_init_table(&sg_in, 1);
+
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
u32 iv[4] = { 0, };
iv[0] = cpu_to_le32(IV & 0xffffffff);
- sg_in.page = in_page;
+ sg_set_page(&sg_in, in_page);
sg_in.offset = in_offs;
sg_in.length = sz;
- sg_out.page = out_page;
+ sg_set_page(&sg_out, out_page);
sg_out.offset = out_offs;
sg_out.length = sz;
op = VD_OP_BWRITE;
}
+ sg_init_table(sg, port->ring_cookies);
nsg = blk_rq_map_sg(req->q, req, sg);
len = 0;
host->n_msgs++;
assert(host->n_msgs <= CARM_MAX_REQ);
+ sg_init_table(crq->sg, CARM_MAX_REQ_SG);
return crq;
}
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+ sg_init_table(cmd->sgv, UB_MAX_REQ_SG);
blkdev_dequeue_request(rq);
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
- usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
- page_address(sg->page) + sg->offset, sg->length,
- ub_urb_complete, sc);
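+ /* sg_virt(sg) is shorthand for page_address(sg_page(sg)) + sg->offset. */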
+ usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
+ sg->length, ub_urb_complete, sc);
sc->work_urb.actual_length = 0;
sc->work_urb.error_count = 0;
sc->work_urb.status = 0;
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
- sg->page = virt_to_page(sc->top_sense);
+ sg_set_page(sg, virt_to_page(sc->top_sense));
sg->offset = (unsigned long)sc->top_sense & (PAGE_SIZE-1);
sg->length = UB_SENSE_SIZE;
scmd->len = UB_SENSE_SIZE;
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
- sg->page = virt_to_page(p);
+ sg_set_page(sg, virt_to_page(p));
sg->offset = (unsigned long)p & (PAGE_SIZE-1);
sg->length = 8;
cmd->len = 8;
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/vio.h>
d = req->rq_disk->private_data;
/* Now build the scatter-gather list */
+ sg_init_table(sg, VIOMAXBLOCKDMA);
nsg = blk_rq_map_sg(req->q, req, sg);
nsg = dma_map_sg(d->dev, sg, nsg, direction);
* than two possibly non-adjacent physical 4kB pages.
*/
/* group sequential buffers into one large buffer */
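+ /* sg_phys(sg) is page_to_phys(sg_page(sg)) + sg->offset. */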
- addr = page_to_phys(sg->page) + sg->offset;
+ addr = sg_phys(sg);
size = sg_dma_len(sg);
while (--i) {
sg = sg_next(sg);
- if ((addr + size) != page_to_phys(sg->page) + sg->offset)
+ if ((addr + size) != sg_phys(sg))
break;
size += sg_dma_len(sg);
}
if (!hwif->sg_max_nents)
hwif->sg_max_nents = PRD_ENTRIES;
- hwif->sg_table = kzalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
GFP_KERNEL);
if (!hwif->sg_table) {
printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
goto out;
}
+
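+ /* sg_init_table() replaces the old kzalloc() zeroing and also sets
+  * the end-of-list marker. */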
+ sg_init_table(hwif->sg_table, hwif->sg_max_nents);
if (init_irq(hwif) == 0)
goto done;
hwif->cursg = sg;
}
- page = cursg->page;
+ page = sg_page(cursg);
offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
/* get the current page and offset */
if (iswrite) {
if(!put_source_flags(ahwif->tx_chan,
- (void*)(page_address(sg->page)
- + sg->offset),
+ (void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
} else
{
if(!put_dest_flags(ahwif->rx_chan,
- (void*)(page_address(sg->page)
- + sg->offset),
+ (void*) sg_virt(sg),
tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__FUNCTION__, __LINE__);
unsigned long va =
(unsigned long)dma->kvirt + (i << PAGE_SHIFT);
- dma->sglist[i].page = vmalloc_to_page((void *)va);
+ sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va));
dma->sglist[i].length = PAGE_SIZE;
}
cmd->dma_size = sgpnt[0].length;
cmd->dma_type = CMD_DMA_PAGE;
cmd->cmd_dma = dma_map_page(hi->host->device.parent,
- sgpnt[0].page, sgpnt[0].offset,
+ sg_page(&sgpnt[0]), sgpnt[0].offset,
cmd->dma_size, cmd->dma_dir);
orb->data_descriptor_lo = cmd->cmd_dma;
ib_dma_unmap_sg(dev, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
+ struct page *page = sg_page(&chunk->page_list[i]);
+
if (umem->writable && dirty)
- set_page_dirty_lock(chunk->page_list[i].page);
- put_page(chunk->page_list[i].page);
+ set_page_dirty_lock(page);
+ put_page(page);
}
kfree(chunk);
}
chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ sg_init_table(chunk->page_list, chunk->nents);
for (i = 0; i < chunk->nents; ++i) {
if (vma_list &&
!is_vm_hugetlb_page(vma_list[i + off]))
umem->hugetlb = 0;
- chunk->page_list[i].page = page_list[i + off];
+ sg_set_page(&chunk->page_list[i], page_list[i + off]);
chunk->page_list[i].offset = 0;
chunk->page_list[i].length = PAGE_SIZE;
}
DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
- put_page(chunk->page_list[i].page);
+ put_page(sg_page(&chunk->page_list[i]));
kfree(chunk);
ret = -ENOMEM;
BUG_ON(!valid_dma_direction(direction));
for_each_sg(sgl, sg, nents, i) {
- addr = (u64) page_address(sg->page);
+ addr = (u64) page_address(sg_page(sg));
/* TODO: handle highmem pages */
if (!addr) {
ret = 0;
static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
- u64 addr = (u64) page_address(sg->page);
+ u64 addr = (u64) page_address(sg_page(sg));
if (addr)
addr += sg->offset;
for (i = 0; i < chunk->nents; i++) {
void *vaddr;
- vaddr = page_address(chunk->page_list[i].page);
+ vaddr = page_address(sg_page(&chunk->page_list[i]));
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail;
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
+ __free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
}
for (i = 0; i < chunk->npages; ++i) {
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
- lowmem_page_address(chunk->mem[i].page),
+ lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
}
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
- mem->page = alloc_pages(gfp_mask, order);
- if (!mem->page)
+ struct page *page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
return -ENOMEM;
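+ /* sg_set_page() stores the page in sg->page_link without disturbing
+  * the low chain/end marker bits. */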
+ sg_set_page(mem, page);
mem->length = PAGE_SIZE << order;
mem->offset = 0;
return 0;
if (!chunk)
goto fail;
+ sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &icm->chunk_list);
* so if we found the page, dma_handle has already
* been assigned to. */
if (chunk->mem[i].length > offset) {
- page = chunk->mem[i].page;
+ page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
+ struct page *pages[1];
int ret = 0;
u8 status;
int i;
}
ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- &db_tab->page[i].mem.page, NULL);
+ pages, NULL);
if (ret < 0)
goto out;
+ sg_set_page(&db_tab->page[i].mem, pages[0]);
db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
if (ret < 0) {
- put_page(db_tab->page[i].mem.page);
+ put_page(pages[0]);
goto out;
}
ret = -EINVAL;
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(db_tab->page[i].mem.page);
+ put_page(sg_page(&db_tab->page[i].mem));
goto out;
}
if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
- put_page(db_tab->page[i].mem.page);
+ put_page(sg_page(&db_tab->page[i].mem));
}
}
p = mem;
for_each_sg(sgl, sg, data->size, i) {
- from = kmap_atomic(sg->page, KM_USER0);
+ from = kmap_atomic(sg_page(sg), KM_USER0);
memcpy(p,
from + sg->offset,
sg->length);
p = mem;
for_each_sg(sgl, sg, sg_size, i) {
- to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+ to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
memcpy(to + sg->offset,
p,
sg->length);
for_each_sg(sgl, sg, data->dma_nents, i) {
/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
"offset: %ld sz: %ld\n", i,
- (unsigned long)page_to_phys(sg->page),
+ (unsigned long)sg_phys(sg),
(unsigned long)sg->offset,
(unsigned long)sg->length); */
end_addr = ib_sg_dma_address(ibdev, sg) +
iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
- sg->page, sg->offset,
+ sg_page(sg), sg->offset,
sg->length, ib_sg_dma_len(ibdev, sg));
}
ctx->idx_out < ctx->bio_out->bi_vcnt) {
struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
- struct scatterlist sg_in = {
- .page = bv_in->bv_page,
- .offset = bv_in->bv_offset + ctx->offset_in,
- .length = 1 << SECTOR_SHIFT
- };
- struct scatterlist sg_out = {
- .page = bv_out->bv_page,
- .offset = bv_out->bv_offset + ctx->offset_out,
- .length = 1 << SECTOR_SHIFT
- };
+ struct scatterlist sg_in, sg_out;
+
+ sg_init_table(&sg_in, 1);
+ sg_set_page(&sg_in, bv_in->bv_page);
+ sg_in.offset = bv_in->bv_offset + ctx->offset_in;
+ sg_in.length = 1 << SECTOR_SHIFT;
+
+ sg_init_table(&sg_out, 1);
+ sg_set_page(&sg_out, bv_out->bv_page);
+ sg_out.offset = bv_out->bv_offset + ctx->offset_out;
+ sg_out.length = 1 << SECTOR_SHIFT;
ctx->offset_in += sg_in.length;
if (ctx->offset_in >= bv_in->bv_len) {
sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(PageHighMem(pg));
- sglist[i].page = pg;
+ sg_set_page(&sglist[i], pg);
sglist[i].length = PAGE_SIZE;
}
return sglist;
memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
kunmap_atomic(src, KM_BOUNCE_READ);
local_irq_restore(flags);
- dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
+ sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset]);
}
else {
- dma->SGlist[map_offset].page = dma->map[map_offset];
+ sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset]);
}
offset = 0;
map_offset++;
sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
pg = vmalloc_to_page(virt);
if (NULL == pg)
goto err;
BUG_ON(PageHighMem(pg));
- sglist[i].page = pg;
+ sg_set_page(&sglist[i], pg);
sglist[i].length = PAGE_SIZE;
}
return sglist;
sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
if (NULL == sglist)
return NULL;
+ sg_init_table(sglist, nr_pages);
if (NULL == pages[0])
goto nopage;
if (PageHighMem(pages[0]))
/* DMA to highmem pages might not work */
goto highmem;
- sglist[0].page = pages[0];
+ sg_set_page(&sglist[0], pages[0]);
sglist[0].offset = offset;
sglist[0].length = PAGE_SIZE - offset;
for (i = 1; i < nr_pages; i++) {
goto nopage;
if (PageHighMem(pages[i]))
goto highmem;
- sglist[i].page = pages[i];
+ sg_set_page(&sglist[i], pages[i]);
sglist[i].length = PAGE_SIZE;
}
return sglist;
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kzalloc(sizeof(struct scatterlist),
+ mq->sg = kmalloc(sizeof(struct scatterlist),
GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
+ sg_init_table(mq->sg, 1);
- mq->bounce_sg = kzalloc(sizeof(struct scatterlist) *
+ mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
bouncesz / 512, GFP_KERNEL);
if (!mq->bounce_sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
+ sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
BUG_ON(dst_len == 0);
if (dst_size == 0) {
- dst_buf = page_address(dst->page) + dst->offset;
+ dst_buf = sg_virt(dst);
dst_size = dst->length;
}
if (src_size == 0) {
- src_buf = page_address(src->page) + src->offset;
+ src_buf = sg_virt(src);
src_size = src->length;
}
return 1;
}
- mq->sg[0].page = virt_to_page(mq->bounce_buf);
- mq->sg[0].offset = offset_in_page(mq->bounce_buf);
- mq->sg[0].length = 0;
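+ /* sg_init_one() initializes a one-entry table and points it at a
+  * kernel virtual buffer; the length is accumulated below. */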
+ sg_init_one(mq->sg, mq->bounce_buf, 0);
while (sg_len) {
mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
amount = min(size, sg->length);
size -= amount;
sg = &data->sg[host->transfer_index++];
pr_debug("sg = %p\n", sg);
- sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
+ sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
int index;
/* Swap the contents of the buffer */
- buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
for (index = 0; index < (sg->length / 4); index++)
kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
}
- flush_dcache_page(sg->page);
+ flush_dcache_page(sg_page(sg));
}
/* Is there another transfer to trigger? */
/* This is the pointer to the data buffer */
sg = &data->sg[host->pio.index];
- sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+ sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = data->sg[host->pio.index].length - host->pio.offset;
if (host->pio.index < host->dma.len) {
sg = &data->sg[host->pio.index];
- sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;
+ sg_ptr = sg_virt(sg) + host->pio.offset;
/* This is the space left inside the buffer */
sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;
if (host->flags & HOST_F_XMIT){
ret = au1xxx_dbdma_put_source_flags(channel,
- (void *) (page_address(sg->page) +
- sg->offset),
- len, flags);
+ (void *) sg_virt(sg), len, flags);
}
else {
ret = au1xxx_dbdma_put_dest_flags(channel,
- (void *) (page_address(sg->page) +
- sg->offset),
+ (void *) sg_virt(sg),
len, flags);
}
}
/* Convert back to virtual address */
- host->data_ptr = (u16*)(page_address(data->sg->page) + data->sg->offset);
+ host->data_ptr = (u16*)sg_virt(data->sg);
host->data_cnt = 0;
clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
&& dir == DMA_FROM_DEVICE)
dir = DMA_BIDIRECTIONAL;
- dma_addr = dma_map_page(dma_dev, sg->page, 0,
+ dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
PAGE_SIZE, dir);
if (direction == DMA_TO_DEVICE)
t->tx_dma = dma_addr + sg->offset;
}
/* allow pio too; we don't allow highmem */
- kmap_addr = kmap(sg->page);
+ kmap_addr = kmap(sg_page(sg));
if (direction == DMA_TO_DEVICE)
t->tx_buf = kmap_addr + sg->offset;
else
/* discard mappings */
if (direction == DMA_FROM_DEVICE)
- flush_kernel_dcache_page(sg->page);
- kunmap(sg->page);
+ flush_kernel_dcache_page(sg_page(sg));
+ kunmap(sg_page(sg));
if (dma_dev)
dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
+#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/scatterlist.h>
#include <asm/mach-types.h>
#include <asm/arch/board.h>
sg = host->data->sg + host->sg_idx;
host->buffer_bytes_left = sg->length;
- host->buffer = page_address(sg->page) + sg->offset;
+ host->buffer = sg_virt(sg);
if (host->buffer_bytes_left > host->total_bytes_left)
host->buffer_bytes_left = host->total_bytes_left;
}
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
- return page_address(host->cur_sg->page) + host->cur_sg->offset;
+ return sg_virt(host->cur_sg);
}
static inline int sdhci_next_sg(struct sdhci_host* host)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg[host->sg_pos].page, off >> PAGE_SHIFT);
+ pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
p_cnt = min(p_cnt, t_size);
if (r_data->flags & MMC_DATA_WRITE)
- tifm_sd_copy_page(host->bounce_buf.page,
+ tifm_sd_copy_page(sg_page(&host->bounce_buf),
r_data->blksz - t_size,
pg, p_off, p_cnt);
else if (r_data->flags & MMC_DATA_READ)
- tifm_sd_copy_page(pg, p_off, host->bounce_buf.page,
+ tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
r_data->blksz - t_size, p_cnt);
t_size -= p_cnt;
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
- return page_address(host->cur_sg->page) + host->cur_sg->offset;
+ return sg_virt(host->cur_sg);
}
static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
len = data->sg_len;
for (i = 0; i < len; i++) {
- sgbuf = page_address(sg[i].page) + sg[i].offset;
+ sgbuf = sg_virt(&sg[i]);
memcpy(dmabuf, sgbuf, sg[i].length);
dmabuf += sg[i].length;
}
len = data->sg_len;
for (i = 0; i < len; i++) {
- sgbuf = page_address(sg[i].page) + sg[i].offset;
+ sgbuf = sg_virt(&sg[i]);
memcpy(sgbuf, dmabuf, sg[i].length);
dmabuf += sg[i].length;
}
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
+ __free_pages(sg_page(&chunk->mem[i]),
get_order(chunk->mem[i].length));
}
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
- lowmem_page_address(chunk->mem[i].page),
+ lowmem_page_address(sg_page(&chunk->mem[i])),
sg_dma_address(&chunk->mem[i]));
}
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
- mem->page = alloc_pages(gfp_mask, order);
- if (!mem->page)
+ struct page *page;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
return -ENOMEM;
+ sg_set_page(mem, page);
mem->length = PAGE_SIZE << order;
mem->offset = 0;
return 0;
if (!chunk)
goto fail;
+ sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
chunk->npages = 0;
chunk->nsg = 0;
list_add_tail(&chunk->list, &icm->chunk_list);
* been assigned to.
*/
if (chunk->mem[i].length > offset) {
- page = chunk->mem[i].page;
+ page = sg_page(&chunk->mem[i]);
goto out;
}
offset -= chunk->mem[i].length;
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
#include "ppp_mppe.h"
static unsigned int
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
- sg[0].page = virt_to_page(address);
- sg[0].offset = offset_in_page(address);
- sg[0].length = length;
+ sg_init_one(sg, address, length);
return length;
}
(scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
struct scatterlist *sg = scsi_sglist(srb);
- char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ char *buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
char *buf;
unsigned long flags = 0;
local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(buf, tw_dev->generic_buffer_virt[request_id], sg->length);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
local_irq_restore(flags);
struct scatterlist *sg = scsi_sglist(cmd);
local_irq_save(flags);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len);
memcpy(buf, data, transfer_len);
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
}
/*
esp->dma_mmu_get_scsi_sgl(esp, sp);
else
sp->SCp.ptr =
- (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+ (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
}
}
if (esp->dma_advance_sg)
esp->dma_advance_sg (sp);
else
- sp->SCp.ptr = (char *) virt_to_phys((page_address(sp->SCp.buffer->page) + sp->SCp.buffer->offset));
+ sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
}
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
- NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
- sg->length);
+ NCR53c406a_pio_write(sg_virt(sg), sg->length);
}
REG0;
#endif /* USE_PIO */
outb(TRANSFER_INFO | DMA_OP, CMD_REG);
#if USE_PIO
scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
- NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
- sg->length);
+ NCR53c406a_pio_read(sg_virt(sg), sg->length);
}
REG0;
#endif /* USE_PIO */
int transfer_len;
struct scatterlist *sg = scsi_sglist(scsicmd);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len + offset);
transfer_len -= offset;
#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
#define SCSEM(SCpnt) SCDATA(SCpnt)->done
-#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
/* state handling */
static void seldi_run(struct Scsi_Host *shpnt);
#include "aha1542.h"
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
-#define SCSI_SG_PA(sgent) (isa_page_to_bus((sgent)->page) + (sgent)->offset)
+#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
static void BAD_DMA(void *address, unsigned int length)
{
int badseg)
{
printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n",
- badseg, nseg,
- page_address(sgp->page) + sgp->offset,
+ badseg, nseg, sg_virt(sgp),
(unsigned long long)SCSI_SG_PA(sgp),
sgp->length);
printk(KERN_CRIT "Bad segment list supplied to aha1542.c (%d, %d)\n", SCpnt->use_sg, i);
scsi_for_each_sg(SCpnt, sg, SCpnt->use_sg, i) {
printk(KERN_CRIT "%d: %p %d\n", i,
- (page_address(sg->page) +
- sg->offset), sg->length);
+ sg_virt(sg), sg->length);
};
printk(KERN_CRIT "cptr %x: ", (unsigned int) cptr);
ptr = (unsigned char *) &cptr[i];
/* 4 bytes: Areca io control code */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
if (scsi_sg_count(cmd) > 1) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
strncpy(&inqdata[32], "R001", 4); /* Product Revision */
sg = scsi_sglist(cmd);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memcpy(buffer, inqdata, sizeof(inqdata));
sg = scsi_sglist(cmd);
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
/* ++roman: Try to merge some scatter-buffers if they are at
* contiguous physical addresses.
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
/* ++roman: Try to merge some scatter-buffers if
* they are at contiguous physical addresses.
*/
SCp->Status = 0;
else {
SCp->buffer++;
- SCp->ptr = page_address(SCp->buffer->page) + SCp->buffer->offset;
+ SCp->ptr = sg_virt(SCp->buffer);
SCp->this_residual = SCp->buffer->length;
}
}
} else {
cmd->SCp.buffer = cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
}
cmd->SCp.Status = (cmd->SCp.this_residual != 0); /* TRUE as long as bytes
if (current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
} else
break;
if (!current_SC->SCp.this_residual && current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
}
}
if (current_SC->use_sg) {
current_SC->SCp.buffer = (struct scatterlist *) current_SC->request_buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
current_SC->SCp.buffers_residual = current_SC->use_sg - 1;
} else {
if (current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
} else
break;
&& current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
}
}
if (scsi_sg_count(current_SC)) {
current_SC->SCp.buffer = scsi_sglist(current_SC);
- current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
- + current_SC->SCp.buffer->offset;
+ current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
if (cpsum+cpnow > cpcount)
cpnow = cpcount - cpsum;
cpsum += cpnow;
- if (!sl->page) {
+ if (!sg_page(sl)) {
printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
ha->hanum);
return;
}
local_irq_save(flags);
- address = kmap_atomic(sl->page, KM_BIO_SRC_IRQ) + sl->offset;
+ address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
if (to_buffer)
memcpy(buffer, address, cpnow);
else
memcpy(address, buffer, cpnow);
- flush_dcache_page(sl->page);
+ flush_dcache_page(sg_page(sl));
kunmap_atomic(address, KM_BIO_SRC_IRQ);
local_irq_restore(flags);
if (cpsum == cpcount)
BUG_ON(scsi_sg_count(cmd) > 16);
scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
- ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
+ ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg_page(sg)) + sg->offset);
ld(shpnt)[ldn].sge[i].byte_length = sg->length;
}
scb->enable |= IM_POINTER_TO_LIST;
while (bcount) {
count = min(pc->sg->length - pc->b_count, bcount);
- if (PageHighMem(pc->sg->page)) {
+ if (PageHighMem(sg_page(pc->sg))) {
unsigned long flags;
local_irq_save(flags);
- buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+ buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
pc->sg->offset;
drive->hwif->atapi_input_bytes(drive,
buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
- buf = page_address(pc->sg->page) + pc->sg->offset;
+ buf = sg_virt(pc->sg);
drive->hwif->atapi_input_bytes(drive,
buf + pc->b_count, count);
}
while (bcount) {
count = min(pc->sg->length - pc->b_count, bcount);
- if (PageHighMem(pc->sg->page)) {
+ if (PageHighMem(sg_page(pc->sg))) {
unsigned long flags;
local_irq_save(flags);
- buf = kmap_atomic(pc->sg->page, KM_IRQ0) +
+ buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
pc->sg->offset;
drive->hwif->atapi_output_bytes(drive,
buf + pc->b_count, count);
kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
local_irq_restore(flags);
} else {
- buf = page_address(pc->sg->page) + pc->sg->offset;
+ buf = sg_virt(pc->sg);
drive->hwif->atapi_output_bytes(drive,
buf + pc->b_count, count);
}
cmd->SCp.buffer++;
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
/*
* Make sure that we transfer even number of bytes
cmd->SCp.buffer =
(struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
/* else fill the only available buffer */
cmd->SCp.buffer = NULL;
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = (char *) page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
/* Set up hardware registers */
}
scatterlist = sglist->scatterlist;
+ sg_init_table(scatterlist, num_elem);
sglist->order = order;
sglist->num_sg = num_elem;
/* Free up what we already allocated */
for (j = i - 1; j >= 0; j--)
- __free_pages(scatterlist[j].page, order);
+ __free_pages(sg_page(&scatterlist[j]), order);
kfree(sglist);
return NULL;
}
- scatterlist[i].page = page;
+ sg_set_page(&scatterlist[i], page);
}
return sglist;
int i;
for (i = 0; i < sglist->num_sg; i++)
- __free_pages(sglist->scatterlist[i].page, sglist->order);
+ __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
kfree(sglist);
}
scatterlist = sglist->scatterlist;
for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = bsize_elem;
}
if (len % bsize_elem) {
- kaddr = kmap(scatterlist[i].page);
+ struct page *page = sg_page(&scatterlist[i]);
+
+ kaddr = kmap(page);
memcpy(kaddr, buffer, len % bsize_elem);
- kunmap(scatterlist[i].page);
+ kunmap(page);
scatterlist[i].length = len % bsize_elem;
}
/* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
buffer[2] == 'P' && buffer[3] == 'P') {
kunmap_atomic(buffer - sg->offset, KM_IRQ0);
/* kmap_atomic() ensures addressability of the data buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
memcpy(buffer, &cdata[xfer_cnt], min_cnt);
kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
local_irq_restore(flags);
/* kmap_atomic() ensures addressability of the data buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+ buffer = kmap_atomic(sg_page(&sg[i]), KM_IRQ0) + sg[i].offset;
memcpy(&cdata[xfer_cnt], buffer, min_cnt);
kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
local_irq_restore(flags);
static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
- ibuf->sg.page = virt_to_page(vbuf);
- ibuf->sg.offset = offset_in_page(vbuf);
- ibuf->sg.length = size;
+ sg_init_one(&ibuf->sg, vbuf, size);
ibuf->sent = 0;
ibuf->use_sendmsg = 1;
}
static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
- ibuf->sg.page = sg->page;
+ sg_init_table(&ibuf->sg, 1);
+ sg_set_page(&ibuf->sg, sg_page(sg));
ibuf->sg.offset = sg->offset;
ibuf->sg.length = sg->length;
/*
* Fastpath: sg element fits into single page
*/
- if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
+ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg_page(sg)))
ibuf->use_sendmsg = 0;
else
ibuf->use_sendmsg = 1;
for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
char *dest;
- dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
+ dest = kmap_atomic(sg_page(&sg[i]), KM_SOFTIRQ0);
rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
sg[i].length, offset);
kunmap_atomic(dest, KM_SOFTIRQ0);
* slab case.
*/
if (buf->use_sendmsg)
- res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
+ res = sock_no_sendpage(sk, sg_page(&buf->sg), offset, size, flags);
else
- res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
+ res = tcp_conn->sendpage(sk, sg_page(&buf->sg), offset, size, flags);
if (res >= 0) {
conn->txdata_octets += res;
struct scatterlist *sg;
sg = scsi_sglist(cmd);
- buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+ buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
memset(buf, 0, cmd->cmnd[4]);
kunmap_atomic(buf - sg->offset, KM_IRQ0);
if( cmd->cmnd[0] == INQUIRY && !islogical ) {
sgl = scsi_sglist(cmd);
- if( sgl->page ) {
- c = *(unsigned char *)
- page_address((&sgl[0])->page) +
- (&sgl[0])->offset;
+ if( sg_page(sgl) ) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
printk(KERN_WARNING
"megaraid: invalid sg.\n");
caddr_t vaddr;
sgl = scsi_sglist(scp);
- if (sgl->page) {
- vaddr = (caddr_t)
- (page_address((&sgl[0])->page)
- + (&sgl[0])->offset);
+ if (sg_page(sgl)) {
+ vaddr = (caddr_t) sg_virt(&sgl[0]);
memset(vaddr, 0, scp->cmnd[4]);
}
&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
sgl = scsi_sglist(scp);
- if (sgl->page) {
- c = *(unsigned char *)
- (page_address((&sgl[0])->page) +
- (&sgl[0])->offset);
+ if (sg_page(sgl)) {
+ c = *(unsigned char *) sg_virt(&sgl[0]);
} else {
con_log(CL_ANN, (KERN_WARNING
"megaraid mailbox: invalid sg:%d\n",
void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
{
- sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
- sp->SCp.buffer->offset;
+ sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
void dma_advance_sg(Scsi_Cmnd *sp)
{
- sp->SCp.ptr = page_address(sp->SCp.buffer->page)+
- sp->SCp.buffer->offset;
+ sp->SCp.ptr = sg_virt(sp->SCp.buffer);
}
if (STp->raw) {
if (STp->buffer->syscall_result) {
for (i=0; i < STp->buffer->sg_segs; i++)
- memset(page_address(STp->buffer->sg[i].page),
+ memset(page_address(sg_page(&STp->buffer->sg[i])),
0, STp->buffer->sg[i].length);
strcpy(STp->buffer->b_data, "READ ERROR ON FRAME");
} else
for (i = 0, b_size = 0;
(i < STp->buffer->sg_segs) && ((b_size + STp->buffer->sg[i].length) <= OS_DATA_SIZE);
b_size += STp->buffer->sg[i++].length);
- STp->buffer->aux = (os_aux_t *) (page_address(STp->buffer->sg[i].page) + OS_DATA_SIZE - b_size);
+ STp->buffer->aux = (os_aux_t *) (page_address(sg_page(&STp->buffer->sg[i])) + OS_DATA_SIZE - b_size);
#if DEBUG
printk(OSST_DEB_MSG "%s:D: b_data points to %p in segment 0 at %p\n", name,
- STp->buffer->b_data, page_address(STp->buffer->sg[0].page));
+ STp->buffer->b_data, page_address(sg_page(&STp->buffer->sg[0])));
/* Try to allocate the first segment up to OS_DATA_SIZE and the others
big enough to reach the goal (code assumes no segments in place) */
for (b_size = OS_DATA_SIZE, order = OSST_FIRST_ORDER; b_size >= PAGE_SIZE; order--, b_size /= 2) {
- STbuffer->sg[0].page = alloc_pages(priority, order);
+ struct page *page = alloc_pages(priority, order);
+
STbuffer->sg[0].offset = 0;
- if (STbuffer->sg[0].page != NULL) {
+ if (page != NULL) {
+ sg_set_page(&STbuffer->sg[0], page);
STbuffer->sg[0].length = b_size;
- STbuffer->b_data = page_address(STbuffer->sg[0].page);
+ STbuffer->b_data = page_address(page);
break;
}
}
- if (STbuffer->sg[0].page == NULL) {
+ if (sg_page(&STbuffer->sg[0]) == NULL) {
printk(KERN_NOTICE "osst :I: Can't allocate tape buffer main segment.\n");
return 0;
}
/* Got initial segment of 'bsize,order', continue with same size if possible, except for AUX */
for (segs=STbuffer->sg_segs=1, got=b_size;
segs < max_segs && got < OS_FRAME_SIZE; ) {
- STbuffer->sg[segs].page =
- alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
+ struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);
STbuffer->sg[segs].offset = 0;
- if (STbuffer->sg[segs].page == NULL) {
+ if (page == NULL) {
if (OS_FRAME_SIZE - got <= (max_segs - segs) * b_size / 2 && order) {
b_size /= 2; /* Large enough for the rest of the buffers */
order--;
normalize_buffer(STbuffer);
return 0;
}
+ sg_set_page(&STbuffer->sg[segs], page);
STbuffer->sg[segs].length = (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - got) : b_size;
got += STbuffer->sg[segs].length;
STbuffer->buffer_size = got;
b_size < STbuffer->sg[i].length;
b_size *= 2, order++);
- __free_pages(STbuffer->sg[i].page, order);
+ __free_pages(sg_page(&STbuffer->sg[i]), order);
STbuffer->buffer_size -= STbuffer->sg[i].length;
}
#if DEBUG
for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count;
- res = copy_from_user(page_address(st_bp->sg[i].page) + offset, ubp, cnt);
+ res = copy_from_user(page_address(sg_page(&st_bp->sg[i])) + offset, ubp, cnt);
if (res)
return (-EFAULT);
do_count -= cnt;
for ( ; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count;
- res = copy_to_user(ubp, page_address(st_bp->sg[i].page) + offset, cnt);
+ res = copy_to_user(ubp, page_address(sg_page(&st_bp->sg[i])) + offset, cnt);
if (res)
return (-EFAULT);
do_count -= cnt;
i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length - offset < do_count ?
st_bp->sg[i].length - offset : do_count ;
- memset(page_address(st_bp->sg[i].page) + offset, 0, cnt);
+ memset(page_address(sg_page(&st_bp->sg[i])) + offset, 0, cnt);
do_count -= cnt;
offset = 0;
}
for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length < do_count ?
st_bp->sg[i].length : do_count ;
- memcpy(page_address(st_bp->sg[i].page), ptr, cnt);
+ memcpy(page_address(sg_page(&st_bp->sg[i])), ptr, cnt);
do_count -= cnt;
ptr += cnt;
}
for (i = 0; i < st_bp->sg_segs && do_count > 0; i++) {
cnt = st_bp->sg[i].length < do_count ?
st_bp->sg[i].length : do_count ;
- memcpy(ptr, page_address(st_bp->sg[i].page), cnt);
+ memcpy(ptr, page_address(sg_page(&st_bp->sg[i])), cnt);
do_count -= cnt;
ptr += cnt;
}
#define MSG_EXT_SDTR 0x01
/* scatter-gather table */
-# define BUFFER_ADDR ((char *)((unsigned int)(SCpnt->SCp.buffer->page) + SCpnt->SCp.buffer->offset))
+# define BUFFER_ADDR ((char *)sg_virt(SCpnt->SCp.buffer))
#endif /*__nsp_cs__*/
/* end */
scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
SYM53C500_pio_write(fast_pio, port_base,
- page_address(sg->page) + sg->offset,
- sg->length);
+ sg_virt(sg), sg->length);
}
REG0(port_base);
}
scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
SYM53C500_pio_read(fast_pio, port_base,
- page_address(sg->page) + sg->offset,
- sg->length);
+ sg_virt(sg), sg->length);
}
REG0(port_base);
}
cmd->SCp.buffer++;
cmd->SCp.this_residual =
cmd->SCp.buffer->length;
- cmd->SCp.ptr =
- page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
}
/* Now check to see if the drive is ready to comunicate */
/* if many buffers are available, start filling the first */
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
} else {
/* else fill the only available buffer */
cmd->SCp.buffer = NULL;
req_len = act_len = 0;
scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
if (active) {
- kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
len = sgpnt->length;
if ((req_len + len) > buflen) {
active = 0;
len = buflen - req_len;
}
memcpy(kaddr + sgpnt->offset, buf + req_len, len);
- flush_kernel_dcache_page(sgpnt->page);
+ flush_kernel_dcache_page(sg_page(sgpnt));
kunmap_atomic(kaddr, KM_IRQ0);
act_len += len;
}
req_len = fin = 0;
scsi_for_each_sg(cmd, sgpnt, scsi_sg_count(cmd), k) {
- kaddr = kmap_atomic(sgpnt->page, KM_IRQ0);
+ kaddr = kmap_atomic(sg_page(sgpnt), KM_IRQ0);
len = sgpnt->length;
if ((req_len + len) > buflen) {
len = buflen - req_len;
return ((priv->qabort == 1 ?
DID_ABORT : DID_RESET) << 16);
}
- buf = page_address(sg->page) + sg->offset;
+ buf = sg_virt(sg);
if (ql_pdma(priv, phase, buf, sg->length))
break;
}
scsi_for_each_sg(scp, sg, scp->use_sg, k) {
if (active) {
kaddr = (unsigned char *)
- kmap_atomic(sg->page, KM_USER0);
+ kmap_atomic(sg_page(sg), KM_USER0);
if (NULL == kaddr)
return (DID_ERROR << 16);
kaddr_off = (unsigned char *)kaddr + sg->offset;
sg = scsi_sglist(scp);
req_len = fin = 0;
for (k = 0; k < scp->use_sg; ++k, sg = sg_next(sg)) {
- kaddr = (unsigned char *)kmap_atomic(sg->page, KM_USER0);
+ kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
if (NULL == kaddr)
return -1;
kaddr_off = (unsigned char *)kaddr + sg->offset;
int i, err, nr_vecs = 0;
for_each_sg(sgl, sg, nsegs, i) {
- page = sg->page;
+ page = sg_page(sg);
off = sg->offset;
len = sg->length;
data_len += len;
if (unlikely(!sgl))
goto enomem;
- memset(sgl, 0, sizeof(*sgl) * sgp->size);
+ sg_init_table(sgl, sgp->size);
/*
* first loop through, set initial index and return value
if (prev)
sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+ /*
+ * if we have nothing left, mark the last segment as
+ * end-of-list
+ */
+ if (!left)
+ sg_mark_end(sgl, this);
+
/*
* don't allow subsequent mempool allocs to sleep, it would
* violate the mempool principle.
*offset = *offset - len_complete + sg->offset;
/* Assumption: contiguous pages can be accessed as "page + i" */
- page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
+ page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
*offset &= ~PAGE_MASK;
/* Bytes in this sg-entry from *offset to the end of the page */
for (i = 0; i < nobuffs; ++i)
printk("scsi%d : buffer %d address = %p length = %d\n",
hostno, i,
- page_address(buffer[i].page) + buffer[i].offset,
+ sg_virt(&buffer[i]),
buffer[i].length);
}
#endif
buffer = (struct scatterlist *) SCint->request_buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
} else {
DPRINTK (DEBUG_SG, "scsi%d : scatter gather not requested.\n", hostno);
buffer = NULL;
--nobuffs;
++buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
DPRINTK (DEBUG_SG,
"scsi%d : next scatter-gather buffer len = %d address = %08x\n",
hostno, len, data);
--nobuffs;
++buffer;
len = buffer->length;
- data = page_address(buffer->page) + buffer->offset;
+ data = sg_virt(buffer);
DPRINTK (DEBUG_SG, "scsi%d : next scatter-gather buffer len = %d address = %08x\n", hostno, len, data);
}
break;
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- page = virt_to_page(page_address(sg->page) + offset);
+ page = virt_to_page(page_address(sg_page(sg)) + offset);
get_page(page); /* increment page count */
break;
}
goto out_unlock; */
}
- sgl[0].page = pages[0];
+ sg_set_page(sgl, pages[0]);
sgl[0].offset = uaddr & ~PAGE_MASK;
if (nr_pages > 1) {
sgl[0].length = PAGE_SIZE - sgl[0].offset;
count -= sgl[0].length;
for (i=1; i < nr_pages ; i++) {
- sgl[i].page = pages[i];
+ sg_set_page(&sgl[i], pages[i]);
sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
count -= PAGE_SIZE;
}
int i;
for (i=0; i < nr_pages; i++) {
- struct page *page = sgl[i].page;
+ struct page *page = sg_page(&sgl[i]);
if (dirtied)
SetPageDirty(page);
scatter_elem_sz_prev = ret_sz;
}
}
- sg->page = p;
+ sg_set_page(sg, p);
sg->length = (ret_sz > num) ? num : ret_sz;
SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
onum = 1;
ksglen = sg->length;
- p = page_address(sg->page);
+ p = page_address(sg_page(sg));
for (j = 0, k = 0; j < onum; ++j) {
res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
if (res)
return res;
for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg->page)) {
+ p = page_address(sg_page(sg))) {
if (usglen <= 0)
break;
if (ksglen > usglen) {
} else {
int k;
- for (k = 0; (k < schp->k_use_sg) && sg->page;
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg);
++k, sg = sg_next(sg)) {
SCSI_LOG_TIMEOUT(5, printk(
"sg_remove_scat: k=%d, pg=0x%p, len=%d\n",
- k, sg->page, sg->length));
- sg_page_free(sg->page, sg->length);
+ k, sg_page(sg), sg->length));
+ sg_page_free(sg_page(sg), sg->length);
}
}
kfree(schp->buffer);
} else
onum = 1;
- p = page_address(sg->page);
+ p = page_address(sg_page(sg));
ksglen = sg->length;
for (j = 0, k = 0; j < onum; ++j) {
res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
return res;
for (; p; sg = sg_next(sg), ksglen = sg->length,
- p = page_address(sg->page)) {
+ p = page_address(sg_page(sg))) {
if (usglen <= 0)
break;
if (ksglen > usglen) {
if ((!outp) || (num_read_xfer <= 0))
return 0;
- for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, sg = sg_next(sg)) {
+ for (k = 0; (k < schp->k_use_sg) && sg_page(sg); ++k, sg = sg_next(sg)) {
num = sg->length;
if (num > num_read_xfer) {
- if (__copy_to_user(outp, page_address(sg->page),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num_read_xfer))
return -EFAULT;
break;
} else {
- if (__copy_to_user(outp, page_address(sg->page),
+ if (__copy_to_user(outp, page_address(sg_page(sg)),
num))
return -EFAULT;
num_read_xfer -= num;
sg = &(STbp->sg[0]);
frp = STbp->frp;
for (i=count=0; count < length; i++) {
- sg[i].page = frp[i].page;
+ sg_set_page(&sg[i], frp[i].page);
if (length - count > frp[i].length)
sg[i].length = frp[i].length;
else
}
/* Populate the scatter/gather list */
- sgl[0].page = pages[0];
+ sg_set_page(&sgl[0], pages[0]);
sgl[0].offset = uaddr & ~PAGE_MASK;
if (nr_pages > 1) {
sgl[0].length = PAGE_SIZE - sgl[0].offset;
count -= sgl[0].length;
for (i=1; i < nr_pages ; i++) {
+ sg_set_page(&sgl[i], pages[i]);
sgl[i].offset = 0;
- sgl[i].page = pages[i];
sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
count -= PAGE_SIZE;
}
int i;
for (i=0; i < nr_pages; i++) {
- struct page *page = sgl[i].page;
+ struct page *page = sg_page(&sgl[i]);
if (dirtied)
SetPageDirty(page);
#define HOSTNO instance->host_no
#define H_NO(cmd) (cmd)->device->host->host_no
-#define SGADDR(buffer) (void *)(((unsigned long)page_address((buffer)->page)) + \
- (buffer)->offset)
+#define SGADDR(buffer) ((void *)sg_virt((buffer)))
#ifdef SUPPORT_TAGS
#define MAXHOSTS 4
-#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
+#define SG_ADDRESS(buffer) ((char *) sg_virt((buffer)))
enum phases
{
static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg, void *addr, unsigned int length)
{
- memset(sg, 0, sizeof(struct scatterlist));
- sg->page = virt_to_page(addr);
- sg->length = length;
- sg->offset = (unsigned long)addr & ~PAGE_MASK;
+ sg_init_one(sg, addr, length);
return sg;
}
max = scsi_sg_count(SCpnt);
scsi_for_each_sg(SCpnt, sg, max, i) {
- mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
+ mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
mscp->sglist[i].num_bytes = sg->length;
transfer_length += sg->length;
}
if (cmd->use_sg) {
cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
cmd->SCp.buffers_residual = cmd->use_sg - 1;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
} else {
cmd->SCp.buffer = NULL;
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
- cmd->SCp.buffer->offset;
+ cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
}
if (!cmd->SCp.this_residual) /* avoid bogus setups */
return;
any2scsi(scb->maxlen, nseg * sizeof(Sgb));
scsi_for_each_sg(SCpnt, sg, nseg, i) {
- any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
+ any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
any2scsi(sgb[i].len, sg->length);
}
} else {
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_IOMMU)
io->urbs[i]->transfer_buffer = NULL;
#else
- io->urbs[i]->transfer_buffer =
- page_address(sg[i].page) + sg[i].offset;
+ io->urbs[i]->transfer_buffer = sg_virt(&sg[i]);
#endif
} else {
/* hc may use _only_ transfer_buffer */
- io->urbs [i]->transfer_buffer =
- page_address (sg [i].page) + sg [i].offset;
+ io->urbs [i]->transfer_buffer = sg_virt(&sg[i]);
len = sg [i].length;
}
context->fragment++;
mts_int_submit_urb(transfer,
context->data_pipe,
- page_address(sg[context->fragment].page) +
- sg[context->fragment].offset,
+ sg_virt(&sg[context->fragment]),
sg[context->fragment].length,
context->fragment + 1 == scsi_sg_count(context->srb) ?
mts_data_done : mts_do_sg);
return;
} else {
sg = scsi_sglist(srb);
- desc->context.data = page_address(sg[0].page) + sg[0].offset;
+ desc->context.data = sg_virt(&sg[0]);
desc->context.data_length = sg[0].length;
}
if (!sg)
return;
for (i = 0; i < nents; i++) {
- if (!sg [i].page)
+ if (!sg_page(&sg[i]))
continue;
- kfree (page_address (sg [i].page) + sg [i].offset);
+ kfree (sg_virt(&sg[i]));
}
kfree (sg);
}
* the *offset and *index values for the next loop. */
cnt = 0;
while (cnt < buflen) {
- struct page *page = sg->page +
+ struct page *page = sg_page(sg) +
((sg->offset + *offset) >> PAGE_SHIFT);
unsigned int poff =
(sg->offset + *offset) & (PAGE_SIZE-1);