int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
int i, offset;
+ unsigned long flags;
+
+ if (map_offset < 0)
+ return map_offset;
+
offset = dma_page->offset;
/* Fill SG Array with new values */
for (i = 0; i < dma_page->page_count; i++) {
- if (i == dma_page->page_count - 1) {
- dma->SGlist[map_offset].length = dma_page->tail;
+ unsigned int len = (i == dma_page->page_count - 1) ?
+ dma_page->tail : PAGE_SIZE - offset;
+
+ dma->SGlist[map_offset].length = len;
+ dma->SGlist[map_offset].offset = offset;
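+ /* For highmem user pages the data is copied into a lowmem bounce
+ * page and the SG entry points at the bounce page; lowmem pages
+ * are used directly. */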
+ if (PageHighMem(dma->map[map_offset])) {
+ void *src;
+
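+ /* Bounce pages are allocated on demand and kept for reuse; they
+ * are only released in ivtv_udma_free(). */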
+ if (dma->bouncemap[map_offset] == NULL)
+ dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
+ if (dma->bouncemap[map_offset] == NULL)
+ return -ENOMEM;
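+ /* Copy the user data out of the highmem page under a temporary
+ * atomic mapping, with interrupts disabled for the duration. */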
+ local_irq_save(flags);
+ src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
+ memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
+ kunmap_atomic(src, KM_BOUNCE_READ);
+ local_irq_restore(flags);
+ dma->SGlist[map_offset].page = dma->bouncemap[map_offset];
}
else {
- dma->SGlist[map_offset].length = PAGE_SIZE - offset;
+ dma->SGlist[map_offset].page = dma->map[map_offset];
}
- dma->SGlist[map_offset].offset = offset;
- dma->SGlist[map_offset].page = dma->map[map_offset];
offset = 0;
map_offset++;
}
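+ /* On success the next free SG index is returned so a second buffer
+ * can be appended behind this one; failures are reported as a
+ * negative errno. */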
return map_offset;
}
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr, void __user *userbuf, int size_in_bytes)
{
struct ivtv_dma_page_info user_dma;
struct ivtv_user_dma *dma = &itv->udma;
- int err;
+ int i, err;
IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
dma->page_count = user_dma.page_count;
/* Fill SG List with new values */
- ivtv_udma_fill_sg_list(dma, &user_dma, 0);
+ err = ivtv_udma_fill_sg_list(dma, &user_dma, 0);
+ if (err < 0) {
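+ /* A bounce page could not be allocated: release the user pages
+ * already taken before propagating the error. */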
+ for (i = 0; i < dma->page_count; i++) {
+ put_page(dma->map[i]);
+ }
+ dma->page_count = 0;
+ return err;
+ }
/* Map SG List */
dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
void ivtv_udma_free(struct ivtv *itv)
{
+ int i;
+
/* Unmap SG Array */
if (itv->udma.SG_handle) {
pci_unmap_single(itv->dev, itv->udma.SG_handle,
if (itv->udma.SG_length) {
pci_unmap_sg(itv->dev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
}
+
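+ /* Free any bounce pages that were allocated for highmem user buffers. */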
+ for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
+ if (itv->udma.bouncemap[i])
+ __free_page(itv->udma.bouncemap[i]);
+ }
}
/* Fill & map SG List */
- ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0));
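+ /* The inner call fills SG entries for the Y plane starting at index 0
+ * and returns the next free index, which becomes the starting offset
+ * for the UV plane; a negative result means a bounce page could not
+ * be allocated. */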
+ if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
+ IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
+ for (i = 0; i < dma->page_count; i++) {
+ put_page(dma->map[i]);
+ }
+ dma->page_count = 0;
+ return -ENOMEM;
+ }
dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
/* Fill SG Array with new values */