};
/* per-buffer bit flags */
-#define IVTV_F_B_NEED_BUF_SWAP 0 /* this buffer should be byte swapped */
+#define IVTV_F_B_NEED_BUF_SWAP (1 << 0) /* this buffer should be byte swapped */
/* per-stream, s_flags */
#define IVTV_F_S_DMA_PENDING 0 /* this stream has pending DMA */
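A stand-alone sketch (not part of the patch) of the distinction this hunk creates: IVTV_F_B_NEED_BUF_SWAP is now a bit mask that is OR'ed directly into the 16-bit b_flags, while IVTV_F_S_DMA_PENDING remains a bit number meant for the set_bit() family of helpers used on s_flags later in this patch.

#include <stdio.h>

/* Mock-up only; the two macro names come from the driver, everything else
 * is illustrative. b_flags takes masks directly, s_flags takes bit numbers. */
#define IVTV_F_B_NEED_BUF_SWAP (1 << 0)  /* mask, OR'ed into b_flags */
#define IVTV_F_S_DMA_PENDING   0         /* bit number, for set_bit()/test_bit() */

int main(void)
{
        unsigned short b_flags = 0;
        unsigned long s_flags = 0;

        b_flags |= IVTV_F_B_NEED_BUF_SWAP;        /* plain, non-atomic OR */
        s_flags |= 1UL << IVTV_F_S_DMA_PENDING;   /* what set_bit() does, minus atomicity */

        printf("need swap: %d, dma pending: %d\n",
               !!(b_flags & IVTV_F_B_NEED_BUF_SWAP),
               !!(s_flags & (1UL << IVTV_F_S_DMA_PENDING)));
        return 0;
}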
struct ivtv_buffer {
struct list_head list;
dma_addr_t dma_handle;
- unsigned long b_flags;
+ unsigned short b_flags;
+ unsigned short dma_xfer_cnt;
char *buf;
u32 bytesused;
struct ivtv_queue q_dma; /* waiting for DMA */
struct ivtv_queue q_predma; /* waiting for DMA */
+ /* DMA xfer counter; buffers belonging to the same DMA
+    xfer will have the same dma_xfer_cnt. */
+ u16 dma_xfer_cnt;
+
/* Base Dev SG Array for cx23415/6 */
struct ivtv_SG_element *SGarray;
struct ivtv_SG_element *PIOarray;
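The two new dma_xfer_cnt fields implement a simple tagging scheme: every buffer queued for one DMA transfer is stamped with the stream's current counter value, so a whole transfer can later be recognized (and moved) as a unit; the stealing loop added further down relies on this. A minimal stand-alone sketch of the idea, with all names and numbers illustrative:

#include <stdio.h>

struct buf { unsigned short dma_xfer_cnt; int bytesused; };

int main(void)
{
        /* five queued buffers: transfer #1 used two of them, transfer #2 three */
        struct buf q[5] = { {1, 100}, {1, 80}, {2, 120}, {2, 90}, {2, 30} };
        unsigned short want = 2;
        int total = 0;

        for (int i = 0; i < 5; i++)
                if (q[i].dma_xfer_cnt == want)
                        total += q[i].bytesused;  /* gather every piece of transfer #2 */

        printf("transfer %u spans %d bytes\n", (unsigned)want, total);
        return 0;
}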
s->SGarray[idx].src = cpu_to_le32(offset);
s->SGarray[idx].size = cpu_to_le32(s->buf_size);
buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
+ buf->dma_xfer_cnt = s->dma_xfer_cnt;
s->q_predma.bytesused += buf->bytesused;
size -= buf->bytesused;
/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
s->type == IVTV_ENC_STREAM_TYPE_VBI)
- set_bit(IVTV_F_B_NEED_BUF_SWAP, &buf->b_flags);
+ buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
}
if (buf)
buf->bytesused += s->dma_last_offset;
}
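For reference, the "byteswap ABCD -> DCBA" flagged above means reversing the byte order of every 32-bit word in the buffer, which is done later outside interrupt context for buffers carrying IVTV_F_B_NEED_BUF_SWAP. A stand-alone sketch of that transformation (the helper name and the word-by-word loop are illustrative, not taken from the driver):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative helper: reverse the byte order of each 32-bit word. */
static void swap_words(uint32_t *w, size_t n)
{
        for (size_t i = 0; i < n; i++)
                w[i] = (w[i] >> 24) | ((w[i] >> 8) & 0x0000ff00) |
                       ((w[i] << 8) & 0x00ff0000) | (w[i] << 24);
}

int main(void)
{
        uint32_t data[1] = { 0xAABBCCDD };        /* "ABCD" */

        swap_words(data, 1);
        printf("0x%08X\n", (unsigned)data[0]);    /* prints 0xDDCCBBAA ("DCBA") */
        return 0;
}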
itv->vbi.dma_offset = s_vbi->dma_offset;
s_vbi->SG_length = 0;
+ s_vbi->dma_xfer_cnt++;
set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
}
/* Mark last buffer size for Interrupt flag */
s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
+ s->dma_xfer_cnt++;
if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
buf->bytesused = 0;
buf->readpos = 0;
buf->b_flags = 0;
+ buf->dma_xfer_cnt = 0;
}
spin_lock_irqsave(&s->qlock, flags);
list_add_tail(&buf->list, &q->list);
}
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
- struct ivtv_queue *to, int clear, int full)
+ struct ivtv_queue *to, int clear)
{
struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);
from->bytesused -= buf->bytesused - buf->readpos;
/* special handling for q_free */
if (clear)
- buf->bytesused = buf->readpos = buf->b_flags = 0;
- else if (full) {
- /* special handling for stolen buffers, assume
- all bytes are used. */
- buf->bytesused = s->buf_size;
- buf->readpos = buf->b_flags = 0;
- }
+ buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
to->buffers++;
to->length += s->buf_size;
to->bytesused += buf->bytesused - buf->readpos;
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
If 'steal' != NULL, then buffers may also be taken from that queue if
- needed.
+ needed, but only if 'from' is the free queue.
The buffer is automatically cleared if it goes to the free queue. It is
also cleared if buffers need to be taken from the 'steal' queue and
int rc = 0;
int from_free = from == &s->q_free;
int to_free = to == &s->q_free;
- int bytes_available;
+ int bytes_available, bytes_steal;
spin_lock_irqsave(&s->qlock, flags);
if (needed_bytes == 0) {
}
bytes_available = from_free ? from->length : from->bytesused;
- bytes_available += steal ? steal->length : 0;
+ bytes_steal = (from_free && steal) ? steal->length : 0;
- if (bytes_available < needed_bytes) {
+ if (bytes_available + bytes_steal < needed_bytes) {
spin_unlock_irqrestore(&s->qlock, flags);
return -ENOMEM;
}
+ while (bytes_available < needed_bytes) {
+ struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
+ u16 dma_xfer_cnt = buf->dma_xfer_cnt;
+
+ /* Move buffers from the tail of the 'steal' queue to the tail of the
+    'from' queue. Always move all the buffers with the same dma_xfer_cnt
+    value; this ensures that you do not end up with partial frame data
+    if one frame is stored in multiple buffers. */
+ while (dma_xfer_cnt == buf->dma_xfer_cnt) {
+ list_move_tail(steal->list.prev, &from->list);
+ rc++;
+ steal->buffers--;
+ steal->length -= s->buf_size;
+ steal->bytesused -= buf->bytesused - buf->readpos;
+ buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
+ from->buffers++;
+ from->length += s->buf_size;
+ bytes_available += s->buf_size;
+ if (list_empty(&steal->list))
+ break;
+ buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
+ }
+ }
if (from_free) {
u32 old_length = to->length;
while (to->length - old_length < needed_bytes) {
- if (list_empty(&from->list))
- from = steal;
- if (from == steal)
- rc++; /* keep track of 'stolen' buffers */
- ivtv_queue_move_buf(s, from, to, 1, 0);
+ ivtv_queue_move_buf(s, from, to, 1);
}
}
else {
u32 old_bytesused = to->bytesused;
while (to->bytesused - old_bytesused < needed_bytes) {
- if (list_empty(&from->list))
- from = steal;
- if (from == steal)
- rc++; /* keep track of 'stolen' buffers */
- ivtv_queue_move_buf(s, from, to, to_free, rc);
+ ivtv_queue_move_buf(s, from, to, to_free);
}
}
spin_unlock_irqrestore(&s->qlock, flags);
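To make the new semantics concrete, a hypothetical caller is sketched below. The parameter order (s, from, steal, to, needed_bytes), the choice of q_full as the steal queue, and the assumption that the function returns the stolen-buffer count rc on success are not shown in this hunk and are illustrative only. What the patch itself guarantees is that stealing happens only when 'from' is the free queue, and that whole DMA transfers are stolen at a time.

/* Hypothetical call site, not part of the patch. */
static int example_refill_predma(struct ivtv_stream *s, int needed_bytes)
{
        /* Top up q_predma from q_free; whole DMA transfers may be stolen
           from q_full if q_free alone cannot cover needed_bytes. */
        int stolen = ivtv_queue_move(s, &s->q_free, &s->q_full,
                                     &s->q_predma, needed_bytes);

        if (stolen < 0)
                return -ENOMEM;   /* not enough data even after stealing */
        return stolen;            /* assumed: number of buffers taken from q_full */
}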