struct dma_prog_region prog;
struct ohci1394_iso_tasklet task;
int task_active;
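+ /* cycle on which the most recent packet went out; -1 before the first completion */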
+ int last_cycle;
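+ /* cycles the context skipped without losing packets */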
+ atomic_t skips;
u32 ContextControlSet;
u32 ContextControlClear;
iso->hostdata = xmit;
xmit->ohci = iso->host->hostdata;
xmit->task_active = 0;
+ xmit->last_cycle = -1;
+ atomic_set(&iso->skips, 0);
dma_prog_region_init(&xmit->prog);
/* parse cycle */
cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
+ if (xmit->last_cycle > -1) {
+ int cycle_diff = cycle - xmit->last_cycle;
+ int skip;
+
+ /* unwrap: the cycle counter runs 0..7999 and wraps once per second */
+ if (cycle_diff < 0) {
+ cycle_diff += 8000;
+ if (cycle_diff < 0)
+ PRINT(KERN_ERR, "bogus cycle diff %d", cycle_diff);
+ }
+
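+ /*
+  * Consecutive packets go out on consecutive cycles, so an unwrapped
+  * difference larger than one means the context skipped cycles in
+  * between. Example: last_cycle = 7998 and cycle = 2 unwrap to
+  * cycle_diff = 4, i.e. three cycles without a packet.
+  */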
+ skip = cycle_diff - 1;
+ if (skip > 0) {
+ DBGMSG("skipped %d cycles without packet loss", skip);
+ atomic_add(skip, &iso->skips);
+ }
+ }
+ xmit->last_cycle = cycle;
+
/* tell the subsystem the packet has gone out */
hpsb_iso_packet_sent(iso, cycle, event != 0x11);
prev->output_last.branchAddress = cpu_to_le32(
dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
+ /*
+  * Link the skip address to this descriptor itself. This causes the
+  * context to skip a cycle whenever lost cycles or FIFO overruns occur,
+  * without dropping the data at that point. The application should then
+  * decide whether this is an error condition or not. Some protocols
+  * can deal with this by dropping some rate-matching padding packets.
+  */
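+ /* in practice the controller then re-sends this same packet on a later cycle instead of dropping it */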
+ next->output_more_immediate.branchAddress =
+ prev->output_last.branchAddress;
+
/* disable interrupt, unless required by the IRQ interval */
if (prev_i % iso->irq_interval) {
prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
static void raw1394_iso_fill_status(struct hpsb_iso *iso,
struct raw1394_iso_status *stat)
{
+ int overflows = atomic_read(&iso->overflows);
+ int skips = atomic_read(&iso->skips);
+
stat->config.data_buf_size = iso->buf_size;
stat->config.buf_packets = iso->buf_packets;
stat->config.channel = iso->channel;
stat->config.speed = iso->speed;
stat->config.irq_interval = iso->irq_interval;
stat->n_packets = hpsb_iso_n_ready(iso);
- stat->overflows = atomic_read(&iso->overflows);
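+ /* pack both counters into the one user-visible word: skipped cycles
+  * in the upper 16 bits, the existing overflow count in the lower 16 */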
+ stat->overflows = ((skips & 0xFFFF) << 16) | (overflows & 0xFFFF);
stat->xmit_cycle = iso->xmit_cycle;
}
/* reset overflow counter */
atomic_set(&iso->overflows, 0);
+ /* reset skip counter */
+ atomic_set(&iso->skips, 0);
return 0;
}
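/*
 * Illustration only, not part of the patch: a minimal userspace sketch of
 * how the counters packed by raw1394_iso_fill_status() could be split
 * apart again after querying the iso status (presumably via
 * RAW1394_IOC_ISO_GET_STATUS). struct raw1394_iso_status and its
 * overflows field come from the raw1394 user API shown above; the helper
 * name below is made up for this example.
 */
static inline void decode_iso_error_counters(const struct raw1394_iso_status *stat,
                                             unsigned int *skips,
                                             unsigned int *overflows)
{
        /* upper 16 bits: cycles skipped without packet loss */
        *skips = (stat->overflows >> 16) & 0xFFFF;
        /* lower 16 bits: the pre-existing buffer overflow/underflow count */
        *overflows = stat->overflows & 0xFFFF;
}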