.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
};
-EXPORT_SYMBOL_GPL(ata_base_port_ops);
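/*
 * Background sketch (not part of this patch): ata_port_operations tables
 * chain through .inherits. At host registration libata resolves the chain,
 * copying every method a table leaves NULL from its ancestor, so
 * sata_port_ops below only overrides what differs from ata_base_port_ops.
 */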
const struct ata_port_operations sata_port_ops = {
.inherits = &ata_base_port_ops,
if (udma_mask)
*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
-EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
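/*
 * Illustration (hedged; argument types assumed from the fragment above and
 * may vary by kernel version): a packed xfer_mask keeps the PIO, MWDMA and
 * UDMA capability bits in disjoint ranges delimited by the ATA_SHIFT_ and
 * ATA_MASK_ constants. ata_unpack_xfermask() is the inverse of
 * ata_pack_xfermask():
 *
 *	unsigned int pio, mwdma, udma;
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */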
static const struct ata_xfer_ent {
int shift, bits;
return last_mode;
}
-EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
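/*
 * What the helper above does (simplified): ata_timing_cycle2mode() walks
 * the timing table for one transfer type and keeps the last mode whose
 * cycle time still fits the requested cycle, so the caller gets the fastest
 * mode the device's timing allows; last_mode starts at a sentinel (0xff in
 * mainline, stated here as an assumption) meaning no mode fits.
 */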
/**
* ata_down_xfermask_limit - adjust dev xfer masks downward
qc->n_elem = n_elem;
qc->cursg = qc->sg;
}
-EXPORT_SYMBOL_GPL(ata_sg_init);
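/*
 * Context (hedged sketch): ata_sg_init() attaches a scatter/gather table
 * to a queued command. Initializing qc->cursg to qc->sg lets the PIO/ATAPI
 * data-transfer path walk the table entry by entry as data moves.
 */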
#ifdef CONFIG_HAS_DMA
{
kref_get(&host->kref);
}
-EXPORT_SYMBOL_GPL(ata_host_get);
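/*
 * ata_host_get()/ata_host_put() wrap the generic kref API; the final put
 * fires the release callback (ata_host_release in mainline, named here as
 * an assumption), which frees the ports and the host. A minimal sketch of
 * the put body:
 *
 *	kref_put(&host->kref, ata_host_release);
 */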
void ata_host_put(struct ata_host *host)
{
DPRINTK("ata%u port thawed\n", ap->print_id);
}
-EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
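/*
 * Freeze/thaw background (flag name per mainline libata, hedged): a frozen
 * port has its interrupts masked while EH owns it; ata_eh_thaw_port()
 * clears ATA_PFLAG_FROZEN and calls ap->ops->thaw() so normal command
 * processing can resume.
 */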
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
scmd->retries = scmd->allowed;
__ata_eh_qc_complete(qc);
}
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
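/*
 * Why scmd->retries = scmd->allowed above (rationale, hedged): setting the
 * retry count to the allowed maximum tells the SCSI midlayer no retries
 * remain, so a command that libata EH has already handled is completed
 * upward rather than retried a second time.
 */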
/**
* ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
scmd->allowed++;
__ata_eh_qc_complete(qc);
}
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
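/*
 * Counterpart to the above (rationale, hedged): here the midlayer should
 * retry, and scmd->allowed++ grants one extra attempt so the EH-requested
 * retry does not consume the command's own retry budget.
 */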
/**
* ata_dev_disable - disable ATA device
ata_eh_finish(ap);
}
-EXPORT_SYMBOL_GPL(ata_do_eh);
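/*
 * Standard EH sequence sketch (function names per mainline libata,
 * simplified): ata_do_eh() diagnoses, reports, recovers, then finishes:
 *
 *	ata_eh_autopsy(ap);
 *	ata_eh_report(ap);
 *	ata_eh_recover(ap, prereset, softreset, hardreset, postreset, NULL);
 *	ata_eh_finish(ap);
 */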
/**
* ata_std_error_handler - standard error handler