 	ioat_kobject_del(ioat_dma);
 	dma_async_device_unregister(dma);
-
-	dma_pool_destroy(ioat_dma->completion_pool);
-
-	INIT_LIST_HEAD(&dma->channels);
 }
 	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
 	for (i = 0; i < dma->chancnt; i++) {
-		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
 		if (!ioat_chan)
 			break;
 		return;
 	ioat_stop(ioat_chan);
-	ioat_reset_hw(ioat_chan);
-	/* Put LTR to idle */
-	if (ioat_dma->version >= IOAT_VER_3_4)
-		writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
-		       ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET);
+	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
+		ioat_reset_hw(ioat_chan);
+
+		/* Put LTR to idle */
+		if (ioat_dma->version >= IOAT_VER_3_4)
+			writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
+			       ioat_chan->reg_base +
+			       IOAT_CHAN_LTR_SWSEL_OFFSET);
+	}
 	spin_lock_bh(&ioat_chan->cleanup_lock);
 	spin_lock_bh(&ioat_chan->prep_lock);
 	.err_handler = &ioat_err_handler,
 };
+static void release_ioatdma(struct dma_device *device)
+{
+	struct ioatdma_device *d = to_ioatdma_device(device);
+	int i;
+
+	for (i = 0; i < IOAT_MAX_CHANS; i++)
+		kfree(d->idx[i]);
+
+	dma_pool_destroy(d->completion_pool);
+	kfree(d);
+}
+
 static struct ioatdma_device *
 alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
 {
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
 	if (!d)
 		return NULL;
 	d->pdev = pdev;
 	d->reg_base = iobase;
+	d->dma_dev.device_release = release_ioatdma;
 	return d;
 }
 	if (!device)
 		return;
+	ioat_shutdown(pdev);
+
 	dev_err(&pdev->dev, "Removing dma and dca services\n");
 	if (device->dca) {
 		unregister_dca_provider(device->dca, &pdev->dev);