.unmap_page = dma_fixed_unmap_page,
};
+static u64 cell_iommu_get_fixed_address(struct device *dev);
+
static void cell_dma_dev_setup(struct device *dev)
{
- if (get_pci_dma_ops() == &dma_iommu_ops)
+ if (get_pci_dma_ops() == &dma_iommu_ops) {
+ u64 addr = cell_iommu_get_fixed_address(dev);
+
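+		/* apply the direct DMA offset for devices with a fixed-window address */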
+ if (addr != OF_BAD_ADDR)
+ set_dma_offset(dev, addr + dma_iommu_fixed_base);
set_iommu_table_base(dev, cell_get_iommu_table(dev));
- else if (get_pci_dma_ops() == &dma_nommu_ops)
+ } else if (get_pci_dma_ops() == &dma_nommu_ops) {
set_dma_offset(dev, cell_dma_nommu_offset);
- else
+ } else {
BUG();
+ }
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

+	/* We can be called for platform devices that have no of_node */
np = of_node_get(dev->of_node);
+ if (!np)
+ goto out;
+
while (1) {
naddr = of_n_addr_cells(np);
nsize = of_n_size_cells(np);
{
if (dma_mask == DMA_BIT_MASK(64) &&
cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
- u64 addr = cell_iommu_get_fixed_address(dev) +
- dma_iommu_fixed_base;
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
- dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
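+		/* the fixed-window offset is now applied in cell_dma_dev_setup() */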
set_dma_ops(dev, &dma_iommu_fixed_ops);
- set_dma_offset(dev, addr);
return 1;
	}

	if (dma_iommu_dma_supported(dev, dma_mask)) {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
- set_dma_ops(dev, get_pci_dma_ops());
- cell_dma_dev_setup(dev);
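+		/* per-device offset/table setup now happens in cell_dma_dev_setup() */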
+ set_dma_ops(dev, &dma_iommu_ops);
return 1;
}