}
#define dma_max_pfn(dev) dma_max_pfn(dev)
- static inline int set_arch_dma_coherent_ops(struct device *dev)
- {
- dev->archdata.dma_coherent = true;
- set_dma_ops(dev, &arm_coherent_dma_ops);
- return 0;
- }
- #define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
+ #define arch_setup_dma_ops arch_setup_dma_ops
+ extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent);
+
+ #define arch_teardown_dma_ops arch_teardown_dma_ops
+ extern void arch_teardown_dma_ops(struct device *dev);
+/* do not use this function in a driver */
+static inline bool is_device_dma_coherent(struct device *dev)
+{
+ return dev->archdata.dma_coherent;
+}
+
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
unsigned int offset = paddr & ~PAGE_MASK;
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
- #endif
+ static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+ {
+ return coherent ? &iommu_coherent_ops : &iommu_ops;
+ }
+
+ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+ {
+ struct dma_iommu_mapping *mapping;
+
+ if (!iommu)
+ return false;
+
+ mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+ if (IS_ERR(mapping)) {
+ pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
+ size, dev_name(dev));
+ return false;
+ }
+
+ if (arm_iommu_attach_device(dev, mapping)) {
+ pr_warn("Failed to attach device %s to IOMMU mapping\n",
+ dev_name(dev));
+ arm_iommu_release_mapping(mapping);
+ return false;
+ }
+
+ return true;
+ }
+
+ static void arm_teardown_iommu_dma_ops(struct device *dev)
+ {
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+
+ #else
+
+ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu)
+ {
+ return false;
+ }
+
+ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
+
+ #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
+
+ #endif /* CONFIG_ARM_DMA_USE_IOMMU */
+
+ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+ {
+ return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+ }
+
+ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
+ {
+ struct dma_map_ops *dma_ops;
+
+ dev->archdata.dma_coherent = coherent;
+ if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
+ dma_ops = arm_get_iommu_dma_map_ops(coherent);
+ else
+ dma_ops = arm_get_dma_map_ops(coherent);
+
+ set_dma_ops(dev, dma_ops);
+ }
+
+ void arch_teardown_dma_ops(struct device *dev)
+ {
+ arm_teardown_iommu_dma_ops(dev);
+ }