if (!mem)
return H_TOO_HARD;
- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+ &hpa)))
return H_HARDWARE;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
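
For context, a minimal standalone sketch (not kernel code) of the containment check that the new page-shift argument to mm_iommu_ua_to_hpa_rm() enables: the helper can now refuse a translation when the IOMMU page (1UL << pg_shift) is larger than the pinned backing page. The struct layout and the ua_to_hpa() name below are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for mm_iommu_table_group_mem_t. */
struct mem_region {
	unsigned long hpa_base;	/* host physical base of the pinned region */
	unsigned int pageshift;	/* log2 of the pinned backing page size */
};

/* Fails when the requested IOMMU page exceeds the pinned page size. */
static bool ua_to_hpa(struct mem_region *mem, unsigned long offset,
		unsigned int pg_shift, unsigned long *hpa)
{
	if (pg_shift > mem->pageshift)
		return false;
	*hpa = mem->hpa_base + offset;
	return true;
}

int main(void)
{
	struct mem_region mem = { .hpa_base = 0x100000, .pageshift = 16 };
	unsigned long hpa;

	/* 64K IOMMU page backed by a 64K pinned page: translated. */
	printf("64K: %s\n", ua_to_hpa(&mem, 0, 16, &hpa) ? "ok" : "fail");
	/* 16M IOMMU page backed by a 64K pinned page: refused. */
	printf("16M: %s\n", ua_to_hpa(&mem, 0, 24, &hpa) ? "ok" : "fail");
	return 0;
}
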
if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
+ tbl->it_page_shift, &hpa, &mem);
if (ret)
- pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
- __func__, *pua, entry, ret);
+ pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
+ __func__, be64_to_cpu(*pua), entry, ret);
if (mem)
mm_iommu_mapped_dec(mem);
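
The switch from unsigned long to __be64 means every read of *pua now goes through be64_to_cpu(). A standalone sketch of why the raw read is wrong on little-endian hosts, using the glibc htobe64()/be64toh() equivalents (the address value is illustrative only):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ua = 0x3fff80000000ULL;	/* example userspace address */
	uint64_t entry_be = htobe64(ua);	/* as stored in the userspace view */

	/* Wrong on little-endian hosts: raw read of a big-endian entry. */
	printf("raw read:   %llx\n", (unsigned long long)entry_be);
	/* Correct: convert first, as be64_to_cpu(*pua) does in the patch. */
	printf("converted:  %llx\n", (unsigned long long)be64toh(entry_be));
	return 0;
}
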
unsigned long hpa;
enum dma_data_direction dirtmp;
- if (!tbl->it_userspace) {
- ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
- if (ret)
- return ret;
- }
-
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
- entry + i);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
ret = tce_iommu_prereg_ua_to_hpa(container,
- tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+ tce, tbl->it_page_shift, &hpa, &mem);
if (ret)
break;
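
The callers above also stop passing IOMMU_PAGE_SIZE(tbl) (a size in bytes) and pass tbl->it_page_shift directly. A small sketch of the redundancy this removes, assuming IOMMU_PAGE_SIZE expands to 1UL << it_page_shift as in the kernel; size_to_shift() is a hypothetical helper:

#include <stdio.h>

/* With a byte size, a callee must first recover the shift. */
static unsigned int size_to_shift(unsigned long size)
{
	/* Assumes size is a power of two. */
	return (unsigned int)__builtin_ctzl(size);
}

int main(void)
{
	unsigned int it_page_shift = 16;		/* 64K IOMMU pages */
	unsigned long size = 1UL << it_page_shift;	/* IOMMU_PAGE_SIZE(tbl) */

	printf("size %lu -> shift %u (round trip: %s)\n", size,
			size_to_shift(size),
			size_to_shift(size) == it_page_shift ? "ok" : "bad");
	return 0;
}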