if (!tbltmp)
continue;
- /*
- * Make sure hardware table parameters are exactly the same;
- * this is used in the TCE handlers where boundary checks
- * use only the first attached table.
- */
- if ((tbltmp->it_page_shift == stt->page_shift) &&
- (tbltmp->it_offset == stt->offset) &&
- (tbltmp->it_size == stt->size)) {
+ /*
+ * Make sure hardware table parameters are compatible: the hardware
+ * page must not be bigger than the page of the guest view, and both
+ * must describe the same bus address range; this is used in the TCE
+ * handlers where boundary checks use only the first attached table.
+ */
+ if ((tbltmp->it_page_shift <= stt->page_shift) &&
+ (tbltmp->it_offset << tbltmp->it_page_shift ==
+ stt->offset << stt->page_shift) &&
+ (tbltmp->it_size << tbltmp->it_page_shift ==
+ stt->size << stt->page_shift)) {
/*
* Reference the table to avoid races with
* add/remove DMA windows.
return H_SUCCESS;
}
-static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
struct iommu_table *tbl, unsigned long entry)
{
enum dma_data_direction dir = DMA_NONE;
return ret;
}
-long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
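+/*
+ * Clears a guest view TCE entry. The guest IOMMU page may be bigger than
+ * the hardware IOMMU page, in which case one guest entry is backed by
+ * several hardware TCEs; every one of them is cleared.
+ */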
+static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry)
+{
+ unsigned long i, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
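+ /* Clear every hardware TCE backing this guest IOMMU page */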
+ for (i = 0; i < subpages; ++i) {
+ ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
+long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
return 0;
}
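+/*
+ * Maps a guest view TCE entry. The guest IOMMU page may be backed by
+ * several smaller hardware IOMMU pages; every such subpage is mapped,
+ * starting at the guest provided userspace address ua.
+ */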
+static long kvmppc_tce_iommu_map(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+{
+ unsigned long i, pgoff, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
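+ /* Map each hardware subpage, stepping ua by the hardware IOMMU page size */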
+ for (i = 0, pgoff = 0; i < subpages;
+ ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+ ret = kvmppc_tce_iommu_do_map(kvm, tbl,
+ io_entry + i, ua + pgoff, dir);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE)
- ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry);
else
- ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
if (ret == H_SUCCESS)
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- ret = kvmppc_tce_iommu_map(vcpu->kvm,
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
unsigned long entry = ioba >> stt->page_shift;
for (i = 0; i < npages; ++i) {
- ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry + i);
if (ret == H_SUCCESS)
return H_SUCCESS;
}
-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
struct iommu_table *tbl, unsigned long entry)
{
enum dma_data_direction dir = DMA_NONE;
return ret;
}
-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
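+/*
+ * Real mode variant of kvmppc_tce_iommu_unmap(): clears every hardware
+ * TCE backing the guest view entry.
+ */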
+static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry)
+{
+ unsigned long i, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
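+ /* Clear every hardware TCE backing this guest IOMMU page */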
+ for (i = 0; i < subpages; ++i) {
+ ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
+static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
return 0;
}
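+/*
+ * Real mode variant of kvmppc_tce_iommu_map(): maps every hardware
+ * subpage backing the guest view entry.
+ */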
+static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
+ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+{
+ unsigned long i, pgoff, ret = H_SUCCESS;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry * subpages;
+
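+ /* Map each hardware subpage, stepping ua by the hardware IOMMU page size */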
+ for (i = 0, pgoff = 0; i < subpages;
+ ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
+
+ ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
+ io_entry + i, ua + pgoff, dir);
+ if (ret != H_SUCCESS)
+ break;
+ }
+
+ return ret;
+}
+
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
if (dir == DMA_NONE)
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry);
else
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
if (ret == H_SUCCESS)
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
unsigned long entry = ioba >> stt->page_shift;
for (i = 0; i < npages; ++i) {
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
+ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
stit->tbl, entry + i);
if (ret == H_SUCCESS)