vm_free_page(vm, p->page);
}
-static void *kmap_page_dma(struct i915_page_dma *p)
-{
- return kmap_atomic(p->page);
-}
-
-/* We use the flushing unmap only with ppgtt structures:
- * page directories, page tables and scratch pages.
- */
-static void kunmap_page_dma(void *vaddr)
-{
- kunmap_atomic(vaddr);
-}
-
-#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(vaddr) kunmap_page_dma((vaddr))
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
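/*
 * kmap_atomic_px() folds the old kmap_px()/kunmap_px() wrappers into a
 * direct kmap_atomic() of the structure's backing page. Callers now pair
 * it with a plain kunmap_atomic() on the returned vaddr:
 *
 *	vaddr = kmap_atomic_px(pt);
 *	vaddr[pte] = scratch_pte;
 *	kunmap_atomic(vaddr);
 */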
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
static void fill_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
const u64 val)
{
- u64 * const vaddr = kmap_page_dma(p);
+ u64 * const vaddr = kmap_atomic(p->page);
int i;
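/* A 4KiB page holds 512 qword entries; write val into every slot. */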
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(vaddr);
+ kunmap_atomic(vaddr);
}
static void fill_page_dma_32(struct i915_address_space *vm,
if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
return;
- page_directorypo = kmap_px(pdp);
+ page_directorypo = kmap_atomic_px(pdp);
page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_px(page_directorypo);
+ kunmap_atomic(page_directorypo);
}
static void
gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
struct i915_pml4 *pml4,
struct i915_page_directory_pointer *pdp,
int index)
{
- gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
+ gen8_ppgtt_pml4e_t *pagemap = kmap_atomic_px(pml4);
pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_px(pagemap);
+ kunmap_atomic(pagemap);
}
/* Broadwell Page Directory Pointer Descriptors */
return true;
}
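/* The table is still partially in use: point the cleared range at scratch. */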
- vaddr = kmap_px(pt);
+ vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
vaddr[pte++] = scratch_pte;
- kunmap_px(vaddr);
+ kunmap_atomic(vaddr);
return false;
}
if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
__clear_bit(pde, pd->used_pdes);
- pde_vaddr = kmap_px(pd);
+ pde_vaddr = kmap_atomic_px(pd);
pde_vaddr[pde] = scratch_pde;
- kunmap_px(pde_vaddr);
+ kunmap_atomic(pde_vaddr);
free_pt(vm, pt);
}
}
bool ret;
pd = pdp->page_directory[pdpe];
- vaddr = kmap_px(pd->page_table[pde]);
+ vaddr = kmap_atomic_px(pd->page_table[pde]);
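/* Walk the sg list, writing one PTE per 4KiB page. */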
do {
vaddr[pte] = pte_encode | iter->dma;
iter->dma += PAGE_SIZE;
pde = 0;
}
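/* Crossed into the next page table: swap the atomic mapping. */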
- kunmap_px(vaddr);
- vaddr = kmap_px(pd->page_table[pde]);
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(pd->page_table[pde]);
pte = 0;
}
} while (1);
- kunmap_px(vaddr);
+ kunmap_atomic(vaddr);
return ret;
}
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- gen8_pde_t *const page_directory = kmap_px(pd);
+ gen8_pde_t *const page_directory = kmap_atomic_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = length;
uint64_t pd_start = start;
* point we're still relying on insert_entries() */
}
- kunmap_px(page_directory);
+ kunmap_atomic(page_directory);
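/* Mark this PDPE live and point it at the new page directory. */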
__set_bit(pdpe, pdp->used_pdpes);
gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
}
if (!test_bit(pde, pd->used_pdes))
continue;
- pt_vaddr = kmap_px(pt);
+ pt_vaddr = kmap_atomic_px(pt);
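/* Dump the live PTEs of this table, four per line. */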
for (pte = 0; pte < GEN8_PTES; pte += 4) {
uint64_t va =
(pdpe << GEN8_PDPE_SHIFT) |
}
seq_puts(m, "\n");
}
- /* don't use kunmap_px, it could trigger
- * an unnecessary flush.
- */
kunmap_atomic(pt_vaddr);
}
}
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+ pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
for (pte = 0; pte < GEN6_PTES; pte += 4) {
unsigned long va =
}
seq_puts(m, "\n");
}
- kunmap_px(pt_vaddr);
+ kunmap_atomic(pt_vaddr);
}
}
if (last_pte > GEN6_PTES)
last_pte = GEN6_PTES;
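/* Reset the PTEs in this table back to the scratch page. */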
- pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+ pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
- kunmap_px(pt_vaddr);
+ kunmap_atomic(pt_vaddr);
num_entries -= last_pte - first_pte;
first_pte = 0;
struct sgt_dma iter;
gen6_pte_t *vaddr;
- vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+ vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
iter.sg = pages->sgl;
iter.dma = sg_dma_address(iter.sg);
iter.max = iter.dma + iter.sg->length;
}
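/* This page table is full: advance act_pt and remap. */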
if (++act_pte == GEN6_PTES) {
- kunmap_px(vaddr);
- vaddr = kmap_px(ppgtt->pd.page_table[++act_pt]);
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
act_pte = 0;
}
} while (1);
- kunmap_px(vaddr);
+ kunmap_atomic(vaddr);
}
static int gen6_alloc_va_range(struct i915_address_space *vm,