int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
-        unsigned long flag = _PAGE_CACHE_WC;
+        enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
        int ret;

        if (!is_io_mapping_possible(base, size))
                return -EINVAL;

-        ret = io_reserve_memtype(base, base + size, &flag);
+        ret = io_reserve_memtype(base, base + size, &pcm);
        if (ret)
                return ret;

-        *prot = __pgprot(__PAGE_KERNEL | flag);
+        *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
        return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);
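For reference, the hunk above relies on the cache mode abstraction introduced
earlier in this series. A minimal sketch of those definitions (the real ones
live in arch/x86/include/asm/pgtable_types.h, and the translation table is
initialized at boot to match the PAT MSR programming):

enum page_cache_mode {
        _PAGE_CACHE_MODE_WB       = 0,  /* write-back */
        _PAGE_CACHE_MODE_WC       = 1,  /* write-combining */
        _PAGE_CACHE_MODE_UC_MINUS = 2,  /* uncached, MTRR can still override */
        _PAGE_CACHE_MODE_UC       = 3,  /* strongly uncached */
        _PAGE_CACHE_MODE_WT       = 4,  /* write-through */
        _PAGE_CACHE_MODE_WP       = 5,  /* write-protected */
        _PAGE_CACHE_MODE_NUM      = 8
};

/* Translate a cache mode to PAT/PCD/PWT pte bits via the boot-time table. */
#define cachemode2protval(_cm)                                          \
        ((_cm) == _PAGE_CACHE_MODE_WB ? 0 : __cachemode2pte_tbl[_cm])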
        /*
         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
         * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
         * MTRR is UC or WC.  UC_MINUS gets the real intention of the
         * user, which is "WC if the MTRR is WC, UC if you can't do that."
         */
-        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
-                prot = PAGE_KERNEL_UC_MINUS;
+        if (!pat_enabled && pgprot_val(prot) ==
+            (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
+                prot = __pgprot(__PAGE_KERNEL |
+                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

        return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
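The hunk above, in iomap_atomic_prot_pfn(), open-codes the WC and UC_MINUS
protection values rather than comparing against the PAGE_KERNEL_WC and
PAGE_KERNEL_UC_MINUS constants, whose underlying fixed _PAGE_CACHE_* flags
this series is phasing out. A hypothetical driver-style caller, to show how
both converted functions are reached through the generic io_mapping API
(linux/io-mapping.h); example_write_reg() is illustrative only:

static int example_write_reg(resource_size_t base, unsigned long size,
                             unsigned long offset, u32 val)
{
        struct io_mapping *map;
        void __iomem *vaddr;

        map = io_mapping_create_wc(base, size); /* ends up in iomap_create_wc() */
        if (!map)
                return -ENOMEM;

        /* On 32-bit HIGHMEM this maps via iomap_atomic_prot_pfn() above. */
        vaddr = io_mapping_map_atomic_wc(map, offset);
        writel(val, vaddr);
        io_mapping_unmap_atomic(vaddr);

        io_mapping_free(map);
        return 0;
}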
/*
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
-                        unsigned long *type)
+                        enum page_cache_mode *type)
{
        resource_size_t size = end - start;
-        unsigned long req_type = *type;
-        unsigned long new_type;
+        enum page_cache_mode req_type = *type;
+        enum page_cache_mode new_type;
+        unsigned long new_prot;
        int ret;

        WARN_ON_ONCE(iomem_map_sanity_check(start, size));

-        ret = reserve_memtype(start, end, req_type, &new_type);
+        ret = reserve_memtype(start, end, cachemode2protval(req_type),
+                              &new_prot);
        if (ret)
                goto out_err;

-        if (!is_new_memtype_allowed(start, size,
-                                    pgprot2cachemode(__pgprot(req_type)),
-                                    pgprot2cachemode(__pgprot(new_type))))
+        new_type = pgprot2cachemode(__pgprot(new_prot));
+
+        if (!is_new_memtype_allowed(start, size, req_type, new_type))
                goto out_free;

-        if (kernel_map_sync_memtype(start, size, new_type) < 0)
+        if (kernel_map_sync_memtype(start, size, new_prot) < 0)
                goto out_free;

        *type = new_type;
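The net effect of the hunk above: callers of io_reserve_memtype() now speak
cache modes, while reserve_memtype() and kernel_map_sync_memtype(), which are
not yet converted at this point in the series, still speak pte protection
bits, so the function translates at the boundary in both directions. A sketch
of that round trip, assuming the boot-time translation tables:

enum page_cache_mode req = _PAGE_CACHE_MODE_WC;
unsigned long prot = cachemode2protval(req);                 /* mode -> pte bits */
enum page_cache_mode got = pgprot2cachemode(__pgprot(prot)); /* pte bits -> mode */
/*
 * got == req whenever the requested mode is representable in the current
 * PAT configuration; otherwise the tables fall back to a compatible mode
 * (e.g. UC_MINUS), which is one reason the function re-checks the result
 * with is_new_memtype_allowed() before returning it.
 */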