/*
* The standard PCI ioremap interfaces
*/
-
-extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-
-/* Most machines react poorly to I/O-space being cacheable... Instead let's
- * define ioremap() in terms of ioremap_nocache().
- */
-static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
-{
-        return __ioremap(offset, size, _PAGE_NO_CACHE);
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
#define ioremap_nocache(off, sz) ioremap((off), (sz))
#define ioremap_wc ioremap_nocache
#define ioremap_uc ioremap_nocache
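After this change all four parisc entry points resolve to the same uncached mapping, so a caller behaves identically whichever alias it picks. A minimal usage sketch; the device base address, size, and register offset below are invented for illustration:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define DEMO_MMIO_BASE        0xf4000000UL        /* hypothetical MMIO base */
#define DEMO_MMIO_SIZE        0x1000UL
#define DEMO_REG_ID           0x00                /* hypothetical register */

static int demo_read_id(u32 *id)
{
        /* Equivalent to ioremap_nocache()/_wc()/_uc() after this patch. */
        void __iomem *regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);

        if (!regs)
                return -ENOMEM;
        *id = readl(regs + DEMO_REG_ID);
        iounmap(regs);
        return 0;
}

Collapsing the _wc and _uc aliases into the nocache variant is safe here because, as the pgprot change further down shows, parisc only ever hands out _PAGE_NO_CACHE mappings.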
/*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
        void __iomem *addr;
        struct vm_struct *area;
        unsigned long end = phys_addr + size - 1;
        /* Support EISA addresses */
        if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
-            (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+            (phys_addr >= 0x00500000 && end < 0x03bfffff))
                phys_addr |= F_EXTEND(0xfc000000);
-                flags |= _PAGE_NO_CACHE;
-        }
#endif
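For reference, the EISA fixup kept above just folds a legacy 32-bit bus address into the processor's fixed I/O region; dropping the braces is correct because only the address OR remains under the if. A userspace sketch of the arithmetic, assuming F_EXTEND() on a 64-bit kernel sets the upper 32 address bits (the macro below is a stand-in, not the kernel's definition):

#include <stdio.h>

/* Stand-in for the parisc F_EXTEND() macro; assumed to force the
 * upper 32 address bits to one on a 64-bit kernel. */
#define F_EXTEND(x) ((unsigned long)((x) | 0xffffffff00000000UL))

int main(void)
{
        unsigned long phys_addr = 0x00500000UL;        /* EISA bus address */

        phys_addr |= F_EXTEND(0xfc000000);
        printf("%#lx\n", phys_addr);        /* 0xfffffffffc500000 */
        return 0;
}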
        /* Don't allow wraparound or zero size */
        }
        pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
-                          _PAGE_ACCESSED | flags);
+                          _PAGE_ACCESSED | _PAGE_NO_CACHE);
        /*
         * Mappings have to be page-aligned
        return (void __iomem *) (offset + (char __iomem *)addr);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
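The NOTE above ioremap() promises that non-page-aligned requests still work; the return statement at the end of the function is where the in-page offset is added back. A condensed sketch of just that bookkeeping (map_pages() is a made-up placeholder for the real vmalloc-area setup):

#include <linux/io.h>
#include <linux/mm.h>

/* Hypothetical placeholder: reserve a vmalloc area and wire up page
 * tables covering [phys_addr, phys_addr + size). */
void __iomem *map_pages(unsigned long phys_addr, unsigned long size);

static void __iomem *ioremap_sketch(unsigned long phys_addr,
                                    unsigned long size)
{
        unsigned long offset = phys_addr & ~PAGE_MASK;  /* byte within page */
        unsigned long last_addr = phys_addr + size - 1;
        void __iomem *addr;

        phys_addr &= PAGE_MASK;                         /* round base down */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;   /* round size up */

        addr = map_pages(phys_addr, size);
        if (!addr)
                return NULL;

        /* Point the caller at the exact byte it asked for. */
        return (void __iomem *)(offset + (char __iomem *)addr);
}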
void iounmap(const volatile void __iomem *io_addr)
{