For x86_64, we can't just use %0, as it would
generate a mul against rdx, which is not really what we
want (note the ">> 32" in the x86_64 version).
Using a u64 variable with a shift on i386 generates bad code,
so the solution is to explicitly use %%edx in the inline assembly
for both.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
int d0;
xloops *= 4;
- __asm__("mull %0"
+ __asm__("mull %%edx"
:"=d" (xloops), "=&a" (d0)
:"1" (xloops), "0"
(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
inline void __const_udelay(unsigned long xloops)
{
- __delay(((xloops * HZ *
- cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
+ int d0;
+ xloops *= 4;
+ __asm__("mull %%edx"
+ :"=d" (xloops), "=&a" (d0)
+ :"1" (xloops), "0"
+ (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
+
+ __delay(++xloops);
}
+
EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)