* Ensure each lock is in a separate cacheline.
*/
static union {
- spinlock_t lock;
+ raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
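lock_addr() is used by every function below but its body is not part of these hunks; a minimal sketch of what it presumably looks like after the conversion (the cacheline shift and xor folding are assumptions, only the raw_spinlock_t return type follows from the hunks):

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	/* Sketch only: hash the atomic64_t address into the lock array. */
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}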
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_read);
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter = i;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter += a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_add_return);
unsigned long flags;
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter -= a;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
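Note that atomic64_dec_if_positive() returns the decremented value even when it is negative and nothing was stored, so callers test the sign. A hypothetical caller (ctx->pending is a made-up field):

/* Hypothetical caller: release one pending slot, never dropping below zero. */
if (atomic64_dec_if_positive(&ctx->pending) < 0)
	pr_warn("pending count already zero\n");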
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
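A typical consumer wraps atomic64_cmpxchg() in a read/modify/retry loop; a sketch of such a helper, not part of this file (the name and saturation policy are made up):

/* Hypothetical helper: saturating 64-bit add built on atomic64_cmpxchg(). */
static long long atomic64_add_sat(atomic64_t *v, long long a, long long max)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + a;
		if (new > max)
			new = max;
	} while (atomic64_cmpxchg(v, old, new) != old);

	return new;
}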
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
long long val;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_xchg);
- spinlock_t *lock = lock_addr(v);
+ raw_spinlock_t *lock = lock_addr(v);
int ret = 0;
- spin_lock_irqsave(lock, flags);
+ raw_spin_lock_irqsave(lock, flags);
if (v->counter != u) {
v->counter += a;
ret = 1;
}
- spin_unlock_irqrestore(lock, flags);
+ raw_spin_unlock_irqrestore(lock, flags);
return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
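atomic64_add_unless() is the usual building block for conditional reference counting; a hypothetical caller (obj and refcnt are made-up names) takes a reference only while the count is still non-zero:

/* Hypothetical caller: grab a reference unless the object is already dying. */
if (!atomic64_add_unless(&obj->refcnt, 1, 0))
	return NULL;	/* refcnt already hit zero */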
int i;
for (i = 0; i < NR_LOCKS; ++i)
- spin_lock_init(&atomic64_lock[i].lock);
+ raw_spin_lock_init(&atomic64_lock[i].lock);
return 0;
}
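The initializer above is presumably registered as an early initcall so the lock array is ready before the first atomic64_*() call; a sketch assuming the usual pure_initcall() mechanism and an init_atomic64_lock name (both assumptions, since the function signature is not in these hunks):

/* Sketch (name assumed): run the lock initializer before any atomic64 user. */
pure_initcall(init_atomic64_lock);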