static int test_bit(int nr, const volatile void * addr);
#endif
-static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}
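
Plain `inline` is only a hint, and the payoff of constant_test_bit() depends entirely on the body actually landing in the caller: only then can a constant nr fold the whole expression down to a single-bit test of one word. A minimal standalone sketch of that fold (my_test_bit and flag5_set are made-up names, not kernel code):

    /* Same bit arithmetic as the i386 constant_test_bit(): nr >> 5 picks the
     * 32-bit chunk and 1UL << (nr & 31) the bit within it.  Once inlined with
     * nr = 5, the whole thing reduces to testing bit 5 of the first word. */
    static inline __attribute__((always_inline))
    int my_test_bit(int nr, const volatile unsigned long *addr)
    {
            return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
    }

    int flag5_set(const unsigned long *flags)
    {
            return my_test_bit(5, flags);   /* (flags[0] & 0x20) != 0 */
    }
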
struct task_struct;
-static inline struct task_struct * get_current(void)
+static __always_inline struct task_struct * get_current(void)
{
return current_thread_info()->task;
}
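
get_current() is just a mask and a load: on i386, current_thread_info() rounds the stack pointer down to the base of the kernel stack, where struct thread_info sits, and ->task points back at the owning task, so an inlined copy is a couple of instructions. A rough standalone sketch of that address arithmetic (the 8 KiB THREAD_SIZE and the single-field struct are simplifying assumptions, not the kernel's definitions):

    /* Round an address inside the kernel stack down to the stack base,
     * where a thread_info-like structure lives. */
    #define SKETCH_THREAD_SIZE 8192UL       /* assumed two-page kernel stack */

    struct task_struct;

    struct sketch_thread_info {
            struct task_struct *task;       /* back-pointer to the task */
    };

    static inline struct sketch_thread_info *sketch_thread_info(unsigned long sp)
    {
            return (struct sketch_thread_info *)(sp & ~(SKETCH_THREAD_SIZE - 1));
    }
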
return __res;
}
-static inline void * __memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
* This looks ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-static inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
long esi, edi;
if (!n) return to;
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
int d0, d1;
__asm__ __volatile__(
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
switch (count) {
case 0:
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
return __copy_to_user_ll(to, from, n);
}
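
The `if (__builtin_constant_p(n))` fast path only helps when this body is inlined into the caller: the builtin looks at n after constant propagation, and for the parameter of an out-of-line copy of the function it is simply 0, so every caller would fall through to __copy_to_user_ll(). That dependency on inlining is what the __always_inline conversion protects. A tiny standalone demonstration (size_is_constant is a made-up name; assumes optimization is enabled):

    /* Evaluates to 1 only when the compiler can see the argument as a
     * literal at the call site, i.e. after the body has been inlined. */
    static inline __attribute__((always_inline))
    int size_is_constant(unsigned long n)
    {
            return __builtin_constant_p(n);
    }

    /* size_is_constant(16)        -> 1 once inlined and propagated
     * size_is_constant(some_len)  -> 0 for a value only known at run time */
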
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
return __copy_from_user_ll(to, from, n);
}
-static inline unsigned long
+static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
-static inline unsigned long fix_to_virt(const unsigned int idx)
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
extern unsigned long copy_from_user(void *to, const void __user *from, unsigned len);
extern unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len);
-static inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
}
}
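
The 64-bit variants follow the same shape: a non-constant size bails out to the generic copy routine immediately, and a constant size drops into a switch whose matching case is all that survives in the generated code, typically one fixed-width access plus its fault handling. A standalone sketch of that dispatch shape (sketch_from_user and generic_from_user are made-up names, and the plain assignments stand in for the exception-handling accessors):

    unsigned long generic_from_user(void *dst, const void *src, unsigned long size);

    /* With a compile-time constant size, only the matching case remains
     * after inlining; anything else goes through the generic helper. */
    static inline __attribute__((always_inline))
    unsigned long sketch_from_user(void *dst, const void *src, unsigned long size)
    {
            if (!__builtin_constant_p(size))
                    return generic_from_user(dst, src, size);
            switch (size) {
            case 1: *(unsigned char *)dst  = *(const unsigned char *)src;  return 0;
            case 2: *(unsigned short *)dst = *(const unsigned short *)src; return 0;
            case 4: *(unsigned int *)dst   = *(const unsigned int *)src;   return 0;
            case 8: *(unsigned long *)dst  = *(const unsigned long *)src;  return 0;
            default: return generic_from_user(dst, src, size);
            }
    }
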
-static inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
+static __always_inline int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
}
-static inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+static __always_inline int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
if (!__builtin_constant_p(size))
extern struct page *mem_map;
#endif
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
{
return __va(page_to_pfn(page) << PAGE_SHIFT);
}
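
lowmem_page_address() is pure arithmetic: page_to_pfn() turns the struct page into a page frame number, the shift by PAGE_SHIFT turns that into a physical address, and __va() adds the kernel's direct-mapping offset, so an inlined call costs a shift and an add. A standalone sketch of that arithmetic with illustrative constants (4 KiB pages are typical, and the 0xC0000000 base is the classic i386 PAGE_OFFSET, not something taken from this patch):

    /* pfn -> kernel virtual address, mirroring __va(pfn << PAGE_SHIFT). */
    #define SKETCH_PAGE_SHIFT  12               /* 4 KiB pages */
    #define SKETCH_PAGE_OFFSET 0xC0000000UL     /* illustrative direct-map base */

    static inline void *sketch_pfn_to_virt(unsigned long pfn)
    {
            unsigned long phys = pfn << SKETCH_PAGE_SHIFT;
            return (void *)(phys + SKETCH_PAGE_OFFSET);
    }
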