#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
+extern bool is_kernel_percpu_address(unsigned long addr);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
        /*
         * Can't easily make larger alignment work with kmalloc. WARN
         * on it. Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
        return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void __percpu *p)
{
        kfree(p);
}

+/* can't distinguish from other static vars, always false */
+static inline bool is_kernel_percpu_address(unsigned long addr)
+{
+        return false;
+}
+
static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
        return __pa(addr);
}

static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
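
For a sense of how the new helper is meant to be consumed, below is a minimal sketch of a caller that treats an address as "static" if it falls inside the kernel image, inside the static per-CPU area, or inside a module. It is loosely modeled on the kind of check lockdep performs; the function name and exact structure are illustrative assumptions, not code from this patch.

#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/sections.h>       /* _stext, _end */

/*
 * Illustrative caller (not part of this patch): decide whether @obj
 * points at a statically allocated object.  On !SMP kernels the
 * is_kernel_percpu_address() stub above always returns false, so the
 * per-CPU test simply drops out of the picture.
 */
static int example_static_obj(void *obj)
{
        unsigned long start = (unsigned long)&_stext;
        unsigned long end   = (unsigned long)&_end;
        unsigned long addr  = (unsigned long)obj;

        /* inside the kernel image proper? */
        if (addr >= start && addr < end)
                return 1;

        /* inside the static (built-in) per-CPU area? */
        if (is_kernel_percpu_address(addr))
                return 1;

        /* otherwise fall back to the module address check */
        return is_module_address(addr);
}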