. = ALIGN(8);
SECURITY_INIT
- . = ALIGN(8192);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(8192)
. = ALIGN(2*8192);
__init_end = .;
. = ALIGN(4096);
__per_cpu_start = .;
*(.data.percpu)
+ *(.data.percpu.shared_aligned)
__per_cpu_end = .;
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
}
SECURITY_INIT
- . = ALIGN (8192);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(8192)
#ifdef CONFIG_BLK_DEV_INITRD
.init.ramfs : {
__alt_instructions_end = .;
.altinstr_replacement : { *(.altinstr_replacement) }
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
*(.data.percpu)
+ *(.data.percpu.shared_aligned)
__per_cpu_end = .;
}
. = ALIGN(4096);
{
__per_cpu_start = .;
*(.data.percpu)
+ *(.data.percpu.shared_aligned)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
__initramfs_end = .;
#endif
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
. = ALIGN(4096);
__init_end = .;
/* freed after init ends here */
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
#endif
- . = ALIGN(_PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(_PAGE_SIZE)
. = ALIGN(_PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
.init.ramfs : { *(.init.ramfs) }
__initramfs_end = .;
#endif
- . = ALIGN(ASM_PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+
+ PERCPU(ASM_PAGE_SIZE)
+
. = ALIGN(ASM_PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
.data.percpu : {
__per_cpu_start = .;
*(.data.percpu)
+ *(.data.percpu.shared_aligned)
__per_cpu_end = .;
}
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
. = ALIGN(2);
__initramfs_end = .;
#endif
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
. = ALIGN(4096);
__init_end = .;
/* freed after init ends here */
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
- . = ALIGN(PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(PAGE_SIZE)
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
_edata = .; /* End of data section */
. = ALIGN(PAGE_SIZE);
__per_cpu_start = .;
- .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) }
+ .data.percpu : C_PHYS(.data.percpu) {
+ *(.data.percpu)
+ *(.data.percpu.shared_aligned)
+ }
__per_cpu_end = . ;
.data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) }
__initramfs_end = .;
#endif
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
. = ALIGN(4096);
__init_end = .;
. = ALIGN(32);
__initramfs_end = .;
#endif
- . = ALIGN(PAGE_SIZE);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(PAGE_SIZE)
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
__bss_start = .;
__initramfs_end = .;
#endif
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
+
. = ALIGN(4096);
__init_end = .;
__initramfs_end = .;
#endif
- . = ALIGN(4096);
- __per_cpu_start = .;
- .data.percpu : { *(.data.percpu) }
- __per_cpu_end = .;
+ PERCPU(4096)
/* We need this dummy segment here */
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void); \
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
*(.initcall7.init) \
*(.initcall7s.init)
+#define PERCPU(align) \
+ . = ALIGN(align); \
+ __per_cpu_start = .; \
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ *(.data.percpu) \
+ *(.data.percpu.shared_aligned) \
+ } \
+ __per_cpu_end = .;
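
(Reading aid, not part of the patch: a PERCPU(PAGE_SIZE) invocation expands to roughly the block below. LOAD_OFFSET defaults to 0 in asm-generic/vmlinux.lds.h for architectures that do not define it, so the AT() term is then a no-op.)

    . = ALIGN(PAGE_SIZE);
    __per_cpu_start = .;
    .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
            *(.data.percpu)                /* ordinary per-CPU variables */
            *(.data.percpu.shared_aligned) /* cacheline-aligned, cross-CPU-shared per-CPU variables */
    }
    __per_cpu_end = .;
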
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
__attribute__((__section__(".data.percpu"))) \
__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+#else
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+#endif
+
/*
* Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
* external routine, to avoid include-hell.
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()))
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
__attribute__((__section__(".data.percpu"))) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
#define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
#define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset)
#define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu])
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
#define __get_cpu_var(var) __reloc_hide(var,0)
#define __raw_get_cpu_var(var) __reloc_hide(var,0)
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
register unsigned long __local_per_cpu_offset asm("g5");
/* var is in discarded region: offset to particular copy we want */
#define real_setup_per_cpu_areas() do { } while (0)
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_internodealigned_in_smp
+
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var, cpu) (*({ \
extern int simple_identifier_##var(void); \
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
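
(Usage sketch, not part of the patch: how the new macro sits next to the existing one. The struct and variable names below are hypothetical; only per-CPU data that is frequently accessed from other CPUs belongs in the shared_aligned section, since each such variable starts on its own cache line, and on UP builds the macro falls back to plain DEFINE_PER_CPU.)

    /* hypothetical example type */
    struct demo_stats {
            unsigned long packets;
            unsigned long bytes;
    };

    /* packed together with other per-CPU data in .data.percpu */
    static DEFINE_PER_CPU(struct demo_stats, demo_local_stats);

    /* placed in .data.percpu.shared_aligned, cacheline aligned (SMP only) */
    static DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_stats, demo_shared_stats);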