Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 14 Dec 2009 17:58:24 +0000 (09:58 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 14 Dec 2009 17:58:24 +0000 (09:58 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
arch/x86/kvm/svm.c
mm/slab.c
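
These conflicts are the kind the "make percpu symbols unique" commits address: per-cpu variables are being renamed so their symbol names cannot clash with local variables or other global symbols referenced by the per_cpu()/this_cpu_*() accessors, in preparation for dropping the per_cpu__ prefix. A minimal sketch of the pattern (the helper function below is hypothetical; the rename itself mirrors the mm/slab.c hunk further down):

#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

/*
 * was: static DEFINE_PER_CPU(struct delayed_work, reap_work);
 * renamed so the per-cpu symbol cannot collide with other identifiers
 * called "reap_work" once per-cpu symbols share the ordinary namespace.
 */
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

/* hypothetical helper, not part of the merged patches */
static void example_start_reap(int cpu)
{
        /* per_cpu() names the per-cpu symbol directly, so it must be unique */
        struct delayed_work *reap = &per_cpu(slab_reap_work, cpu);

        schedule_delayed_work_on(cpu, reap, HZ);
}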

34 files changed:
arch/powerpc/kernel/perf_callchain.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/smp.c
arch/powerpc/platforms/cell/interrupt.c
arch/x86/kernel/apic/nmi.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kvm/svm.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
crypto/cryptd.c
drivers/base/cpu.c
drivers/cpufreq/cpufreq.c
drivers/crypto/padlock-aes.c
drivers/dma/dmaengine.c
drivers/net/loopback.c
drivers/net/veth.c
drivers/s390/net/netiucv.c
fs/ext4/mballoc.c
fs/xfs/xfs_mount.c
include/net/neighbour.h
include/net/netfilter/nf_conntrack.h
kernel/lockdep.c
kernel/module.c
kernel/rcutorture.c
kernel/sched.c
kernel/softirq.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hw_branches.c
mm/slab.c
mm/vmalloc.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 3de0b37ec038673c3a70b4f14be7dcd5656dfcfe,6c79a14a3b6f8c784c6f6118ece14fd4e2bdd6b3..1d9b33843c80ef521dc059697285cfed06cfd7d7
@@@ -313,46 -316,40 +313,45 @@@ static void svm_hardware_disable(void *
        cpu_svm_disable();
  }
  
 -static void svm_hardware_enable(void *garbage)
 +static int svm_hardware_enable(void *garbage)
  {
  
-       struct svm_cpu_data *svm_data;
+       struct svm_cpu_data *sd;
        uint64_t efer;
        struct descriptor_table gdt_descr;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
  
 +      rdmsrl(MSR_EFER, efer);
 +      if (efer & EFER_SVME)
 +              return -EBUSY;
 +
        if (!has_svm()) {
 -              printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
 -              return;
 +              printk(KERN_ERR "svm_hardware_enable: err EOPNOTSUPP on %d\n",
 +                     me);
 +              return -EINVAL;
        }
-       svm_data = per_cpu(svm_data, me);
+       sd = per_cpu(svm_data, me);
  
-       if (!svm_data) {
+       if (!sd) {
 -              printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
 +              printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
                       me);
 -              return;
 +              return -EINVAL;
        }
  
-       svm_data->asid_generation = 1;
-       svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
-       svm_data->next_asid = svm_data->max_asid + 1;
+       sd->asid_generation = 1;
+       sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+       sd->next_asid = sd->max_asid + 1;
  
        kvm_get_gdt(&gdt_descr);
        gdt = (struct desc_struct *)gdt_descr.base;
-       svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+       sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
  
 -      rdmsrl(MSR_EFER, efer);
        wrmsrl(MSR_EFER, efer | EFER_SVME);
  
--      wrmsrl(MSR_VM_HSAVE_PA,
-              page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
 -             page_to_pfn(sd->save_area) << PAGE_SHIFT);
++      wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 +
 +      return 0;
  }
  
  static void svm_cpu_uninit(int cpu)
Simple merge
index 9d1f853120d859cfc814e0f2eaa2e01b3f1575e8,26e37b787ad30f7665a015df7a094dfc537e77a0..0d3f07cd1b5fe9aee977674b11b0beb311e25eb0
@@@ -97,10 -97,10 +97,10 @@@ static void get_runstate_snapshot(struc
  /* return true when a vcpu could run but has no real cpu to run on */
  bool xen_vcpu_stolen(int vcpu)
  {
-       return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+       return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
  }
  
 -static void setup_runstate_info(int cpu)
 +void xen_setup_runstate_info(int cpu)
  {
        struct vcpu_register_runstate_memory_area area;
  
diff --cc crypto/cryptd.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 63099c58a6ddd7a92f616ca196141becbd7ad463,0c4a811242574e9d6432e83db588ff9122f78a0c..3a15de56df9caa31b818139dbcd75e2dc99e5cc8
@@@ -153,8 -153,10 +153,8 @@@ static netdev_tx_t veth_xmit(struct sk_
        struct net_device *rcv = NULL;
        struct veth_priv *priv, *rcv_priv;
        struct veth_net_stats *stats, *rcv_stats;
-       int length, cpu;
+       int length;
  
 -      skb_orphan(skb);
 -
        priv = netdev_priv(dev);
        rcv = priv->peer;
        rcv_priv = netdev_priv(rcv);
Simple merge
Simple merge
Simple merge
index 0302f31a2fb7e53956f089efac74848fbd15466d,f28403ff7648e286c85d131d1a848f01f1d35489..b0173202cad96f3a8aa55df85d16b2d11a62b608
@@@ -88,14 -90,10 +88,9 @@@ struct neigh_statistics 
        unsigned long unres_discards;   /* number of unresolved drops */
  };
  
- #define NEIGH_CACHE_STAT_INC(tbl, field)                              \
-       do {                                                            \
-               preempt_disable();                                      \
-               (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
-               preempt_enable();                                       \
-       } while (0)
+ #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
  
 -struct neighbour
 -{
 +struct neighbour {
        struct neighbour        *next;
        struct neigh_table      *tbl;
        struct neigh_parms      *parms;
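
The NEIGH_CACHE_STAT_INC hunk above is representative of the this_cpu conversions in this series: an open-coded preempt_disable()/per_cpu_ptr(..., smp_processor_id())/preempt_enable() sequence collapses into a single this_cpu_inc(), which is safe against preemption on its own (on x86 it compiles to one segment-prefixed increment). A minimal sketch with hypothetical names, not code from this merge:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

struct example_stats {
        unsigned long hits;
};

/* assume this pointer was set up with alloc_percpu(struct example_stats) */
static struct example_stats *example_stats;

static void count_hit_old(void)
{
        /* old pattern: fence preemption around the per-cpu dereference */
        preempt_disable();
        per_cpu_ptr(example_stats, smp_processor_id())->hits++;
        preempt_enable();
}

static void count_hit_new(void)
{
        /* new pattern: one preemption-safe per-cpu increment */
        this_cpu_inc(example_stats->hits);
}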
Simple merge
index 4f8df01dbe51ad05e1957fa7e1eecd20af7e0765,8631320a50d0fea1969968743f4a5c5d6b135121..429540c70d3f497ae7fc58998b0f248817a2cbf9
@@@ -140,13 -140,9 +140,14 @@@ static inline struct lock_class *hlock_
  }
  
  #ifdef CONFIG_LOCK_STAT
- static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+ static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                     cpu_lock_stats);
  
 +static inline u64 lockstat_clock(void)
 +{
 +      return cpu_clock(smp_processor_id());
 +}
 +
  static int lock_point(unsigned long points[], unsigned long ip)
  {
        int i;
diff --cc kernel/module.c
Simple merge
Simple merge
diff --cc kernel/sched.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc mm/slab.c
index a6c9166996a9f389e72f3c71a2df7bacc937fe6f,211b1746c63ca7c723921b6b3ab6ba17a9a907c1..29b09599af7cb4510c37e02d873eab4e2cd52c79
+++ b/mm/slab.c
@@@ -697,7 -665,27 +697,7 @@@ static inline void init_lock_keys(void
  static DEFINE_MUTEX(cache_chain_mutex);
  static struct list_head cache_chain;
  
- static DEFINE_PER_CPU(struct delayed_work, reap_work);
 -/*
 - * chicken and egg problem: delay the per-cpu array allocation
 - * until the general caches are up.
 - */
 -static enum {
 -      NONE,
 -      PARTIAL_AC,
 -      PARTIAL_L3,
 -      EARLY,
 -      FULL
 -} g_cpucache_up;
 -
 -/*
 - * used by boot code to determine if it can use slab based allocator
 - */
 -int slab_is_available(void)
 -{
 -      return g_cpucache_up >= EARLY;
 -}
 -
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
  
  static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
  {
diff --cc mm/vmalloc.c
Simple merge