Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/base/cpu.c                     |  2
 -rw-r--r--  drivers/cpufreq/cpufreq.c              | 16
 -rw-r--r--  drivers/cpufreq/freq_table.c           | 12
 -rw-r--r--  drivers/crypto/padlock-aes.c           | 12
 -rw-r--r--  drivers/dma/dmaengine.c                | 36
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  |  3
 -rw-r--r--  drivers/lguest/x86/core.c              |  6
 -rw-r--r--  drivers/net/chelsio/sge.c              |  5
 -rw-r--r--  drivers/net/loopback.c                 |  2
 -rw-r--r--  drivers/net/veth.c                     |  7
 -rw-r--r--  drivers/oprofile/cpu_buffer.c          | 19
 -rw-r--r--  drivers/oprofile/cpu_buffer.h          |  4
 -rw-r--r--  drivers/oprofile/oprofile_stats.c      |  4
 -rw-r--r--  drivers/s390/net/netiucv.c             |  8
 14 files changed, 60 insertions(+), 76 deletions(-)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 27fd775375b0..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
	 * boot up and this data does not change there after. Hence this
	 * operation should be safe. No locking required.
	 */
-	addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
 }
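
Why this one-liner matters: __pa() only translates addresses inside the kernel's linear mapping, but the dynamic percpu allocator can place chunks in vmalloc space, where __pa() silently yields a bogus physical address. per_cpu_ptr_to_phys() understands every flavor of percpu address and does the correct translation. A minimal sketch of the pattern, using an illustrative stand-in for kernel/kexec.c's crash_notes:

    #include <linux/percpu.h>

    /* illustrative; the real crash_notes lives in kernel/kexec.c */
    static u32 __percpu *crash_notes_demo;      /* from alloc_percpu(u32) */

    static phys_addr_t crash_notes_demo_phys(int cpu)
    {
            /* WRONG for vmalloc-backed percpu chunks:
             *      __pa(per_cpu_ptr(crash_notes_demo, cpu))
             * Correct for any percpu address: */
            return per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes_demo, cpu));
    }
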
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f20668c09ce0..67bc2ece7b4b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
	 * - Lock should not be held across
	 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
	 */
-static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)				\
 int lock_policy_rwsem_##mode					\
 (int cpu)							\
 {								\
-	int policy_cpu = per_cpu(policy_cpu, cpu);		\
+	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);	\
	BUG_ON(policy_cpu == -1);				\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
	if (unlikely(!cpu_online(cpu))) {			\
@@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
 
 void unlock_policy_rwsem_read(int cpu)
 {
-	int policy_cpu = per_cpu(policy_cpu, cpu);
+	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
 
 void unlock_policy_rwsem_write(int cpu)
 {
-	int policy_cpu = per_cpu(policy_cpu, cpu);
+	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
@@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
 
	/* Set proper policy_cpu */
	unlock_policy_rwsem_write(cpu);
-	per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+	per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
 
	if (lock_policy_rwsem_write(cpu) < 0) {
		/* Should not go through policy unlock path */
@@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
-		per_cpu(policy_cpu, j) = policy->cpu;
+		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
	/* Initially set CPU itself as the policy_cpu */
-	per_cpu(policy_cpu, cpu) = cpu;
+	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);
 
@@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
	int cpu;
 
	for_each_possible_cpu(cpu) {
-		per_cpu(policy_cpu, cpu) = -1;
+		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
 
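
The policy_cpu -> cpufreq_policy_cpu rename is not cosmetic: these patches prepare for dropping the implicit per_cpu__ symbol prefix, after which per-cpu variables live in the ordinary identifier namespace. per_cpu(policy_cpu, cpu) inside a function that also declares a local int policy_cpu would then resolve against the local and fail to compile. A user-space analogy of the clash and the fix (not the kernel's real macros):

    #include <stdio.h>

    #define per_cpu(var, cpu) ((var)[cpu])  /* no name mangling any more */

    int cpufreq_policy_cpu[4] = { 0, 1, 2, 3 };

    int lock_example(int cpu)
    {
            /* With the old name this line would have been
             *      int policy_cpu = per_cpu(policy_cpu, cpu);
             * which expands to `int policy_cpu = ((policy_cpu)[cpu]);`
             * -- the initializer refers to the local being declared and
             * no longer compiles.  The unique global name avoids it: */
            int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
            return policy_cpu;
    }

    int main(void)
    {
            printf("policy cpu: %d\n", lock_example(1));
            return 0;
    }

The same reasoning drives the show_table -> cpufreq_show_table, last_cword -> paes_last_cword, last_cpu -> lg_last_cpu and cpu_buffer -> op_cpu_buffer renames below.
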
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index a9bd3a05a684..05432216e224 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
 
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
 /**
  * show_available_freqs - show available frequencies for the specified CPU
  */
@@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
	ssize_t count = 0;
	struct cpufreq_frequency_table *table;
 
-	if (!per_cpu(show_table, cpu))
+	if (!per_cpu(cpufreq_show_table, cpu))
		return -ENODEV;
 
-	table = per_cpu(show_table, cpu);
+	table = per_cpu(cpufreq_show_table, cpu);
 
	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
@@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
				      unsigned int cpu)
 {
	dprintk("setting show_table for cpu %u to %p\n", cpu, table);
-	per_cpu(show_table, cpu) = table;
+	per_cpu(cpufreq_show_table, cpu) = table;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
 
 void cpufreq_frequency_table_put_attr(unsigned int cpu)
 {
	dprintk("clearing show_table for cpu %u\n", cpu);
-	per_cpu(show_table, cpu) = NULL;
+	per_cpu(cpufreq_show_table, cpu) = NULL;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
-	return per_cpu(show_table, cpu);
+	return per_cpu(cpufreq_show_table, cpu);
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
 
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 84c51e177269..8c2f3703ec85 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -64,7 +64,7 @@ struct aes_ctx {
	u32 *D;
 };
 
-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);
 
 /* Tells whether the ACE is capable to generate
    the extended key for a given key_len. */
@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 
 ok:
	for_each_online_cpu(cpu)
-		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
-		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
-			per_cpu(last_cword, cpu) = NULL;
+		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+			per_cpu(paes_last_cword, cpu) = NULL;
 
	return 0;
 }
@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
 {
	int cpu = raw_smp_processor_id();
 
-	if (cword != per_cpu(last_cword, cpu))
+	if (cword != per_cpu(paes_last_cword, cpu))
 #ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
 #else
@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)
 
 static inline void padlock_store_cword(struct cword *cword)
 {
-	per_cpu(last_cword, raw_smp_processor_id()) = cword;
+	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
 }
 
 /*
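
For context, paes_last_cword implements a per-CPU "last key loaded" cache: the PadLock unit keeps the expanded key until the flags register is popped, so the driver forces a reload only when this CPU might still hold a different control word. raw_smp_processor_id() is sufficient because a stale hint merely costs one redundant reload. The idiom, condensed from the hunks above (32-bit flag sequence shown; the 64-bit variant differs):

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct cword *, paes_last_cword);

    static inline void padlock_reset_key(struct cword *cword)
    {
            int cpu = raw_smp_processor_id();

            /* force a key reload only if this CPU last saw a
             * different control word */
            if (cword != per_cpu(paes_last_cword, cpu))
                    asm volatile ("pushfl; popfl");
    }
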
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082ce..6f51a0a7a8bb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
  */
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-	struct dma_chan *chan;
-	int cpu;
-
-	cpu = get_cpu();
-	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
-	put_cpu();
-
-	return chan;
+	return this_cpu_read(channel_table[tx_type]->chan);
 }
 EXPORT_SYMBOL(dma_find_channel);
 
@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;
 
	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
	return cookie;
 }
@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;
 
	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
	return cookie;
 }
@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;
 
	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);
 
-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();
 
	return cookie;
 }
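
Two distinct conversions are in play above. dma_find_channel() needs only a single racy-but-harmless read, so this_cpu_read() replaces the whole get_cpu()/put_cpu() dance: whichever CPU's channel it returns is a valid answer. The statistics updates still want both increments on the same CPU, so an explicit preempt_disable() brackets the cheap __this_cpu ops (each typically a single segment-relative instruction on x86). A sketch of both idioms against a hypothetical percpu stats structure:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    struct xfer_stats {                     /* stand-in for chan->local */
            unsigned long bytes_transferred;
            unsigned long memcpy_count;
    };
    static struct xfer_stats __percpu *stats;  /* alloc_percpu() elsewhere */

    static void account_old(size_t len)
    {
            int cpu = get_cpu();            /* disables preemption */

            per_cpu_ptr(stats, cpu)->bytes_transferred += len;
            per_cpu_ptr(stats, cpu)->memcpy_count++;
            put_cpu();
    }

    static void account_new(size_t len)
    {
            preempt_disable();
            __this_cpu_add(stats->bytes_transferred, len);
            __this_cpu_inc(stats->memcpy_count);
            preempt_enable();
    }
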
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4b89b791be6a..42be0b15084b 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
 
		list_del(&cq->entry);
-		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						  smp_processor_id()));
+		__queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
	}
 
	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6ae388849a3b..fb2b7ef7868e 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
 }
 
-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
 
 /*S:010
  * We approach the Switcher.
@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
	 * meanwhile). If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
-	if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
-		__get_cpu_var(last_cpu) = cpu;
+	if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__get_cpu_var(lg_last_cpu) = cpu;
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 8c658cf6f62f..109d2783e4d8 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
	}
	__skb_pull(skb, sizeof(*p));
 
-	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+	st = this_cpu_ptr(sge->port_stats[p->iff]);
 
	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
-	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
-						smp_processor_id());
+	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index eae4ad749e9d..b9fcc9819837 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
	/* it's OK to use per_cpu_ptr() because BHs are off */
	pcpu_lstats = dev->ml_priv;
-	lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+	lb_stats = this_cpu_ptr(pcpu_lstats);
 
	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
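
this_cpu_ptr(p) is the one-step form of per_cpu_ptr(p, smp_processor_id()): it derives the address from the per-cpu segment base instead of looking up the CPU number first, and it is safe wherever the caller is already non-preemptible (here the xmit path runs with BHs off, as the comment notes). The same substitution covers the ehca, chelsio and veth hunks. A condensed sketch with an illustrative stats structure:

    #include <linux/percpu.h>

    struct pcpu_lstats {                    /* illustrative */
            unsigned long packets;
            unsigned long bytes;
    };

    /* caller must be non-preemptible, e.g. BHs disabled */
    static void count_rx(struct pcpu_lstats __percpu *lstats,
                         unsigned int len)
    {
            struct pcpu_lstats *lb = this_cpu_ptr(lstats);

            lb->packets++;
            lb->bytes += len;
    }
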
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 63099c58a6dd..3a15de56df9c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
-	int length, cpu;
+	int length;
 
	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);
 
-	cpu = smp_processor_id();
-	stats = per_cpu_ptr(priv->stats, cpu);
-	rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+	stats = this_cpu_ptr(priv->stats);
+	rcv_stats = this_cpu_ptr(rcv_priv->stats);
 
	if (!(rcv->flags & IFF_UP))
		goto tx_drop;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a7aae24f2889..166b67ea622f 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -47,7 +47,7 @@
  */
 static struct ring_buffer *op_ring_buffer_read;
 static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
 
@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
 
 void oprofile_cpu_buffer_inc_smpl_lost(void)
 {
-	struct oprofile_cpu_buffer *cpu_buf
-		= &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
	cpu_buf->sample_lost_overflow++;
 }
@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
		goto fail;
 
	for_each_possible_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
		b->last_task = NULL;
		b->last_is_kernel = -1;
@@ -122,7 +121,7 @@ void start_cpu_work(void)
	work_enabled = 1;
 
	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
		/*
		 * Spread the work by 1 jiffy per cpu so they dont all
@@ -139,7 +138,7 @@ void end_cpu_work(void)
	work_enabled = 0;
 
	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);
 
		cancel_delayed_work(&b->work);
	}
@@ -330,7 +329,7 @@ static inline void
 __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			   unsigned long event, int is_kernel)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;
 
	/*
@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
 {
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
	cpu_buf->sample_received++;
 
@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
 
	if (!cpu_buf->tracing)
		return;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 272995d20293..68ea16ab645f 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
	struct delayed_work work;
 };
 
-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 /*
  * Resets the cpu buffer to a sane state.
@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
  */
 static inline void op_cpu_buffer_reset(int cpu)
 {
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);
 
	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 61689e814d46..917d28ebeacd 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
	int i;
 
	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
		cpu_buf->sample_received = 0;
		cpu_buf->sample_lost_overflow = 0;
		cpu_buf->backtrace_aborted = 0;
@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
		return;
 
	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
		snprintf(buf, 10, "cpu%d", i);
		cpudir = oprofilefs_mkdir(sb, dir, buf);
 
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 395c04c2b00f..98c04cac43c1 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
 #define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
-			char* iucv_dbf_txt_buf = \
-				get_cpu_var(iucv_dbf_txt_buf); \
-			sprintf(iucv_dbf_txt_buf, text); \
-			debug_text_event(iucv_dbf_##name, level, \
-					 iucv_dbf_txt_buf); \
+			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+			sprintf(__buf, text); \
+			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
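
The netiucv change fixes the same shadowing hazard in macro form: the local char pointer was named identically to the per-cpu variable it aliases, which breaks once per-cpu symbols share the plain identifier namespace; renaming it __buf sidesteps the clash. The surrounding idiom pairs get_cpu_var(), which disables preemption and returns this CPU's instance, with put_cpu_var(), which re-enables preemption. A sketch with illustrative names:

    #include <linux/percpu.h>
    #include <linux/kernel.h>

    static DEFINE_PER_CPU(char[256], dbf_txt_buf);      /* illustrative */

    static void dbf_log(const char *msg)
    {
            char *buf = get_cpu_var(dbf_txt_buf);       /* preemption off */

            snprintf(buf, 256, "%s", msg);
            /* ... hand buf to the debug facility while still pinned ... */
            put_cpu_var(dbf_txt_buf);                   /* preemption back on */
    }
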