author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-15 01:48:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-15 01:48:18 -0400
commit     0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree       67de46978c90f37540dd6ded1db20eb53a569030 /net
parent     6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent     513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now duplicate two sets of accessors remained with the pointer based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations.

  This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().

  Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr().

  This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
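The conversion in the net/ files below is mechanical. As a rough illustration of the pattern (the per-CPU variable example_stats and the function record_event are made-up names for illustration, not symbols touched by this merge):

	#include <linux/percpu.h>
	#include <linux/irqflags.h>

	struct example_stats {
		unsigned long events;
	};

	static DEFINE_PER_CPU(struct example_stats, example_stats);

	static void record_event(void)
	{
		struct example_stats *stats;
		unsigned long flags;

		local_irq_save(flags);
		/* Old lvalue accessor: the caller takes the address itself. */
		/* stats = &__get_cpu_var(example_stats); */
		/* New pointer accessor used throughout this series. */
		stats = this_cpu_ptr(&example_stats);
		stats->events++;
		local_irq_restore(flags);
	}

raw_cpu_ptr() is the matching replacement for __this_cpu_ptr(): it returns the same pointer but skips the preemption-safety checks, for callers that have already arranged stability another way (for example, the tcp_md5sig_pool lookup below runs with bottom halves disabled).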
Diffstat (limited to 'net')
-rw-r--r--   net/core/dev.c           | 14
-rw-r--r--   net/core/drop_monitor.c  |  2
-rw-r--r--   net/core/skbuff.c        |  2
-rw-r--r--   net/ipv4/route.c         |  4
-rw-r--r--   net/ipv4/syncookies.c    |  2
-rw-r--r--   net/ipv4/tcp.c           |  2
-rw-r--r--   net/ipv4/tcp_output.c    |  2
-rw-r--r--   net/ipv6/syncookies.c    |  2
-rw-r--r--   net/rds/ib_rdma.c        |  2
9 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4699dcfdc4ab..6470716ddba4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 	q->next_sched = NULL;
 	*sd->output_queue_tailp = q;
 	sd->output_queue_tailp = &q->next_sched;
@@ -3233,7 +3233,7 @@ static void rps_trigger_softirq(void *data)
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
 	if (sd != mysd) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3260,7 +3260,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	if (qlen < (netdev_max_backlog >> 1))
 		return false;
 
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 
 	rcu_read_lock();
 	fl = rcu_dereference(sd->flow_limit);
@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -3832,7 +3832,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	struct sk_buff *skb, *tmp;
 
 	rps_lock(sd);
@@ -4379,7 +4379,7 @@ void __napi_schedule(struct napi_struct *n)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	____napi_schedule(&__get_cpu_var(softnet_data), n);
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4500,7 +4500,7 @@ EXPORT_SYMBOL(netif_napi_del);
 
 static void net_rx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 50f9a9db5792..252e155c837b 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	data = &__get_cpu_var(dm_cpu_data);
+	data = this_cpu_ptr(&dm_cpu_data);
 	spin_lock(&data->lock);
 	dskb = data->skb;
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 829d013745ab..61059a05ec95 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	nc = &__get_cpu_var(netdev_alloc_cache);
+	nc = this_cpu_ptr(&netdev_alloc_cache);
 	if (unlikely(!nc->frag.page)) {
 refill:
 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 793c0bb8c4fd..2d4ae469b471 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1311,7 +1311,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	if (rt_is_input_route(rt)) {
 		p = (struct rtable **)&nh->nh_rth_input;
 	} else {
-		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
 	}
 	orig = *p;
 
@@ -1939,7 +1939,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 			do_cache = false;
 			goto add;
 		}
-		prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 	}
 	rth = rcu_dereference(*prth);
 	if (rt_cache_valid(rth)) {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0431a8f3c8f4..af660030e3c7 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -40,7 +40,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 
 	net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
 
-	tmp = __get_cpu_var(ipv4_cookie_scratch);
+	tmp = this_cpu_ptr(ipv4_cookie_scratch);
 	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
 	tmp[0] = (__force u32)saddr;
 	tmp[1] = (__force u32)daddr;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86023b9be47f..1bec4e76d88c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2941,7 +2941,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 	local_bh_disable();
 	p = ACCESS_ONCE(tcp_md5sig_pool);
 	if (p)
-		return __this_cpu_ptr(p);
+		return raw_cpu_ptr(p);
 
 	local_bh_enable();
 	return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8d4eac793700..becd98ce9a1c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -852,7 +852,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	/* queue this socket to tasklet queue */
 	local_irq_save(flags);
-	tsq = &__get_cpu_var(tsq_tasklet);
+	tsq = this_cpu_ptr(&tsq_tasklet);
 	list_add(&tp->tsq_node, &tsq->head);
 	tasklet_schedule(&tsq->tasklet);
 	local_irq_restore(flags);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9a2838e93cc5..e25b633266c3 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -67,7 +67,7 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
 
 	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
 
-	tmp = __get_cpu_var(ipv6_cookie_scratch);
+	tmp = this_cpu_ptr(ipv6_cookie_scratch);
 
 	/*
 	 * we have 320 bits of information to hash, copy in the remaining
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index e8fdb172adbb..273b8bff6ba4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -267,7 +267,7 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
 	unsigned long *flag;
 
 	preempt_disable();
-	flag = &__get_cpu_var(clean_list_grace);
+	flag = this_cpu_ptr(&clean_list_grace);
 	set_bit(CLEAN_LIST_BUSY_BIT, flag);
 	ret = llist_del_first(&pool->clean_list);
 	if (ret)