Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4699dcfdc4ab..b793e3521a36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 	q->next_sched = NULL;
 	*sd->output_queue_tailp = q;
 	sd->output_queue_tailp = &q->next_sched;
@@ -2675,7 +2675,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	if (skb->encapsulation)
 		features &= dev->hw_enc_features;
 
-	if (netif_needs_gso(skb, features)) {
+	if (netif_needs_gso(dev, skb, features)) {
 		struct sk_buff *segs;
 
 		segs = skb_gso_segment(skb, features);
@@ -3233,7 +3233,7 @@ static void rps_trigger_softirq(void *data)
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 
 	if (sd != mysd) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3260,7 +3260,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	if (qlen < (netdev_max_backlog >> 1))
 		return false;
 
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 
 	rcu_read_lock();
 	fl = rcu_dereference(sd->flow_limit);
@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 
 static void net_tx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -3832,7 +3832,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	struct sk_buff *skb, *tmp;
 
 	rps_lock(sd);
@@ -4379,7 +4379,7 @@ void __napi_schedule(struct napi_struct *n)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	____napi_schedule(&__get_cpu_var(softnet_data), n);
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4500,7 +4500,7 @@ EXPORT_SYMBOL(netif_napi_del);
 
 static void net_rx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;