Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c | 25 +++++++++----------------
1 file changed, 9 insertions(+), 16 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1796cef55ab5..aa82f9ab6a36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                        atomic_long_inc(&dev->rx_dropped);
-                        kfree_skb(skb);
-                        return NET_RX_DROP;
-                }
-        }
-
-        if (unlikely(!is_skb_forwardable(dev, skb))) {
+        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+            unlikely(!is_skb_forwardable(dev, skb))) {
                 atomic_long_inc(&dev->rx_dropped);
                 kfree_skb(skb);
                 return NET_RX_DROP;
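Note on the first hunk: the nine removed lines are an open-coded version of the skb_orphan_frags() helper plus a duplicated drop path; folding both tests into one condition leaves a single error branch. For reference, skb_orphan_frags() in include/linux/skbuff.h of this vintage looks roughly like the sketch below (paraphrased from memory, so treat the exact body as approximate rather than authoritative):

        /* Approximate body of skb_orphan_frags(): a no-op unless the skb
         * carries zero-copy user fragments, in which case the fragments
         * are copied so the userspace buffers can be released.
         */
        static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
        {
                if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                        return 0;
                return skb_copy_ubufs(skb, gfp_mask);
        }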
@@ -3079,7 +3072,7 @@ static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
             struct rps_dev_flow *rflow, u16 next_cpu)
 {
-        if (next_cpu != RPS_NO_CPU) {
+        if (next_cpu < nr_cpu_ids) {
 #ifdef CONFIG_RFS_ACCEL
                 struct netdev_rx_queue *rxqueue;
                 struct rps_dev_flow_table *flow_table;
@@ -3184,7 +3177,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                  * If the desired CPU (where last recvmsg was done) is
                  * different from current CPU (one in the rx-queue flow
                  * table entry), switch if one of the following holds:
-                 *   - Current CPU is unset (equal to RPS_NO_CPU).
+                 *   - Current CPU is unset (>= nr_cpu_ids).
                  *   - Current CPU is offline.
                  *   - The current CPU's queue tail has advanced beyond the
                  *     last packet that was enqueued using this table entry.
@@ -3192,14 +3185,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                  *   have been dequeued, thus preserving in order delivery.
                  */
                 if (unlikely(tcpu != next_cpu) &&
-                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+                    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
                       rflow->last_qtail)) >= 0)) {
                         tcpu = next_cpu;
                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
                 }
 
-                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+                if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
                         *rflowp = rflow;
                         cpu = tcpu;
                         goto done;
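The queue-head test kept in this hunk deserves a note: input_queue_head and last_qtail are free-running unsigned counters, so "(int)(head - tail) >= 0" is the usual wraparound-safe way to ask whether the per-CPU queue has drained past the last packet enqueued via this flow entry (correct as long as the two counters stay within 2^31 of each other). A standalone sketch of the idiom, with a hypothetical helper name:

        /* Minimal sketch (not kernel code) of the wraparound-safe ordering
         * test used above: the unsigned subtraction wraps, and casting the
         * difference to signed recovers "head has reached or passed tail".
         */
        #include <stdio.h>

        static int head_reached(unsigned int head, unsigned int tail)
        {
                return (int)(head - tail) >= 0;
        }

        int main(void)
        {
                /* Near-wraparound case: head has advanced past tail even
                 * though its numeric value is smaller.
                 */
                unsigned int tail = 0xfffffff0u;
                unsigned int head = 0x00000010u;        /* tail + 0x20, wrapped */

                printf("%d\n", head_reached(head, tail));   /* 1: head passed tail */
                printf("%d\n", head_reached(tail, head));   /* 0: tail is behind */
                return 0;
        }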
@@ -3240,14 +3233,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
         struct rps_dev_flow_table *flow_table;
         struct rps_dev_flow *rflow;
         bool expire = true;
-        int cpu;
+        unsigned int cpu;
 
         rcu_read_lock();
         flow_table = rcu_dereference(rxqueue->rps_flow_table);
         if (flow_table && flow_id <= flow_table->mask) {
                 rflow = &flow_table->flows[flow_id];
                 cpu = ACCESS_ONCE(rflow->cpu);
-                if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+                if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
                            rflow->last_qtail) <
                      (int)(10 * flow_table->mask)))
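Across these RPS hunks the "unset" test changes from comparing against the RPS_NO_CPU sentinel (0xffff in this era's netdevice.h, matching the u16 rflow->cpu field) to a bound check against nr_cpu_ids, and cpu becomes unsigned so that comparison mixes no signedness. The bound check subsumes the sentinel: 0xffff is never a valid CPU index, and, unlike the equality test, it also rejects any stale out-of-range value before it is used to index per-CPU data. A standalone sketch (nr_cpu_ids stands in for the kernel global):

        #include <stdio.h>

        #define RPS_NO_CPU 0xffff               /* historical sentinel value */

        int main(void)
        {
                unsigned int nr_cpu_ids = 8;    /* stand-in for the kernel global */
                unsigned short cpu_vals[] = { 3, 9, RPS_NO_CPU };

                /* Only a genuinely valid index passes; both the sentinel and
                 * a stale out-of-range value are rejected by the same test.
                 */
                for (int i = 0; i < 3; i++) {
                        unsigned int cpu = cpu_vals[i];
                        printf("cpu=%u usable=%d\n", cpu, cpu < nr_cpu_ids);
                }
                return 0;
        }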
@@ -5209,7 +5202,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
                 return -EBUSY;
 
-        if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
+        if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
                 return -EEXIST;
 
         if (master && netdev_master_upper_dev_get(dev))
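The last hunk changes which adjacency list the -EEXIST check walks. In this era struct net_device keeps two sets of lists: adj_list for direct upper/lower neighbours and all_adj_list, which also contains every transitively reachable device. Checking dev->adj_list.upper therefore only rejects a second direct link to the same upper device, rather than rejecting an upper device that was merely reachable indirectly, while the loop check against upper_dev->all_adj_list.upper just above still uses the transitive list. A simplified userspace sketch of the layout (names abbreviated, not the real struct):

        /* Sketch of the two adjacency list sets kept per net_device in
         * this era, reduced to the fields the hunk touches.
         */
        struct list_head { struct list_head *next, *prev; };

        struct netdev_sketch {
                struct {
                        struct list_head upper;  /* direct upper devices only */
                        struct list_head lower;  /* direct lower devices only */
                } adj_list;
                struct {
                        struct list_head upper;  /* every upper device, incl. indirect */
                        struct list_head lower;  /* every lower device, incl. indirect */
                } all_adj_list;
        };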