Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 6 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 5156c0edebe8..a9775d676285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3963,6 +3963,8 @@ int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
 int dev_rx_weight __read_mostly = 64;
 int dev_tx_weight __read_mostly = 64;
+/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
+int gro_normal_batch __read_mostly = 8;
 
 /* Called with irq disabled */
 static inline void ____napi_schedule(struct softnet_data *sd,
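The new gro_normal_batch value caps how many GRO_NORMAL skbs may accumulate on napi->rx_list before they are handed to the stack in one go. This diffstat is limited to net/core/dev.c, so the knob's registration is not shown here; assuming it is exposed as the net.core.gro_normal_batch sysctl (the proc path below is that assumption, not something this diff confirms), a minimal sketch for reading it at run time:

/* Minimal sketch: read the current GRO_NORMAL batch size.
 * Assumes the tunable is exposed as net.core.gro_normal_batch;
 * that registration happens outside this file.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/core/gro_normal_batch", "r");
	int batch = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &batch) == 1)
		printf("GRO_NORMAL batch size: %d\n", batch);
	fclose(f);
	return 0;
}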
@@ -5486,7 +5488,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 	skb->data_len -= grow;
 	skb->tail += grow;
 
-	pinfo->frags[0].page_offset += grow;
+	skb_frag_off_add(&pinfo->frags[0], grow);
 	skb_frag_size_sub(&pinfo->frags[0], grow);
 
 	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
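This hunk belongs to the wider skb_frag_t accessor conversion: rather than poking page_offset directly, core code goes through helpers so the underlying frag representation can change without touching every caller. Roughly (my paraphrase, not the exact include/linux/skbuff.h definition), the helper amounts to:

/* Rough sketch of the accessor this hunk switches to; the real
 * definition lives in include/linux/skbuff.h and may differ in detail.
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;	/* skb_frag_t is a struct bio_vec here */
}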
@@ -5747,6 +5749,26 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+	if (!napi->rx_count)
+		return;
+	netif_receive_skb_list_internal(&napi->rx_list);
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+	list_add_tail(&skb->list, &napi->rx_list);
+	if (++napi->rx_count >= gro_normal_batch)
+		gro_normal_list(napi);
+}
+
 static gro_result_t napi_frags_finish(struct napi_struct *napi,
 				      struct sk_buff *skb,
 				      gro_result_t ret)
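gro_normal_one() and gro_normal_list() implement a plain accumulate-and-flush pattern: skbs that GRO declines to coalesce are parked on napi->rx_list and delivered in a single netif_receive_skb_list_internal() call once gro_normal_batch of them have piled up, amortizing the per-packet cost of entering the stack. A self-contained userspace model of the same pattern (toy types throughout, none of this is kernel API):

/* Toy userspace model of the batch-and-flush pattern used above.
 * All names here are illustrative.
 */
#include <stdio.h>

#define BATCH 8

struct pkt {
	int id;
	struct pkt *next;
};

static struct pkt *rx_list, **rx_tail = &rx_list;
static int rx_count;

/* Deliver the whole batch in one call, like gro_normal_list(). */
static void flush_list(void)
{
	struct pkt *p;

	if (!rx_count)
		return;
	for (p = rx_list; p; p = p->next)
		printf("delivering pkt %d\n", p->id);
	rx_list = NULL;
	rx_tail = &rx_list;
	rx_count = 0;
}

/* Queue one packet; flush when the batch threshold is reached,
 * like gro_normal_one().
 */
static void queue_one(struct pkt *p)
{
	p->next = NULL;
	*rx_tail = p;
	rx_tail = &p->next;
	if (++rx_count >= BATCH)
		flush_list();
}

int main(void)
{
	struct pkt pkts[20];
	int i;

	for (i = 0; i < 20; i++) {
		pkts[i].id = i;
		queue_one(&pkts[i]);
	}
	flush_list();	/* final flush, like the end of a napi poll cycle */
	return 0;
}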
@@ -5756,8 +5778,8 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
 	case GRO_HELD:
 		__skb_push(skb, ETH_HLEN);
 		skb->protocol = eth_type_trans(skb, skb->dev);
-		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
-			ret = GRO_DROP;
+		if (ret == GRO_NORMAL)
+			gro_normal_one(napi, skb);
 		break;
 
 	case GRO_DROP:
@@ -6034,6 +6056,8 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 				   NAPIF_STATE_IN_BUSY_POLL)))
 		return false;
 
+	gro_normal_list(n);
+
 	if (n->gro_bitmask) {
 		unsigned long timeout = 0;
 
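Flushing inside napi_complete_done() guarantees rx_list is empty before the napi is rearmed, so no skb can be stranded on the list while another CPU takes over polling. Drivers need no changes; in a typical poll routine the flush happens transparently inside napi_complete_done(). A hypothetical driver poll (all mydrv_* names are illustrative, not a real driver):

/* Hypothetical driver poll routine. Any skbs batched on napi->rx_list
 * via the GRO path are flushed inside napi_complete_done() before the
 * napi is rearmed.
 */
static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
	int work_done = mydrv_clean_rx(ring, budget); /* calls napi_gro_receive() */

	if (work_done < budget &&
	    napi_complete_done(napi, work_done)) /* flushes rx_list, may rearm */
		mydrv_enable_irq(ring);

	return work_done;
}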
@@ -6119,10 +6143,19 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
 	 */
 	rc = napi->poll(napi, BUSY_POLL_BUDGET);
+	/* We can't gro_normal_list() here, because napi->poll() might have
+	 * rearmed the napi (napi_complete_done()) in which case it could
+	 * already be running on another CPU.
+	 */
 	trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
 	netpoll_poll_unlock(have_poll_lock);
-	if (rc == BUSY_POLL_BUDGET)
+	if (rc == BUSY_POLL_BUDGET) {
+		/* As the whole budget was spent, we still own the napi so can
+		 * safely handle the rx_list.
+		 */
+		gro_normal_list(napi);
 		__napi_schedule(napi);
+	}
 	local_bh_enable();
 }
 
@@ -6167,6 +6200,7 @@ restart:
 		}
 		work = napi_poll(napi, BUSY_POLL_BUDGET);
 		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
+		gro_normal_list(napi);
count:
 		if (work > 0)
 			__NET_ADD_STATS(dev_net(napi->dev),
@@ -6272,6 +6306,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	napi->timer.function = napi_watchdog;
 	init_gro_hash(napi);
 	napi->skb = NULL;
+	INIT_LIST_HEAD(&napi->rx_list);
+	napi->rx_count = 0;
 	napi->poll = poll;
 	if (weight > NAPI_POLL_WEIGHT)
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
@@ -6368,6 +6404,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 		goto out_unlock;
 	}
 
+	gro_normal_list(n);
+
 	if (n->gro_bitmask) {
 		/* flush too old packets
 		 * If HZ < 1000, flush all packets.
@@ -8088,12 +8126,15 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		bpf_chk = generic_xdp_install;
 
 	if (fd >= 0) {
+		u32 prog_id;
+
 		if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
 			NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time");
 			return -EEXIST;
 		}
-		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
-		    __dev_xdp_query(dev, bpf_op, query)) {
+
+		prog_id = __dev_xdp_query(dev, bpf_op, query);
+		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) {
 			NL_SET_ERR_MSG(extack, "XDP program already attached");
 			return -EBUSY;
 		}
@@ -8108,6 +8149,14 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 			bpf_prog_put(prog);
 			return -EINVAL;
 		}
+
+		if (prog->aux->id == prog_id) {
+			bpf_prog_put(prog);
+			return 0;
+		}
+	} else {
+		if (!__dev_xdp_query(dev, bpf_op, query))
+			return 0;
 	}
 
 	err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
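The dev_change_xdp_fd() hunks make attach and detach idempotent: installing a program whose id matches the one already attached now returns 0 without reinstalling, and detaching (fd < 0) when nothing is attached also returns 0 early instead of calling into the driver. A hedged userspace sketch, assuming libbpf's bpf_set_link_xdp_fd() as available around this kernel release (the hand-rolled XDP_PASS program and the "eth0" interface are illustrative):

/* Sketch: attaching the same XDP program twice. With this change the
 * second attach is a no-op that still returns success.
 */
#include <linux/bpf.h>
#include <net/if.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* r0 = XDP_PASS; exit -- the smallest valid XDP program. */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = XDP_PASS },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int ifindex = if_nametoindex("eth0");	/* illustrative interface */
	int fd = bpf_load_program(BPF_PROG_TYPE_XDP, insns,
				  sizeof(insns) / sizeof(insns[0]),
				  "GPL", 0, NULL, 0);

	if (fd < 0 || !ifindex)
		return 1;

	if (bpf_set_link_xdp_fd(ifindex, fd, 0))
		return 1;
	/* Second attach of the same prog: previously a full reinstall,
	 * now detected via prog->aux->id and short-circuited to 0.
	 */
	if (bpf_set_link_xdp_fd(ifindex, fd, 0))
		return 1;

	return bpf_set_link_xdp_fd(ifindex, -1, 0);	/* detach */
}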
