author		David S. Miller <davem@davemloft.net>	2018-05-07 23:35:08 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-07 23:35:08 -0400
commit		01adc4851a8090b46c7a5ed9cfc4b97e65abfbf4 (patch)
tree		2ae02593d7139962648dff203f3f9701e34ccbc3 /net/core/dev.c
parent		18b338f5f9539512e76fd9ebd4c6ca1a0e159e2b (diff)
parent		e94fa1d93117e7f1eb783dc9cae6c70650944449 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Minor conflict: a CHECK was placed into an if() statement in net-next,
whilst a newline was added to that CHECK call in 'net'. Thanks to
Daniel for the merge resolution.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	73
1 file changed, 57 insertions(+), 16 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index bb81a6e1d354..29bf39174900 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3627,6 +3627,44 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
+int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+	struct net_device *dev = skb->dev;
+	struct sk_buff *orig_skb = skb;
+	struct netdev_queue *txq;
+	int ret = NETDEV_TX_BUSY;
+	bool again = false;
+
+	if (unlikely(!netif_running(dev) ||
+		     !netif_carrier_ok(dev)))
+		goto drop;
+
+	skb = validate_xmit_skb_list(skb, dev, &again);
+	if (skb != orig_skb)
+		goto drop;
+
+	skb_set_queue_mapping(skb, queue_id);
+	txq = skb_get_tx_queue(dev, skb);
+
+	local_bh_disable();
+
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	if (!netif_xmit_frozen_or_drv_stopped(txq))
+		ret = netdev_start_xmit(skb, dev, txq, false);
+	HARD_TX_UNLOCK(dev, txq);
+
+	local_bh_enable();
+
+	if (!dev_xmit_complete(ret))
+		kfree_skb(skb);
+
+	return ret;
+drop:
+	atomic_long_inc(&dev->tx_dropped);
+	kfree_skb_list(skb);
+	return NET_XMIT_DROP;
+}
+EXPORT_SYMBOL(dev_direct_xmit);
+
 /*************************************************************************
  *			Receiver routines
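For orientation (not part of the diff itself): dev_direct_xmit() sends an already validated, fully built frame to a caller-chosen TX queue, skipping the qdisc layer entirely; it appears to have been factored out of AF_PACKET's packet_direct_xmit() so the new AF_XDP transmit path could share it. A minimal sketch of a hypothetical caller follows; the wrapper name and the queue choice are illustrative only:

/* Hypothetical wrapper: transmit one prepared skb on TX queue 0,
 * bypassing the qdisc layer. skb->dev must already be set and the
 * frame fully built, as in AF_PACKET's QDISC-bypass mode.
 */
static int example_direct_send(struct sk_buff *skb)
{
	/* dev_direct_xmit() owns the skb from here on: it frees it
	 * itself on drop or incomplete transmit, so the caller
	 * must not free it again.
	 */
	return dev_direct_xmit(skb, 0 /* queue_id */);
}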
@@ -3996,12 +4034,12 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
 }
 
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+				     struct xdp_buff *xdp,
 				     struct bpf_prog *xdp_prog)
 {
 	struct netdev_rx_queue *rxqueue;
 	void *orig_data, *orig_data_end;
 	u32 metalen, act = XDP_DROP;
-	struct xdp_buff xdp;
 	int hlen, off;
 	u32 mac_len;
 
@@ -4036,19 +4074,19 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	 */
 	mac_len = skb->data - skb_mac_header(skb);
 	hlen = skb_headlen(skb) + mac_len;
-	xdp.data = skb->data - mac_len;
-	xdp.data_meta = xdp.data;
-	xdp.data_end = xdp.data + hlen;
-	xdp.data_hard_start = skb->data - skb_headroom(skb);
-	orig_data_end = xdp.data_end;
-	orig_data = xdp.data;
+	xdp->data = skb->data - mac_len;
+	xdp->data_meta = xdp->data;
+	xdp->data_end = xdp->data + hlen;
+	xdp->data_hard_start = skb->data - skb_headroom(skb);
+	orig_data_end = xdp->data_end;
+	orig_data = xdp->data;
 
 	rxqueue = netif_get_rxqueue(skb);
-	xdp.rxq = &rxqueue->xdp_rxq;
+	xdp->rxq = &rxqueue->xdp_rxq;
 
-	act = bpf_prog_run_xdp(xdp_prog, &xdp);
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
 
-	off = xdp.data - orig_data;
+	off = xdp->data - orig_data;
 	if (off > 0)
 		__skb_pull(skb, off);
 	else if (off < 0)
@@ -4058,10 +4096,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
 	 * pckt.
 	 */
-	off = orig_data_end - xdp.data_end;
+	off = orig_data_end - xdp->data_end;
 	if (off != 0) {
-		skb_set_tail_pointer(skb, xdp.data_end - xdp.data);
+		skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
 		skb->len -= off;
+
 	}
 
 	switch (act) {
@@ -4070,7 +4109,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 		__skb_push(skb, mac_len);
 		break;
 	case XDP_PASS:
-		metalen = xdp.data - xdp.data_meta;
+		metalen = xdp->data - xdp->data_meta;
 		if (metalen)
 			skb_metadata_set(skb, metalen);
 		break;
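The pull/push, tail-pointer, and metadata fixups in the three hunks above exist because an XDP program may move xdp->data, xdp->data_end, and xdp->data_meta underneath the skb, and generic XDP must resynchronize the skb afterwards. A hedged sketch of a program exercising all three follows; bpf_xdp_adjust_meta/head/tail are the kernel's real XDP helpers, but the program, its constants, and the metadata value are illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_adjust_example(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	/* Reserve 4 bytes of metadata in front of the frame; a
	 * successful call moves ctx->data_meta, and generic XDP
	 * later publishes it via skb_metadata_set(skb, metalen).
	 */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* verifier bounds check */
		return XDP_PASS;
	*meta = 0xdeadbeef;		/* illustrative mark */

	/* Pop 8 bytes from the head and trim 4 from the tail; the
	 * __skb_pull() and skb_set_tail_pointer() fixups above keep
	 * the skb consistent with these moves.
	 */
	if (bpf_xdp_adjust_head(ctx, 8))
		return XDP_DROP;
	if (bpf_xdp_adjust_tail(ctx, -4))
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";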
@@ -4120,17 +4159,19 @@ static struct static_key generic_xdp_needed __read_mostly;
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
 {
 	if (xdp_prog) {
-		u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+		struct xdp_buff xdp;
+		u32 act;
 		int err;
 
+		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
 		if (act != XDP_PASS) {
 			switch (act) {
 			case XDP_REDIRECT:
 				err = xdp_do_generic_redirect(skb->dev, skb,
-							      xdp_prog);
+							      &xdp, xdp_prog);
 				if (err)
 					goto out_redir;
-				/* fallthru to submit skb */
+				break;
 			case XDP_TX:
 				generic_xdp_tx(skb, xdp_prog);
 				break;
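Two things stand out in this last hunk. First, the xdp_buff now lives on do_xdp_generic()'s stack and is threaded through netif_receive_generic_xdp() and xdp_do_generic_redirect(), so the redirect path sees the exact buffer geometry the program left behind. Second, the XDP_REDIRECT arm now ends in break rather than falling through to XDP_TX, presumably because a successful xdp_do_generic_redirect() already transmits or enqueues the skb, and falling through would send it twice. For context, here is a sketch of how the receive path of this era invokes the hook; it is reconstructed from memory of netif_receive_skb_internal() and should be treated as approximate, not authoritative:

	/* Cheap static-key check: only pay for generic XDP when a
	 * program is actually attached somewhere.
	 */
	if (static_key_false(&generic_xdp_needed)) {
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		if (ret != XDP_PASS)
			return NET_RX_DROP;
	}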