aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorDavid Miller <davem@davemloft.net>2015-04-05 22:19:04 -0400
committerDavid S. Miller <davem@davemloft.net>2015-04-07 15:25:55 -0400
commit7026b1ddb6b8d4e6ee33dc2bd06c0ca8746fa7ab (patch)
tree3e11ed0f186ea6066a3f7efecb88d85bc732ee51 /net/core
parent1c984f8a5df085bcf35364a8a870bd4db4da4ed3 (diff)
netfilter: Pass socket pointer down through okfn().
On the output paths in particular, we have to sometimes deal with two socket contexts. First, and usually skb->sk, is the local socket that generated the frame. And second, is potentially the socket used to control a tunneling socket, such as one that encapsulates using UDP. We do not want to disassociate skb->sk when encapsulating in order to fix this, because that would break socket memory accounting. The most extreme case where this can cause huge problems is an AF_PACKET socket transmitting over a vxlan device. We hit code paths doing checks that assume they are dealing with an ipv4 socket, but are actually operating upon the AF_PACKET one. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/dev.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 3b3965288f52..b2775f06c710 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2879,7 +2879,7 @@ EXPORT_SYMBOL(xmit_recursion);
  *	dev_loopback_xmit - loop back @skb
  *	@skb: buffer to transmit
  */
-int dev_loopback_xmit(struct sk_buff *skb)
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 {
 	skb_reset_mac_header(skb);
 	__skb_pull(skb, skb_network_offset(skb));
@@ -3017,11 +3017,11 @@ out:
 	return rc;
 }
 
-int dev_queue_xmit(struct sk_buff *skb)
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
 {
 	return __dev_queue_xmit(skb, NULL);
 }
-EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_queue_xmit_sk);
 
 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 {
@@ -3853,13 +3853,13 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
  *	NET_RX_SUCCESS: no congestion
  *	NET_RX_DROP: packet was dropped
  */
-int netif_receive_skb(struct sk_buff *skb)
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
 {
 	trace_netif_receive_skb_entry(skb);
 
 	return netif_receive_skb_internal(skb);
 }
-EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_receive_skb_sk);
 
 /* Network device is going away, flush any packets still pending
  * Called with irqs disabled.