author     David S. Miller <davem@davemloft.net>   2015-03-09 15:58:21 -0400
committer  David S. Miller <davem@davemloft.net>   2015-03-09 15:58:21 -0400
commit     5428aef81157768f1052b116e0cc8abf88ff3e36 (patch)
tree       a4fe8e39c5986b59ae50d3a6fefe46c309e2444e /net/bridge
parent     26c459a8072f2bb0680081205376e1371c114b12 (diff)
parent     e5de75bf88858f5b3ab11e2504b86ec059f03102 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:
====================
Netfilter updates for net-next
The following patchset contains Netfilter updates for your net-next
tree. Basically, improvements for the packet rejection infrastructure,
deprecation of CLUSTERIP, cleanups for nf_tables and some untangling for
br_netfilter. More specifically, they are:
1) Only send the packet that resets the flow if the checksum of the
offending packet is valid, from Florian Westphal.
2) Fix nf_tables bridge reject from the input chain, also from Florian.
3) Deprecate the CLUSTERIP target; the cluster match supersedes it in
functionality and it is known to have problems.
4) A couple of cleanups for nf_tables rule tracing infrastructure, from
Patrick McHardy.
5) Another cleanup to place transaction declarations at the bottom of
nf_tables.h, also from Patrick.
6) Consolidate Kconfig dependencies wrt. NF_TABLES.
7) Limit table names to 32 bytes in nf_tables.
8) MAC header copying in bridge netfilter is only required when calling
ip_fragment(), from Florian Westphal.
9) Move nf_bridge_update_protocol() to br_netfilter.c, also from
Florian.
10) Small refactor in br_netfilter in the transmission path, again from
Florian.
11) Move br_nf_pre_routing_finish_bridge_slow() to br_netfilter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/bridge')
-rw-r--r--  net/bridge/br_device.c                     5
-rw-r--r--  net/bridge/br_forward.c                    4
-rw-r--r--  net/bridge/br_netfilter.c                 78
-rw-r--r--  net/bridge/br_private.h                    5
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c  84
5 files changed, 147 insertions, 29 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ffd379db5938..294cbcc49263 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -36,13 +36,10 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         u16 vid = 0;
 
         rcu_read_lock();
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-        if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
-                br_nf_pre_routing_finish_bridge_slow(skb);
+        if (br_nf_prerouting_finish_bridge(skb)) {
                 rcu_read_unlock();
                 return NETDEV_TX_OK;
         }
-#endif
 
         u64_stats_update_begin(&brstats->syncp);
         brstats->tx_packets++;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 1238fabff874..3304a5442331 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,9 +37,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-        /* ip_fragment doesn't copy the MAC header */
-        if (nf_bridge_maybe_copy_header(skb) ||
-            !is_skb_forwardable(skb->dev, skb)) {
+        if (!is_skb_forwardable(skb->dev, skb)) {
                 kfree_skb(skb);
         } else {
                 skb_push(skb, ETH_HLEN);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 0ee453fad3de..a8361c7cdf81 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -239,6 +239,14 @@ drop:
         return -1;
 }
 
+static void nf_bridge_update_protocol(struct sk_buff *skb)
+{
+        if (skb->nf_bridge->mask & BRNF_8021Q)
+                skb->protocol = htons(ETH_P_8021Q);
+        else if (skb->nf_bridge->mask & BRNF_PPPoE)
+                skb->protocol = htons(ETH_P_PPP_SES);
+}
+
 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
  * bridge PRE_ROUTING hook. */
@@ -764,23 +772,53 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
 }
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
+static bool nf_bridge_copy_header(struct sk_buff *skb)
+{
+        int err;
+        unsigned int header_size;
+
+        nf_bridge_update_protocol(skb);
+        header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
+        err = skb_cow_head(skb, header_size);
+        if (err)
+                return false;
+
+        skb_copy_to_linear_data_offset(skb, -header_size,
+                                       skb->nf_bridge->data, header_size);
+        __skb_push(skb, nf_bridge_encap_header_len(skb));
+        return true;
+}
+
+static int br_nf_push_frag_xmit(struct sk_buff *skb)
+{
+        if (!nf_bridge_copy_header(skb)) {
+                kfree_skb(skb);
+                return 0;
+        }
+
+        return br_dev_queue_push_xmit(skb);
+}
+
 static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 {
         int ret;
         int frag_max_size;
+        unsigned int mtu_reserved;
 
+        if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
+                return br_dev_queue_push_xmit(skb);
+
+        mtu_reserved = nf_bridge_mtu_reduction(skb);
         /* This is wrong! We should preserve the original fragment
          * boundaries by preserving frag_list rather than refragmenting.
          */
-        if (skb->protocol == htons(ETH_P_IP) &&
-            skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
-            !skb_is_gso(skb)) {
+        if (skb->len + mtu_reserved > skb->dev->mtu) {
                 frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
                 if (br_parse_ip_options(skb))
                         /* Drop invalid packet */
                         return NF_DROP;
                 IPCB(skb)->frag_max_size = frag_max_size;
-                ret = ip_fragment(skb, br_dev_queue_push_xmit);
+                ret = ip_fragment(skb, br_nf_push_frag_xmit);
         } else
                 ret = br_dev_queue_push_xmit(skb);
 
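The mtu_reserved comparison above exists because a PPPoE-bridged frame gets its 8-byte session encapsulation pushed back on before it hits the wire, so the payload has to be measured against the device MTU with that overhead included. The following stand-alone sketch (user-space C, hypothetical helper and values, not kernel code) illustrates the same decision; PPPOE_SES_HLEN matches the kernel's constant.

#include <stdbool.h>
#include <stdio.h>

#define PPPOE_SES_HLEN 8        /* PPPoE session header (6) + PPP proto (2) */

/* Hypothetical helper mirroring the skb->len + mtu_reserved > mtu check. */
static bool needs_refragmentation(unsigned int pkt_len, unsigned int mtu,
                                  bool pppoe_bridged)
{
        unsigned int mtu_reserved = pppoe_bridged ? PPPOE_SES_HLEN : 0;

        return pkt_len + mtu_reserved > mtu;
}

int main(void)
{
        /* A 1496-byte packet fits a 1500-byte MTU on a plain bridge... */
        printf("%d\n", needs_refragmentation(1496, 1500, false));  /* 0 */
        /* ...but not once 8 bytes of PPPoE encapsulation are added back. */
        printf("%d\n", needs_refragmentation(1496, 1500, true));   /* 1 */
        return 0;
}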
@@ -854,6 +892,38 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
         return NF_ACCEPT;
 }
 
+/* This is called when br_netfilter has called into iptables/netfilter,
+ * and DNAT has taken place on a bridge-forwarded packet.
+ *
+ * neigh->output has created a new MAC header, with local br0 MAC
+ * as saddr.
+ *
+ * This restores the original MAC saddr of the bridged packet
+ * before invoking bridge forward logic to transmit the packet.
+ */
+static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
+{
+        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+        skb_pull(skb, ETH_HLEN);
+        nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+
+        skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
+                                       skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
+        skb->dev = nf_bridge->physindev;
+        br_handle_frame_finish(skb);
+}
+
+int br_nf_prerouting_finish_bridge(struct sk_buff *skb)
+{
+        if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+                br_nf_pre_routing_finish_bridge_slow(skb);
+                return 1;
+        }
+        return 0;
+}
+EXPORT_SYMBOL_GPL(br_nf_prerouting_finish_bridge);
+
 void br_netfilter_enable(void)
 {
 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c32e279c62f8..f0a0438dbd6d 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -765,10 +765,15 @@ static inline int br_vlan_enabled(struct net_bridge *br)
 
 /* br_netfilter.c */
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+int br_nf_prerouting_finish_bridge(struct sk_buff *skb);
 int br_nf_core_init(void);
 void br_nf_core_fini(void);
 void br_netfilter_rtable_init(struct net_bridge *);
 #else
+static inline int br_nf_prerouting_finish_bridge(struct sk_buff *skb)
+{
+        return 0;
+}
 static inline int br_nf_core_init(void) { return 0; }
 static inline void br_nf_core_fini(void) {}
 #define br_netfilter_rtable_init(x)
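This header change is what lets br_dev_xmit() drop its #ifdef block: with CONFIG_BRIDGE_NETFILTER disabled, br_nf_prerouting_finish_bridge() collapses to a static inline that returns 0 and the compiler folds the dead branch away. A minimal stand-alone sketch of the same pattern (hypothetical feature_hook(), not kernel code):

#include <stdio.h>

/* #define CONFIG_FEATURE 1 */   /* toggle to mimic the Kconfig option */

#ifdef CONFIG_FEATURE
int feature_hook(int pkt);                 /* real implementation elsewhere */
#else
static inline int feature_hook(int pkt)    /* compiled-out stub */
{
        (void)pkt;
        return 0;
}
#endif

int main(void)
{
        /* Mirrors the br_dev_xmit() call site: no #ifdef needed here. */
        if (feature_hook(42)) {
                printf("handled by the optional feature\n");
                return 0;
        }
        printf("fast path\n");
        return 0;
}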
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 3244aead0926..5c6c96585acd 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -21,6 +21,7 @@
 #include <net/ip.h>
 #include <net/ip6_checksum.h>
 #include <linux/netfilter_bridge.h>
+#include <linux/netfilter_ipv6.h>
 #include "../br_private.h"
 
 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
@@ -36,7 +37,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
         skb_pull(nskb, ETH_HLEN);
 }
 
-static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
+/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
+ * or the bridge port (NF_BRIDGE PREROUTING).
+ */
+static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb,
+                                            const struct net_device *dev,
+                                            int hook)
 {
         struct sk_buff *nskb;
         struct iphdr *niph;
@@ -65,11 +71,12 @@ static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
 
         nft_reject_br_push_etherhdr(oldskb, nskb);
 
-        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+        br_deliver(br_port_get_rcu(dev), nskb);
 }
 
-static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
-                                          u8 code)
+static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb,
+                                          const struct net_device *dev,
+                                          int hook, u8 code)
 {
         struct sk_buff *nskb;
         struct iphdr *niph;
@@ -77,8 +84,9 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
         unsigned int len;
         void *payload;
         __wsum csum;
+        u8 proto;
 
-        if (!nft_bridge_iphdr_validate(oldskb))
+        if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
                 return;
 
         /* IP header checks: fragment. */
@@ -91,7 +99,17 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
         if (!pskb_may_pull(oldskb, len))
                 return;
 
-        if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
+        if (pskb_trim_rcsum(oldskb, htons(ip_hdr(oldskb)->tot_len)))
+                return;
+
+        if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
+            ip_hdr(oldskb)->protocol == IPPROTO_UDP)
+                proto = ip_hdr(oldskb)->protocol;
+        else
+                proto = 0;
+
+        if (!skb_csum_unnecessary(oldskb) &&
+            nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
                 return;
 
         nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
@@ -120,11 +138,13 @@ static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
 
         nft_reject_br_push_etherhdr(oldskb, nskb);
 
-        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+        br_deliver(br_port_get_rcu(dev), nskb);
 }
 
 static void nft_reject_br_send_v6_tcp_reset(struct net *net,
-                                            struct sk_buff *oldskb, int hook)
+                                            struct sk_buff *oldskb,
+                                            const struct net_device *dev,
+                                            int hook)
 {
         struct sk_buff *nskb;
         const struct tcphdr *oth;
@@ -152,12 +172,37 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
 
         nft_reject_br_push_etherhdr(oldskb, nskb);
 
-        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+        br_deliver(br_port_get_rcu(dev), nskb);
+}
+
+static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
+{
+        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+        int thoff;
+        __be16 fo;
+        u8 proto = ip6h->nexthdr;
+
+        if (skb->csum_bad)
+                return false;
+
+        if (skb_csum_unnecessary(skb))
+                return true;
+
+        if (ip6h->payload_len &&
+            pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
+                return false;
+
+        thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+        if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
+                return false;
+
+        return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
 }
 
 static void nft_reject_br_send_v6_unreach(struct net *net,
-                                          struct sk_buff *oldskb, int hook,
-                                          u8 code)
+                                          struct sk_buff *oldskb,
+                                          const struct net_device *dev,
+                                          int hook, u8 code)
 {
         struct sk_buff *nskb;
         struct ipv6hdr *nip6h;
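reject6_br_csum_ok() only trusts a checksum it can actually verify: ipv6_skip_exthdr() hands back the fragment header's offset/flags field through fo, and since the upper 13 bits of that field carry the fragment offset, (fo & htons(~0x7)) != 0 identifies non-first fragments, which carry no transport header worth checksumming and therefore never trigger a reject. A stand-alone sketch of that mask (user-space C, made-up field values, not kernel code):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* frag_off_be: the IPv6 fragment header's offset/flags field, network order.
 * Upper 13 bits = fragment offset in 8-byte units, lowest bit = M flag.
 */
static bool is_nonfirst_fragment(uint16_t frag_off_be)
{
        /* Same mask as the kernel check: drop the flag/reserved bits,
         * keep the offset; non-zero means "not the first fragment".
         */
        return (frag_off_be & htons(~0x7)) != 0;
}

int main(void)
{
        uint16_t first = htons(0x0001);   /* offset 0, M=1: first fragment */
        uint16_t later = htons(0x00b9);   /* offset 23, M=1: later fragment */

        printf("first: %d\n", is_nonfirst_fragment(first));   /* 0 */
        printf("later: %d\n", is_nonfirst_fragment(later));   /* 1 */
        return 0;
}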
@@ -176,6 +221,9 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
         if (!pskb_may_pull(oldskb, len))
                 return;
 
+        if (!reject6_br_csum_ok(oldskb, hook))
+                return;
+
         nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
                          LL_MAX_HEADER + len, GFP_ATOMIC);
         if (!nskb)
@@ -205,7 +253,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
 
         nft_reject_br_push_etherhdr(oldskb, nskb);
 
-        br_deliver(br_port_get_rcu(oldskb->dev), nskb);
+        br_deliver(br_port_get_rcu(dev), nskb);
 }
 
 static void nft_reject_bridge_eval(const struct nft_expr *expr,
@@ -224,16 +272,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
         case htons(ETH_P_IP):
                 switch (priv->type) {
                 case NFT_REJECT_ICMP_UNREACH:
-                        nft_reject_br_send_v4_unreach(pkt->skb,
+                        nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
                                                       pkt->ops->hooknum,
                                                       priv->icmp_code);
                         break;
                 case NFT_REJECT_TCP_RST:
-                        nft_reject_br_send_v4_tcp_reset(pkt->skb,
+                        nft_reject_br_send_v4_tcp_reset(pkt->skb, pkt->in,
                                                         pkt->ops->hooknum);
                         break;
                 case NFT_REJECT_ICMPX_UNREACH:
-                        nft_reject_br_send_v4_unreach(pkt->skb,
+                        nft_reject_br_send_v4_unreach(pkt->skb, pkt->in,
                                                       pkt->ops->hooknum,
                                                       nft_reject_icmp_code(priv->icmp_code));
                         break;
@@ -242,16 +290,16 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
         case htons(ETH_P_IPV6):
                 switch (priv->type) {
                 case NFT_REJECT_ICMP_UNREACH:
-                        nft_reject_br_send_v6_unreach(net, pkt->skb,
+                        nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
                                                       pkt->ops->hooknum,
                                                       priv->icmp_code);
                         break;
                 case NFT_REJECT_TCP_RST:
-                        nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
+                        nft_reject_br_send_v6_tcp_reset(net, pkt->skb, pkt->in,
                                                         pkt->ops->hooknum);
                         break;
                 case NFT_REJECT_ICMPX_UNREACH:
-                        nft_reject_br_send_v6_unreach(net, pkt->skb,
+                        nft_reject_br_send_v6_unreach(net, pkt->skb, pkt->in,
                                                       pkt->ops->hooknum,
                                                       nft_reject_icmpv6_code(priv->icmp_code));
                         break;