diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-11 21:19:00 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-11 21:19:00 -0400 |
| commit | ca321885b0511a85e2d1cd40caafedbeb18f4af6 (patch) | |
| tree | 0042e8674aff7ae5785db467836d8d4101906f70 /include/net | |
| parent | 052db7ec86dff26f734031c3ef5c2c03a94af0af (diff) | |
| parent | 01d2d484e49e9bc0ed9b5fdaf345a0e2bf35ffed (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
"This set fixes a bunch of fallout from the changes that went in during
this merge window, particularly:
- Fix fsl_pq_mdio (Claudiu Manoil) and fm10k (Pranith Kumar) build
failures.
- Several networking drivers do atomic_set() on page counts where
that's not exactly legal. From Eric Dumazet.
- Make __skb_flow_get_ports() work cleanly with unaligned data, from
Alexander Duyck.
- Fix some kernel-doc buglets in rfkill and netlabel, from Fabian
Frederick.
- Unbalanced enable_irq_wake usage in bcmgenet and systemport
drivers, from Florian Fainelli.
- pxa168_eth needs to depend on HAS_DMA, from Geert Uytterhoeven.
- Multi-dequeue in the qdisc layer severely bypasses the fairness
limits the previous code used to enforce; reintroduce them in a way
that at the same time doesn't compromise bulk dequeue opportunities.
From Jesper Dangaard Brouer.
- macvlan receive path unnecessarily hops through a softirq by using
netif_rx() instead of netif_receive_skb(). From Jason Baron"
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (51 commits)
net: systemport: avoid unbalanced enable_irq_wake calls
net: bcmgenet: avoid unbalanced enable_irq_wake calls
net: bcmgenet: fix off-by-one in incrementing read pointer
net: fix races in page->_count manipulation
mlx4: fix race accessing page->_count
ixgbe: fix race accessing page->_count
igb: fix race accessing page->_count
fm10k: fix race accessing page->_count
net/phy: micrel: Add clock support for KSZ8021/KSZ8031
flow-dissector: Fix alignment issue in __skb_flow_get_ports
net: filter: fix the comments
Documentation: replace __sk_run_filter with __bpf_prog_run
macvlan: optimize the receive path
macvlan: pass 'bool' type to macvlan_count_rx()
drivers: net: xgene: Add 10GbE ethtool support
drivers: net: xgene: Add 10GbE support
drivers: net: xgene: Preparing for adding 10GbE support
dtb: Add 10GbE node to APM X-Gene SoC device tree
Documentation: dts: Update section header for APM X-Gene
MAINTAINERS: Update APM X-Gene section
...
Diffstat (limited to 'include/net')
| -rw-r--r-- | include/net/netfilter/ipv6/nf_reject.h | 157 |
1 file changed, 2 insertions, 155 deletions
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h index 7a10cfcd8e33..48e18810a9be 100644 --- a/include/net/netfilter/ipv6/nf_reject.h +++ b/include/net/netfilter/ipv6/nf_reject.h | |||
| @@ -1,11 +1,7 @@ | |||
| 1 | #ifndef _IPV6_NF_REJECT_H | 1 | #ifndef _IPV6_NF_REJECT_H |
| 2 | #define _IPV6_NF_REJECT_H | 2 | #define _IPV6_NF_REJECT_H |
| 3 | 3 | ||
| 4 | #include <net/ipv6.h> | 4 | #include <linux/icmpv6.h> |
| 5 | #include <net/ip6_route.h> | ||
| 6 | #include <net/ip6_fib.h> | ||
| 7 | #include <net/ip6_checksum.h> | ||
| 8 | #include <linux/netfilter_ipv6.h> | ||
| 9 | 5 | ||
| 10 | static inline void | 6 | static inline void |
| 11 | nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, | 7 | nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, |
| @@ -17,155 +13,6 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code, | |||
| 17 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); | 13 | icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); |
| 18 | } | 14 | } |
| 19 | 15 | ||
| 20 | /* Send RST reply */ | 16 | void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); |
| 21 | static void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) | ||
| 22 | { | ||
| 23 | struct sk_buff *nskb; | ||
| 24 | struct tcphdr otcph, *tcph; | ||
| 25 | unsigned int otcplen, hh_len; | ||
| 26 | int tcphoff, needs_ack; | ||
| 27 | const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); | ||
| 28 | struct ipv6hdr *ip6h; | ||
| 29 | #define DEFAULT_TOS_VALUE 0x0U | ||
| 30 | const __u8 tclass = DEFAULT_TOS_VALUE; | ||
| 31 | struct dst_entry *dst = NULL; | ||
| 32 | u8 proto; | ||
| 33 | __be16 frag_off; | ||
| 34 | struct flowi6 fl6; | ||
| 35 | |||
| 36 | if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || | ||
| 37 | (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { | ||
| 38 | pr_debug("addr is not unicast.\n"); | ||
| 39 | return; | ||
| 40 | } | ||
| 41 | |||
| 42 | proto = oip6h->nexthdr; | ||
| 43 | tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); | ||
| 44 | |||
| 45 | if ((tcphoff < 0) || (tcphoff > oldskb->len)) { | ||
| 46 | pr_debug("Cannot get TCP header.\n"); | ||
| 47 | return; | ||
| 48 | } | ||
| 49 | |||
| 50 | otcplen = oldskb->len - tcphoff; | ||
| 51 | |||
| 52 | /* IP header checks: fragment, too short. */ | ||
| 53 | if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { | ||
| 54 | pr_debug("proto(%d) != IPPROTO_TCP, " | ||
| 55 | "or too short. otcplen = %d\n", | ||
| 56 | proto, otcplen); | ||
| 57 | return; | ||
| 58 | } | ||
| 59 | |||
| 60 | if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) | ||
| 61 | BUG(); | ||
| 62 | |||
| 63 | /* No RST for RST. */ | ||
| 64 | if (otcph.rst) { | ||
| 65 | pr_debug("RST is set\n"); | ||
| 66 | return; | ||
| 67 | } | ||
| 68 | |||
| 69 | /* Check checksum. */ | ||
| 70 | if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { | ||
| 71 | pr_debug("TCP checksum is invalid\n"); | ||
| 72 | return; | ||
| 73 | } | ||
| 74 | |||
| 75 | memset(&fl6, 0, sizeof(fl6)); | ||
| 76 | fl6.flowi6_proto = IPPROTO_TCP; | ||
| 77 | fl6.saddr = oip6h->daddr; | ||
| 78 | fl6.daddr = oip6h->saddr; | ||
| 79 | fl6.fl6_sport = otcph.dest; | ||
| 80 | fl6.fl6_dport = otcph.source; | ||
| 81 | security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); | ||
| 82 | dst = ip6_route_output(net, NULL, &fl6); | ||
| 83 | if (dst == NULL || dst->error) { | ||
| 84 | dst_release(dst); | ||
| 85 | return; | ||
| 86 | } | ||
| 87 | dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); | ||
| 88 | if (IS_ERR(dst)) | ||
| 89 | return; | ||
| 90 | |||
| 91 | hh_len = (dst->dev->hard_header_len + 15)&~15; | ||
| 92 | nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) | ||
| 93 | + sizeof(struct tcphdr) + dst->trailer_len, | ||
| 94 | GFP_ATOMIC); | ||
| 95 | |||
| 96 | if (!nskb) { | ||
| 97 | net_dbg_ratelimited("cannot alloc skb\n"); | ||
| 98 | dst_release(dst); | ||
| 99 | return; | ||
| 100 | } | ||
| 101 | |||
| 102 | skb_dst_set(nskb, dst); | ||
| 103 | |||
| 104 | skb_reserve(nskb, hh_len + dst->header_len); | ||
| 105 | |||
| 106 | skb_put(nskb, sizeof(struct ipv6hdr)); | ||
| 107 | skb_reset_network_header(nskb); | ||
| 108 | ip6h = ipv6_hdr(nskb); | ||
| 109 | ip6_flow_hdr(ip6h, tclass, 0); | ||
| 110 | ip6h->hop_limit = ip6_dst_hoplimit(dst); | ||
| 111 | ip6h->nexthdr = IPPROTO_TCP; | ||
| 112 | ip6h->saddr = oip6h->daddr; | ||
| 113 | ip6h->daddr = oip6h->saddr; | ||
| 114 | |||
| 115 | skb_reset_transport_header(nskb); | ||
| 116 | tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); | ||
| 117 | /* Truncate to length (no data) */ | ||
| 118 | tcph->doff = sizeof(struct tcphdr)/4; | ||
| 119 | tcph->source = otcph.dest; | ||
| 120 | tcph->dest = otcph.source; | ||
| 121 | |||
| 122 | if (otcph.ack) { | ||
| 123 | needs_ack = 0; | ||
| 124 | tcph->seq = otcph.ack_seq; | ||
| 125 | tcph->ack_seq = 0; | ||
| 126 | } else { | ||
| 127 | needs_ack = 1; | ||
| 128 | tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin | ||
| 129 | + otcplen - (otcph.doff<<2)); | ||
| 130 | tcph->seq = 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Reset flags */ | ||
| 134 | ((u_int8_t *)tcph)[13] = 0; | ||
| 135 | tcph->rst = 1; | ||
| 136 | tcph->ack = needs_ack; | ||
| 137 | tcph->window = 0; | ||
| 138 | tcph->urg_ptr = 0; | ||
| 139 | tcph->check = 0; | ||
| 140 | |||
| 141 | /* Adjust TCP checksum */ | ||
| 142 | tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr, | ||
| 143 | &ipv6_hdr(nskb)->daddr, | ||
| 144 | sizeof(struct tcphdr), IPPROTO_TCP, | ||
| 145 | csum_partial(tcph, | ||
| 146 | sizeof(struct tcphdr), 0)); | ||
| 147 | |||
| 148 | nf_ct_attach(nskb, oldskb); | ||
| 149 | |||
| 150 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | ||
| 151 | /* If we use ip6_local_out for bridged traffic, the MAC source on | ||
| 152 | * the RST will be ours, instead of the destination's. This confuses | ||
| 153 | * some routers/firewalls, and they drop the packet. So we need to | ||
| 154 | * build the eth header using the original destination's MAC as the | ||
| 155 | * source, and send the RST packet directly. | ||
| 156 | */ | ||
| 157 | if (oldskb->nf_bridge) { | ||
| 158 | struct ethhdr *oeth = eth_hdr(oldskb); | ||
| 159 | nskb->dev = oldskb->nf_bridge->physindev; | ||
| 160 | nskb->protocol = htons(ETH_P_IPV6); | ||
| 161 | ip6h->payload_len = htons(sizeof(struct tcphdr)); | ||
| 162 | if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol), | ||
| 163 | oeth->h_source, oeth->h_dest, nskb->len) < 0) | ||
| 164 | return; | ||
| 165 | dev_queue_xmit(nskb); | ||
| 166 | } else | ||
| 167 | #endif | ||
| 168 | ip6_local_out(nskb); | ||
| 169 | } | ||
| 170 | 17 | ||
| 171 | #endif /* _IPV6_NF_REJECT_H */ | 18 | #endif /* _IPV6_NF_REJECT_H */ |
