diff options
author | Patrick McHardy <kaber@trash.net> | 2010-06-15 11:31:06 -0400 |
---|---|---|
committer | Patrick McHardy <kaber@trash.net> | 2010-06-15 11:31:06 -0400 |
commit | f9181f4ffc71d7b7dd1906c9a11d51d6659220ae (patch) | |
tree | 194f22e8216a1b9ee2c0dd019142202d73a7dc87 /net | |
parent | 0902b469bd25065aa0688c3cee6f11744c817e7c (diff) | |
parent | 1ab6c163dee279559e3a62d774af7e4c4c9b4c67 (diff) |
Merge branch 'master' of /repos/git/net-next-2.6
Conflicts:
include/net/netfilter/xt_rateest.h
net/bridge/br_netfilter.c
net/netfilter/nf_conntrack_core.c
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net')
130 files changed, 3310 insertions, 2004 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index bd537fc10254..50f58f5f1c34 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
12 | return NET_RX_DROP; | 12 | return NET_RX_DROP; |
13 | 13 | ||
14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 14 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
15 | goto drop; | 15 | skb->deliver_no_wcard = 1; |
16 | 16 | ||
17 | skb->skb_iif = skb->dev->ifindex; | 17 | skb->skb_iif = skb->dev->ifindex; |
18 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 18 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, | |||
84 | struct sk_buff *p; | 84 | struct sk_buff *p; |
85 | 85 | ||
86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) | 86 | if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) |
87 | goto drop; | 87 | skb->deliver_no_wcard = 1; |
88 | 88 | ||
89 | skb->skb_iif = skb->dev->ifindex; | 89 | skb->skb_iif = skb->dev->ifindex; |
90 | __vlan_hwaccel_put_tag(skb, vlan_tci); | 90 | __vlan_hwaccel_put_tag(skb, vlan_tci); |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 55be90826f5f..529842677817 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev) | |||
708 | netif_carrier_off(dev); | 708 | netif_carrier_off(dev); |
709 | 709 | ||
710 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ | 710 | /* IFF_BROADCAST|IFF_MULTICAST; ??? */ |
711 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI); | 711 | dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | |
712 | IFF_MASTER | IFF_SLAVE); | ||
712 | dev->iflink = real_dev->ifindex; | 713 | dev->iflink = real_dev->ifindex; |
713 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | | 714 | dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | |
714 | (1<<__LINK_STATE_DORMANT))) | | 715 | (1<<__LINK_STATE_DORMANT))) | |
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index afead353e215..df56f5ce887c 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c | |||
@@ -278,8 +278,9 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) | |||
278 | { | 278 | { |
279 | struct net_device *vlandev = (struct net_device *) seq->private; | 279 | struct net_device *vlandev = (struct net_device *) seq->private; |
280 | const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); | 280 | const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); |
281 | const struct net_device_stats *stats; | 281 | const struct rtnl_link_stats64 *stats; |
282 | static const char fmt[] = "%30s %12lu\n"; | 282 | static const char fmt[] = "%30s %12lu\n"; |
283 | static const char fmt64[] = "%30s %12llu\n"; | ||
283 | int i; | 284 | int i; |
284 | 285 | ||
285 | if (!is_vlan_dev(vlandev)) | 286 | if (!is_vlan_dev(vlandev)) |
@@ -291,12 +292,12 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) | |||
291 | vlandev->name, dev_info->vlan_id, | 292 | vlandev->name, dev_info->vlan_id, |
292 | (int)(dev_info->flags & 1), vlandev->priv_flags); | 293 | (int)(dev_info->flags & 1), vlandev->priv_flags); |
293 | 294 | ||
294 | seq_printf(seq, fmt, "total frames received", stats->rx_packets); | 295 | seq_printf(seq, fmt64, "total frames received", stats->rx_packets); |
295 | seq_printf(seq, fmt, "total bytes received", stats->rx_bytes); | 296 | seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); |
296 | seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast); | 297 | seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); |
297 | seq_puts(seq, "\n"); | 298 | seq_puts(seq, "\n"); |
298 | seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets); | 299 | seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); |
299 | seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes); | 300 | seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); |
300 | seq_printf(seq, fmt, "total headroom inc", | 301 | seq_printf(seq, fmt, "total headroom inc", |
301 | dev_info->cnt_inc_headroom_on_tx); | 302 | dev_info->cnt_inc_headroom_on_tx); |
302 | seq_printf(seq, fmt, "total encap on xmit", | 303 | seq_printf(seq, fmt, "total encap on xmit", |
diff --git a/net/atm/clip.c b/net/atm/clip.c index 313aba11316b..95fdd1185067 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -522,7 +522,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) | |||
522 | error = ip_route_output_key(&init_net, &rt, &fl); | 522 | error = ip_route_output_key(&init_net, &rt, &fl); |
523 | if (error) | 523 | if (error) |
524 | return error; | 524 | return error; |
525 | neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1); | 525 | neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1); |
526 | ip_rt_put(rt); | 526 | ip_rt_put(rt); |
527 | if (!neigh) | 527 | if (!neigh) |
528 | return -ENOMEM; | 528 | return -ENOMEM; |
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 0d9e506f5d5a..70672544db86 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h | |||
@@ -86,26 +86,26 @@ struct bnep_setup_conn_req { | |||
86 | __u8 ctrl; | 86 | __u8 ctrl; |
87 | __u8 uuid_size; | 87 | __u8 uuid_size; |
88 | __u8 service[0]; | 88 | __u8 service[0]; |
89 | } __attribute__((packed)); | 89 | } __packed; |
90 | 90 | ||
91 | struct bnep_set_filter_req { | 91 | struct bnep_set_filter_req { |
92 | __u8 type; | 92 | __u8 type; |
93 | __u8 ctrl; | 93 | __u8 ctrl; |
94 | __be16 len; | 94 | __be16 len; |
95 | __u8 list[0]; | 95 | __u8 list[0]; |
96 | } __attribute__((packed)); | 96 | } __packed; |
97 | 97 | ||
98 | struct bnep_control_rsp { | 98 | struct bnep_control_rsp { |
99 | __u8 type; | 99 | __u8 type; |
100 | __u8 ctrl; | 100 | __u8 ctrl; |
101 | __be16 resp; | 101 | __be16 resp; |
102 | } __attribute__((packed)); | 102 | } __packed; |
103 | 103 | ||
104 | struct bnep_ext_hdr { | 104 | struct bnep_ext_hdr { |
105 | __u8 type; | 105 | __u8 type; |
106 | __u8 len; | 106 | __u8 len; |
107 | __u8 data[0]; | 107 | __u8 data[0]; |
108 | } __attribute__((packed)); | 108 | } __packed; |
109 | 109 | ||
110 | /* BNEP ioctl defines */ | 110 | /* BNEP ioctl defines */ |
111 | #define BNEPCONNADD _IOW('B', 200, int) | 111 | #define BNEPCONNADD _IOW('B', 200, int) |
diff --git a/net/bridge/br.c b/net/bridge/br.c index 76357b547752..c8436fa31344 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c | |||
@@ -63,7 +63,6 @@ static int __init br_init(void) | |||
63 | goto err_out4; | 63 | goto err_out4; |
64 | 64 | ||
65 | brioctl_set(br_ioctl_deviceless_stub); | 65 | brioctl_set(br_ioctl_deviceless_stub); |
66 | br_handle_frame_hook = br_handle_frame; | ||
67 | 66 | ||
68 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) | 67 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) |
69 | br_fdb_test_addr_hook = br_fdb_test_addr; | 68 | br_fdb_test_addr_hook = br_fdb_test_addr; |
@@ -100,7 +99,6 @@ static void __exit br_deinit(void) | |||
100 | br_fdb_test_addr_hook = NULL; | 99 | br_fdb_test_addr_hook = NULL; |
101 | #endif | 100 | #endif |
102 | 101 | ||
103 | br_handle_frame_hook = NULL; | ||
104 | br_fdb_fini(); | 102 | br_fdb_fini(); |
105 | } | 103 | } |
106 | 104 | ||
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index eedf2c94820e..b898364beaf5 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -127,7 +127,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu) | |||
127 | 127 | ||
128 | #ifdef CONFIG_BRIDGE_NETFILTER | 128 | #ifdef CONFIG_BRIDGE_NETFILTER |
129 | /* remember the MTU in the rtable for PMTU */ | 129 | /* remember the MTU in the rtable for PMTU */ |
130 | br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu; | 130 | br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu; |
131 | #endif | 131 | #endif |
132 | 132 | ||
133 | return 0; | 133 | return 0; |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 18b245e2c00e..d9242342837e 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p) | |||
147 | 147 | ||
148 | list_del_rcu(&p->list); | 148 | list_del_rcu(&p->list); |
149 | 149 | ||
150 | netdev_rx_handler_unregister(dev); | ||
150 | rcu_assign_pointer(dev->br_port, NULL); | 151 | rcu_assign_pointer(dev->br_port, NULL); |
151 | 152 | ||
152 | br_multicast_del_port(p); | 153 | br_multicast_del_port(p); |
@@ -429,6 +430,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
429 | goto err2; | 430 | goto err2; |
430 | 431 | ||
431 | rcu_assign_pointer(dev->br_port, p); | 432 | rcu_assign_pointer(dev->br_port, p); |
433 | |||
434 | err = netdev_rx_handler_register(dev, br_handle_frame); | ||
435 | if (err) | ||
436 | goto err3; | ||
437 | |||
432 | dev_disable_lro(dev); | 438 | dev_disable_lro(dev); |
433 | 439 | ||
434 | list_add_rcu(&p->list, &br->port_list); | 440 | list_add_rcu(&p->list, &br->port_list); |
@@ -451,6 +457,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) | |||
451 | br_netpoll_enable(br, dev); | 457 | br_netpoll_enable(br, dev); |
452 | 458 | ||
453 | return 0; | 459 | return 0; |
460 | err3: | ||
461 | rcu_assign_pointer(dev->br_port, NULL); | ||
454 | err2: | 462 | err2: |
455 | br_fdb_delete_by_port(br, p, 1); | 463 | br_fdb_delete_by_port(br, p, 1); |
456 | err1: | 464 | err1: |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index d36e700f7a26..99647d8f95c8 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -131,15 +131,19 @@ static inline int is_link_local(const unsigned char *dest) | |||
131 | } | 131 | } |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * Called via br_handle_frame_hook. | ||
135 | * Return NULL if skb is handled | 134 | * Return NULL if skb is handled |
136 | * note: already called with rcu_read_lock (preempt_disabled) | 135 | * note: already called with rcu_read_lock (preempt_disabled) from |
136 | * netif_receive_skb | ||
137 | */ | 137 | */ |
138 | struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | 138 | struct sk_buff *br_handle_frame(struct sk_buff *skb) |
139 | { | 139 | { |
140 | struct net_bridge_port *p; | ||
140 | const unsigned char *dest = eth_hdr(skb)->h_dest; | 141 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
141 | int (*rhook)(struct sk_buff *skb); | 142 | int (*rhook)(struct sk_buff *skb); |
142 | 143 | ||
144 | if (skb->pkt_type == PACKET_LOOPBACK) | ||
145 | return skb; | ||
146 | |||
143 | if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) | 147 | if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) |
144 | goto drop; | 148 | goto drop; |
145 | 149 | ||
@@ -147,6 +151,8 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) | |||
147 | if (!skb) | 151 | if (!skb) |
148 | return NULL; | 152 | return NULL; |
149 | 153 | ||
154 | p = rcu_dereference(skb->dev->br_port); | ||
155 | |||
150 | if (unlikely(is_link_local(dest))) { | 156 | if (unlikely(is_link_local(dest))) { |
151 | /* Pause frames shouldn't be passed up by driver anyway */ | 157 | /* Pause frames shouldn't be passed up by driver anyway */ |
152 | if (skb->protocol == htons(ETH_P_PAUSE)) | 158 | if (skb->protocol == htons(ETH_P_PAUSE)) |
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index cbea5af24ce6..6bb6f7c9e6e1 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c | |||
@@ -117,12 +117,12 @@ void br_netfilter_rtable_init(struct net_bridge *br) | |||
117 | { | 117 | { |
118 | struct rtable *rt = &br->fake_rtable; | 118 | struct rtable *rt = &br->fake_rtable; |
119 | 119 | ||
120 | atomic_set(&rt->u.dst.__refcnt, 1); | 120 | atomic_set(&rt->dst.__refcnt, 1); |
121 | rt->u.dst.dev = br->dev; | 121 | rt->dst.dev = br->dev; |
122 | rt->u.dst.path = &rt->u.dst; | 122 | rt->dst.path = &rt->dst; |
123 | rt->u.dst.metrics[RTAX_MTU - 1] = 1500; | 123 | rt->dst.metrics[RTAX_MTU - 1] = 1500; |
124 | rt->u.dst.flags = DST_NOXFRM; | 124 | rt->dst.flags = DST_NOXFRM; |
125 | rt->u.dst.ops = &fake_dst_ops; | 125 | rt->dst.ops = &fake_dst_ops; |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) | 128 | static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) |
@@ -244,7 +244,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb) | |||
244 | kfree_skb(skb); | 244 | kfree_skb(skb); |
245 | return 0; | 245 | return 0; |
246 | } | 246 | } |
247 | skb_dst_set_noref(skb, &rt->u.dst); | 247 | skb_dst_set_noref(skb, &rt->dst); |
248 | 248 | ||
249 | skb->dev = nf_bridge->physindev; | 249 | skb->dev = nf_bridge->physindev; |
250 | nf_bridge_update_protocol(skb); | 250 | nf_bridge_update_protocol(skb); |
@@ -395,7 +395,7 @@ bridged_dnat: | |||
395 | kfree_skb(skb); | 395 | kfree_skb(skb); |
396 | return 0; | 396 | return 0; |
397 | } | 397 | } |
398 | skb_dst_set_noref(skb, &rt->u.dst); | 398 | skb_dst_set_noref(skb, &rt->dst); |
399 | } | 399 | } |
400 | 400 | ||
401 | skb->dev = nf_bridge->physindev; | 401 | skb->dev = nf_bridge->physindev; |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 0f4a74bc6a9b..c83519b555bb 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -331,8 +331,7 @@ extern void br_features_recompute(struct net_bridge *br); | |||
331 | 331 | ||
332 | /* br_input.c */ | 332 | /* br_input.c */ |
333 | extern int br_handle_frame_finish(struct sk_buff *skb); | 333 | extern int br_handle_frame_finish(struct sk_buff *skb); |
334 | extern struct sk_buff *br_handle_frame(struct net_bridge_port *p, | 334 | extern struct sk_buff *br_handle_frame(struct sk_buff *skb); |
335 | struct sk_buff *skb); | ||
336 | 335 | ||
337 | /* br_ioctl.c */ | 336 | /* br_ioctl.c */ |
338 | extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 337 | extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index cd2830fec935..fd27b172fb5d 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c | |||
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
83 | if (!cfsrvl_ready(service, &ret)) | 83 | if (!cfsrvl_ready(service, &ret)) |
84 | return ret; | 84 | return ret; |
85 | 85 | ||
86 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 86 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
87 | pr_err("CAIF: %s():Packet too large - size=%d\n", | 87 | pr_err("CAIF: %s():Packet too large - size=%d\n", |
88 | __func__, cfpkt_getlen(pkt)); | 88 | __func__, cfpkt_getlen(pkt)); |
89 | return -EOVERFLOW; | 89 | return -EOVERFLOW; |
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c index 0fd827f49491..e04f7d964e83 100644 --- a/net/caif/cfveil.c +++ b/net/caif/cfveil.c | |||
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) | |||
84 | return ret; | 84 | return ret; |
85 | caif_assert(layr->dn != NULL); | 85 | caif_assert(layr->dn != NULL); |
86 | caif_assert(layr->dn->transmit != NULL); | 86 | caif_assert(layr->dn->transmit != NULL); |
87 | if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { | 87 | if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { |
88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", | 88 | pr_warning("CAIF: %s(): Packet too large - size=%d\n", |
89 | __func__, cfpkt_getlen(pkt)); | 89 | __func__, cfpkt_getlen(pkt)); |
90 | return -EOVERFLOW; | 90 | return -EOVERFLOW; |
diff --git a/net/compat.c b/net/compat.c index ec24d9edb025..63d260e81472 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -81,7 +81,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, | |||
81 | int tot_len; | 81 | int tot_len; |
82 | 82 | ||
83 | if (kern_msg->msg_namelen) { | 83 | if (kern_msg->msg_namelen) { |
84 | if (mode==VERIFY_READ) { | 84 | if (mode == VERIFY_READ) { |
85 | int err = move_addr_to_kernel(kern_msg->msg_name, | 85 | int err = move_addr_to_kernel(kern_msg->msg_name, |
86 | kern_msg->msg_namelen, | 86 | kern_msg->msg_namelen, |
87 | kern_address); | 87 | kern_address); |
@@ -354,7 +354,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname, | |||
354 | static int do_set_sock_timeout(struct socket *sock, int level, | 354 | static int do_set_sock_timeout(struct socket *sock, int level, |
355 | int optname, char __user *optval, unsigned int optlen) | 355 | int optname, char __user *optval, unsigned int optlen) |
356 | { | 356 | { |
357 | struct compat_timeval __user *up = (struct compat_timeval __user *) optval; | 357 | struct compat_timeval __user *up = (struct compat_timeval __user *)optval; |
358 | struct timeval ktime; | 358 | struct timeval ktime; |
359 | mm_segment_t old_fs; | 359 | mm_segment_t old_fs; |
360 | int err; | 360 | int err; |
@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level, | |||
367 | return -EFAULT; | 367 | return -EFAULT; |
368 | old_fs = get_fs(); | 368 | old_fs = get_fs(); |
369 | set_fs(KERNEL_DS); | 369 | set_fs(KERNEL_DS); |
370 | err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime)); | 370 | err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime)); |
371 | set_fs(old_fs); | 371 | set_fs(old_fs); |
372 | 372 | ||
373 | return err; | 373 | return err; |
@@ -389,11 +389,10 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, | |||
389 | char __user *optval, unsigned int optlen) | 389 | char __user *optval, unsigned int optlen) |
390 | { | 390 | { |
391 | int err; | 391 | int err; |
392 | struct socket *sock; | 392 | struct socket *sock = sockfd_lookup(fd, &err); |
393 | 393 | ||
394 | if ((sock = sockfd_lookup(fd, &err))!=NULL) | 394 | if (sock) { |
395 | { | 395 | err = security_socket_setsockopt(sock, level, optname); |
396 | err = security_socket_setsockopt(sock,level,optname); | ||
397 | if (err) { | 396 | if (err) { |
398 | sockfd_put(sock); | 397 | sockfd_put(sock); |
399 | return err; | 398 | return err; |
@@ -453,7 +452,7 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname, | |||
453 | int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) | 452 | int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) |
454 | { | 453 | { |
455 | struct compat_timeval __user *ctv = | 454 | struct compat_timeval __user *ctv = |
456 | (struct compat_timeval __user*) userstamp; | 455 | (struct compat_timeval __user *) userstamp; |
457 | int err = -ENOENT; | 456 | int err = -ENOENT; |
458 | struct timeval tv; | 457 | struct timeval tv; |
459 | 458 | ||
@@ -477,7 +476,7 @@ EXPORT_SYMBOL(compat_sock_get_timestamp); | |||
477 | int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) | 476 | int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) |
478 | { | 477 | { |
479 | struct compat_timespec __user *ctv = | 478 | struct compat_timespec __user *ctv = |
480 | (struct compat_timespec __user*) userstamp; | 479 | (struct compat_timespec __user *) userstamp; |
481 | int err = -ENOENT; | 480 | int err = -ENOENT; |
482 | struct timespec ts; | 481 | struct timespec ts; |
483 | 482 | ||
@@ -502,12 +501,10 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, | |||
502 | char __user *optval, int __user *optlen) | 501 | char __user *optval, int __user *optlen) |
503 | { | 502 | { |
504 | int err; | 503 | int err; |
505 | struct socket *sock; | 504 | struct socket *sock = sockfd_lookup(fd, &err); |
506 | 505 | ||
507 | if ((sock = sockfd_lookup(fd, &err))!=NULL) | 506 | if (sock) { |
508 | { | 507 | err = security_socket_getsockopt(sock, level, optname); |
509 | err = security_socket_getsockopt(sock, level, | ||
510 | optname); | ||
511 | if (err) { | 508 | if (err) { |
512 | sockfd_put(sock); | 509 | sockfd_put(sock); |
513 | return err; | 510 | return err; |
@@ -531,7 +528,7 @@ struct compat_group_req { | |||
531 | __u32 gr_interface; | 528 | __u32 gr_interface; |
532 | struct __kernel_sockaddr_storage gr_group | 529 | struct __kernel_sockaddr_storage gr_group |
533 | __attribute__ ((aligned(4))); | 530 | __attribute__ ((aligned(4))); |
534 | } __attribute__ ((packed)); | 531 | } __packed; |
535 | 532 | ||
536 | struct compat_group_source_req { | 533 | struct compat_group_source_req { |
537 | __u32 gsr_interface; | 534 | __u32 gsr_interface; |
@@ -539,7 +536,7 @@ struct compat_group_source_req { | |||
539 | __attribute__ ((aligned(4))); | 536 | __attribute__ ((aligned(4))); |
540 | struct __kernel_sockaddr_storage gsr_source | 537 | struct __kernel_sockaddr_storage gsr_source |
541 | __attribute__ ((aligned(4))); | 538 | __attribute__ ((aligned(4))); |
542 | } __attribute__ ((packed)); | 539 | } __packed; |
543 | 540 | ||
544 | struct compat_group_filter { | 541 | struct compat_group_filter { |
545 | __u32 gf_interface; | 542 | __u32 gf_interface; |
@@ -549,7 +546,7 @@ struct compat_group_filter { | |||
549 | __u32 gf_numsrc; | 546 | __u32 gf_numsrc; |
550 | struct __kernel_sockaddr_storage gf_slist[1] | 547 | struct __kernel_sockaddr_storage gf_slist[1] |
551 | __attribute__ ((aligned(4))); | 548 | __attribute__ ((aligned(4))); |
552 | } __attribute__ ((packed)); | 549 | } __packed; |
553 | 550 | ||
554 | #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ | 551 | #define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ |
555 | sizeof(struct __kernel_sockaddr_storage)) | 552 | sizeof(struct __kernel_sockaddr_storage)) |
@@ -557,7 +554,7 @@ struct compat_group_filter { | |||
557 | 554 | ||
558 | int compat_mc_setsockopt(struct sock *sock, int level, int optname, | 555 | int compat_mc_setsockopt(struct sock *sock, int level, int optname, |
559 | char __user *optval, unsigned int optlen, | 556 | char __user *optval, unsigned int optlen, |
560 | int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int)) | 557 | int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int)) |
561 | { | 558 | { |
562 | char __user *koptval = optval; | 559 | char __user *koptval = optval; |
563 | int koptlen = optlen; | 560 | int koptlen = optlen; |
@@ -640,12 +637,11 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname, | |||
640 | } | 637 | } |
641 | return setsockopt(sock, level, optname, koptval, koptlen); | 638 | return setsockopt(sock, level, optname, koptval, koptlen); |
642 | } | 639 | } |
643 | |||
644 | EXPORT_SYMBOL(compat_mc_setsockopt); | 640 | EXPORT_SYMBOL(compat_mc_setsockopt); |
645 | 641 | ||
646 | int compat_mc_getsockopt(struct sock *sock, int level, int optname, | 642 | int compat_mc_getsockopt(struct sock *sock, int level, int optname, |
647 | char __user *optval, int __user *optlen, | 643 | char __user *optval, int __user *optlen, |
648 | int (*getsockopt)(struct sock *,int,int,char __user *,int __user *)) | 644 | int (*getsockopt)(struct sock *, int, int, char __user *, int __user *)) |
649 | { | 645 | { |
650 | struct compat_group_filter __user *gf32 = (void *)optval; | 646 | struct compat_group_filter __user *gf32 = (void *)optval; |
651 | struct group_filter __user *kgf; | 647 | struct group_filter __user *kgf; |
@@ -681,7 +677,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, | |||
681 | __put_user(interface, &kgf->gf_interface) || | 677 | __put_user(interface, &kgf->gf_interface) || |
682 | __put_user(fmode, &kgf->gf_fmode) || | 678 | __put_user(fmode, &kgf->gf_fmode) || |
683 | __put_user(numsrc, &kgf->gf_numsrc) || | 679 | __put_user(numsrc, &kgf->gf_numsrc) || |
684 | copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group))) | 680 | copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group))) |
685 | return -EFAULT; | 681 | return -EFAULT; |
686 | 682 | ||
687 | err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); | 683 | err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); |
@@ -714,21 +710,22 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname, | |||
714 | copylen = numsrc * sizeof(gf32->gf_slist[0]); | 710 | copylen = numsrc * sizeof(gf32->gf_slist[0]); |
715 | if (copylen > klen) | 711 | if (copylen > klen) |
716 | copylen = klen; | 712 | copylen = klen; |
717 | if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) | 713 | if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) |
718 | return -EFAULT; | 714 | return -EFAULT; |
719 | } | 715 | } |
720 | return err; | 716 | return err; |
721 | } | 717 | } |
722 | |||
723 | EXPORT_SYMBOL(compat_mc_getsockopt); | 718 | EXPORT_SYMBOL(compat_mc_getsockopt); |
724 | 719 | ||
725 | 720 | ||
726 | /* Argument list sizes for compat_sys_socketcall */ | 721 | /* Argument list sizes for compat_sys_socketcall */ |
727 | #define AL(x) ((x) * sizeof(u32)) | 722 | #define AL(x) ((x) * sizeof(u32)) |
728 | static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 723 | static unsigned char nas[20] = { |
729 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 724 | AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), |
730 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 725 | AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), |
731 | AL(4),AL(5)}; | 726 | AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), |
727 | AL(4), AL(5) | ||
728 | }; | ||
732 | #undef AL | 729 | #undef AL |
733 | 730 | ||
734 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) | 731 | asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) |
@@ -827,7 +824,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) | |||
827 | compat_ptr(a[4]), compat_ptr(a[5])); | 824 | compat_ptr(a[4]), compat_ptr(a[5])); |
828 | break; | 825 | break; |
829 | case SYS_SHUTDOWN: | 826 | case SYS_SHUTDOWN: |
830 | ret = sys_shutdown(a0,a1); | 827 | ret = sys_shutdown(a0, a1); |
831 | break; | 828 | break; |
832 | case SYS_SETSOCKOPT: | 829 | case SYS_SETSOCKOPT: |
833 | ret = compat_sys_setsockopt(a0, a1, a[2], | 830 | ret = compat_sys_setsockopt(a0, a1, a[2], |
diff --git a/net/core/dev.c b/net/core/dev.c index 983a3c1d65c4..a1abc10db08a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -803,35 +803,31 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) | |||
803 | EXPORT_SYMBOL(dev_getfirstbyhwtype); | 803 | EXPORT_SYMBOL(dev_getfirstbyhwtype); |
804 | 804 | ||
805 | /** | 805 | /** |
806 | * dev_get_by_flags - find any device with given flags | 806 | * dev_get_by_flags_rcu - find any device with given flags |
807 | * @net: the applicable net namespace | 807 | * @net: the applicable net namespace |
808 | * @if_flags: IFF_* values | 808 | * @if_flags: IFF_* values |
809 | * @mask: bitmask of bits in if_flags to check | 809 | * @mask: bitmask of bits in if_flags to check |
810 | * | 810 | * |
811 | * Search for any interface with the given flags. Returns NULL if a device | 811 | * Search for any interface with the given flags. Returns NULL if a device |
812 | * is not found or a pointer to the device. The device returned has | 812 | * is not found or a pointer to the device. Must be called inside |
813 | * had a reference added and the pointer is safe until the user calls | 813 | * rcu_read_lock(), and result refcount is unchanged. |
814 | * dev_put to indicate they have finished with it. | ||
815 | */ | 814 | */ |
816 | 815 | ||
817 | struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, | 816 | struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, |
818 | unsigned short mask) | 817 | unsigned short mask) |
819 | { | 818 | { |
820 | struct net_device *dev, *ret; | 819 | struct net_device *dev, *ret; |
821 | 820 | ||
822 | ret = NULL; | 821 | ret = NULL; |
823 | rcu_read_lock(); | ||
824 | for_each_netdev_rcu(net, dev) { | 822 | for_each_netdev_rcu(net, dev) { |
825 | if (((dev->flags ^ if_flags) & mask) == 0) { | 823 | if (((dev->flags ^ if_flags) & mask) == 0) { |
826 | dev_hold(dev); | ||
827 | ret = dev; | 824 | ret = dev; |
828 | break; | 825 | break; |
829 | } | 826 | } |
830 | } | 827 | } |
831 | rcu_read_unlock(); | ||
832 | return ret; | 828 | return ret; |
833 | } | 829 | } |
834 | EXPORT_SYMBOL(dev_get_by_flags); | 830 | EXPORT_SYMBOL(dev_get_by_flags_rcu); |
835 | 831 | ||
836 | /** | 832 | /** |
837 | * dev_valid_name - check if name is okay for network device | 833 | * dev_valid_name - check if name is okay for network device |
@@ -2040,14 +2036,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2040 | struct netdev_queue *txq) | 2036 | struct netdev_queue *txq) |
2041 | { | 2037 | { |
2042 | spinlock_t *root_lock = qdisc_lock(q); | 2038 | spinlock_t *root_lock = qdisc_lock(q); |
2039 | bool contended = qdisc_is_running(q); | ||
2043 | int rc; | 2040 | int rc; |
2044 | 2041 | ||
2042 | /* | ||
2043 | * Heuristic to force contended enqueues to serialize on a | ||
2044 | * separate lock before trying to get qdisc main lock. | ||
2045 | * This permits __QDISC_STATE_RUNNING owner to get the lock more often | ||
2046 | * and dequeue packets faster. | ||
2047 | */ | ||
2048 | if (unlikely(contended)) | ||
2049 | spin_lock(&q->busylock); | ||
2050 | |||
2045 | spin_lock(root_lock); | 2051 | spin_lock(root_lock); |
2046 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { | 2052 | if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { |
2047 | kfree_skb(skb); | 2053 | kfree_skb(skb); |
2048 | rc = NET_XMIT_DROP; | 2054 | rc = NET_XMIT_DROP; |
2049 | } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && | 2055 | } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && |
2050 | !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { | 2056 | qdisc_run_begin(q)) { |
2051 | /* | 2057 | /* |
2052 | * This is a work-conserving queue; there are no old skbs | 2058 | * This is a work-conserving queue; there are no old skbs |
2053 | * waiting to be sent out; and the qdisc is not running - | 2059 | * waiting to be sent out; and the qdisc is not running - |
@@ -2056,19 +2062,30 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2056 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) | 2062 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) |
2057 | skb_dst_force(skb); | 2063 | skb_dst_force(skb); |
2058 | __qdisc_update_bstats(q, skb->len); | 2064 | __qdisc_update_bstats(q, skb->len); |
2059 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) | 2065 | if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { |
2066 | if (unlikely(contended)) { | ||
2067 | spin_unlock(&q->busylock); | ||
2068 | contended = false; | ||
2069 | } | ||
2060 | __qdisc_run(q); | 2070 | __qdisc_run(q); |
2061 | else | 2071 | } else |
2062 | clear_bit(__QDISC_STATE_RUNNING, &q->state); | 2072 | qdisc_run_end(q); |
2063 | 2073 | ||
2064 | rc = NET_XMIT_SUCCESS; | 2074 | rc = NET_XMIT_SUCCESS; |
2065 | } else { | 2075 | } else { |
2066 | skb_dst_force(skb); | 2076 | skb_dst_force(skb); |
2067 | rc = qdisc_enqueue_root(skb, q); | 2077 | rc = qdisc_enqueue_root(skb, q); |
2068 | qdisc_run(q); | 2078 | if (qdisc_run_begin(q)) { |
2079 | if (unlikely(contended)) { | ||
2080 | spin_unlock(&q->busylock); | ||
2081 | contended = false; | ||
2082 | } | ||
2083 | __qdisc_run(q); | ||
2084 | } | ||
2069 | } | 2085 | } |
2070 | spin_unlock(root_lock); | 2086 | spin_unlock(root_lock); |
2071 | 2087 | if (unlikely(contended)) | |
2088 | spin_unlock(&q->busylock); | ||
2072 | return rc; | 2089 | return rc; |
2073 | } | 2090 | } |
2074 | 2091 | ||
@@ -2082,9 +2099,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, | |||
2082 | static inline int skb_needs_linearize(struct sk_buff *skb, | 2099 | static inline int skb_needs_linearize(struct sk_buff *skb, |
2083 | struct net_device *dev) | 2100 | struct net_device *dev) |
2084 | { | 2101 | { |
2085 | return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || | 2102 | return skb_is_nonlinear(skb) && |
2086 | (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || | 2103 | ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || |
2087 | illegal_highdma(dev, skb))); | 2104 | (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || |
2105 | illegal_highdma(dev, skb)))); | ||
2088 | } | 2106 | } |
2089 | 2107 | ||
2090 | /** | 2108 | /** |
@@ -2255,11 +2273,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
2255 | if (skb_rx_queue_recorded(skb)) { | 2273 | if (skb_rx_queue_recorded(skb)) { |
2256 | u16 index = skb_get_rx_queue(skb); | 2274 | u16 index = skb_get_rx_queue(skb); |
2257 | if (unlikely(index >= dev->num_rx_queues)) { | 2275 | if (unlikely(index >= dev->num_rx_queues)) { |
2258 | if (net_ratelimit()) { | 2276 | WARN_ONCE(dev->num_rx_queues > 1, "%s received packet " |
2259 | pr_warning("%s received packet on queue " | 2277 | "on queue %u, but number of RX queues is %u\n", |
2260 | "%u, but number of RX queues is %u\n", | 2278 | dev->name, index, dev->num_rx_queues); |
2261 | dev->name, index, dev->num_rx_queues); | ||
2262 | } | ||
2263 | goto done; | 2279 | goto done; |
2264 | } | 2280 | } |
2265 | rxqueue = dev->_rx + index; | 2281 | rxqueue = dev->_rx + index; |
@@ -2583,70 +2599,14 @@ static inline int deliver_skb(struct sk_buff *skb, | |||
2583 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); | 2599 | return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
2584 | } | 2600 | } |
2585 | 2601 | ||
2586 | #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) | 2602 | #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \ |
2587 | 2603 | (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)) | |
2588 | #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) | ||
2589 | /* This hook is defined here for ATM LANE */ | 2604 | /* This hook is defined here for ATM LANE */ |
2590 | int (*br_fdb_test_addr_hook)(struct net_device *dev, | 2605 | int (*br_fdb_test_addr_hook)(struct net_device *dev, |
2591 | unsigned char *addr) __read_mostly; | 2606 | unsigned char *addr) __read_mostly; |
2592 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); | 2607 | EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); |
2593 | #endif | 2608 | #endif |
2594 | 2609 | ||
2595 | /* | ||
2596 | * If bridge module is loaded call bridging hook. | ||
2597 | * returns NULL if packet was consumed. | ||
2598 | */ | ||
2599 | struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p, | ||
2600 | struct sk_buff *skb) __read_mostly; | ||
2601 | EXPORT_SYMBOL_GPL(br_handle_frame_hook); | ||
2602 | |||
2603 | static inline struct sk_buff *handle_bridge(struct sk_buff *skb, | ||
2604 | struct packet_type **pt_prev, int *ret, | ||
2605 | struct net_device *orig_dev) | ||
2606 | { | ||
2607 | struct net_bridge_port *port; | ||
2608 | |||
2609 | if (skb->pkt_type == PACKET_LOOPBACK || | ||
2610 | (port = rcu_dereference(skb->dev->br_port)) == NULL) | ||
2611 | return skb; | ||
2612 | |||
2613 | if (*pt_prev) { | ||
2614 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | ||
2615 | *pt_prev = NULL; | ||
2616 | } | ||
2617 | |||
2618 | return br_handle_frame_hook(port, skb); | ||
2619 | } | ||
2620 | #else | ||
2621 | #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb) | ||
2622 | #endif | ||
2623 | |||
2624 | #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE) | ||
2625 | struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p, | ||
2626 | struct sk_buff *skb) __read_mostly; | ||
2627 | EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook); | ||
2628 | |||
2629 | static inline struct sk_buff *handle_macvlan(struct sk_buff *skb, | ||
2630 | struct packet_type **pt_prev, | ||
2631 | int *ret, | ||
2632 | struct net_device *orig_dev) | ||
2633 | { | ||
2634 | struct macvlan_port *port; | ||
2635 | |||
2636 | port = rcu_dereference(skb->dev->macvlan_port); | ||
2637 | if (!port) | ||
2638 | return skb; | ||
2639 | |||
2640 | if (*pt_prev) { | ||
2641 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | ||
2642 | *pt_prev = NULL; | ||
2643 | } | ||
2644 | return macvlan_handle_frame_hook(port, skb); | ||
2645 | } | ||
2646 | #else | ||
2647 | #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb) | ||
2648 | #endif | ||
2649 | |||
2650 | #ifdef CONFIG_NET_CLS_ACT | 2610 | #ifdef CONFIG_NET_CLS_ACT |
2651 | /* TODO: Maybe we should just force sch_ingress to be compiled in | 2611 | /* TODO: Maybe we should just force sch_ingress to be compiled in |
2652 | * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions | 2612 | * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions |
@@ -2697,9 +2657,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, | |||
2697 | if (*pt_prev) { | 2657 | if (*pt_prev) { |
2698 | *ret = deliver_skb(skb, *pt_prev, orig_dev); | 2658 | *ret = deliver_skb(skb, *pt_prev, orig_dev); |
2699 | *pt_prev = NULL; | 2659 | *pt_prev = NULL; |
2700 | } else { | ||
2701 | /* Huh? Why does turning on AF_PACKET affect this? */ | ||
2702 | skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd); | ||
2703 | } | 2660 | } |
2704 | 2661 | ||
2705 | switch (ing_filter(skb)) { | 2662 | switch (ing_filter(skb)) { |
@@ -2742,6 +2699,47 @@ void netif_nit_deliver(struct sk_buff *skb) | |||
2742 | rcu_read_unlock(); | 2699 | rcu_read_unlock(); |
2743 | } | 2700 | } |
2744 | 2701 | ||
2702 | /** | ||
2703 | * netdev_rx_handler_register - register receive handler | ||
2704 | * @dev: device to register a handler for | ||
2705 | * @rx_handler: receive handler to register | ||
2706 | * | ||
2707 | * Register a receive hander for a device. This handler will then be | ||
2708 | * called from __netif_receive_skb. A negative errno code is returned | ||
2709 | * on a failure. | ||
2710 | * | ||
2711 | * The caller must hold the rtnl_mutex. | ||
2712 | */ | ||
2713 | int netdev_rx_handler_register(struct net_device *dev, | ||
2714 | rx_handler_func_t *rx_handler) | ||
2715 | { | ||
2716 | ASSERT_RTNL(); | ||
2717 | |||
2718 | if (dev->rx_handler) | ||
2719 | return -EBUSY; | ||
2720 | |||
2721 | rcu_assign_pointer(dev->rx_handler, rx_handler); | ||
2722 | |||
2723 | return 0; | ||
2724 | } | ||
2725 | EXPORT_SYMBOL_GPL(netdev_rx_handler_register); | ||
2726 | |||
2727 | /** | ||
2728 | * netdev_rx_handler_unregister - unregister receive handler | ||
2729 | * @dev: device to unregister a handler from | ||
2730 | * | ||
2731 | * Unregister a receive hander from a device. | ||
2732 | * | ||
2733 | * The caller must hold the rtnl_mutex. | ||
2734 | */ | ||
2735 | void netdev_rx_handler_unregister(struct net_device *dev) | ||
2736 | { | ||
2737 | |||
2738 | ASSERT_RTNL(); | ||
2739 | rcu_assign_pointer(dev->rx_handler, NULL); | ||
2740 | } | ||
2741 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | ||
2742 | |||
2745 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, | 2743 | static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, |
2746 | struct net_device *master) | 2744 | struct net_device *master) |
2747 | { | 2745 | { |
@@ -2794,10 +2792,11 @@ EXPORT_SYMBOL(__skb_bond_should_drop); | |||
2794 | static int __netif_receive_skb(struct sk_buff *skb) | 2792 | static int __netif_receive_skb(struct sk_buff *skb) |
2795 | { | 2793 | { |
2796 | struct packet_type *ptype, *pt_prev; | 2794 | struct packet_type *ptype, *pt_prev; |
2795 | rx_handler_func_t *rx_handler; | ||
2797 | struct net_device *orig_dev; | 2796 | struct net_device *orig_dev; |
2798 | struct net_device *master; | 2797 | struct net_device *master; |
2799 | struct net_device *null_or_orig; | 2798 | struct net_device *null_or_orig; |
2800 | struct net_device *null_or_bond; | 2799 | struct net_device *orig_or_bond; |
2801 | int ret = NET_RX_DROP; | 2800 | int ret = NET_RX_DROP; |
2802 | __be16 type; | 2801 | __be16 type; |
2803 | 2802 | ||
@@ -2814,13 +2813,24 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2814 | if (!skb->skb_iif) | 2813 | if (!skb->skb_iif) |
2815 | skb->skb_iif = skb->dev->ifindex; | 2814 | skb->skb_iif = skb->dev->ifindex; |
2816 | 2815 | ||
2816 | /* | ||
2817 | * bonding note: skbs received on inactive slaves should only | ||
2818 | * be delivered to pkt handlers that are exact matches. Also | ||
2819 | * the deliver_no_wcard flag will be set. If packet handlers | ||
2820 | * are sensitive to duplicate packets these skbs will need to | ||
2821 | * be dropped at the handler. The vlan accel path may have | ||
2822 | * already set the deliver_no_wcard flag. | ||
2823 | */ | ||
2817 | null_or_orig = NULL; | 2824 | null_or_orig = NULL; |
2818 | orig_dev = skb->dev; | 2825 | orig_dev = skb->dev; |
2819 | master = ACCESS_ONCE(orig_dev->master); | 2826 | master = ACCESS_ONCE(orig_dev->master); |
2820 | if (master) { | 2827 | if (skb->deliver_no_wcard) |
2821 | if (skb_bond_should_drop(skb, master)) | 2828 | null_or_orig = orig_dev; |
2829 | else if (master) { | ||
2830 | if (skb_bond_should_drop(skb, master)) { | ||
2831 | skb->deliver_no_wcard = 1; | ||
2822 | null_or_orig = orig_dev; /* deliver only exact match */ | 2832 | null_or_orig = orig_dev; /* deliver only exact match */ |
2823 | else | 2833 | } else |
2824 | skb->dev = master; | 2834 | skb->dev = master; |
2825 | } | 2835 | } |
2826 | 2836 | ||
@@ -2856,12 +2866,17 @@ static int __netif_receive_skb(struct sk_buff *skb) | |||
2856 | ncls: | 2866 | ncls: |
2857 | #endif | 2867 | #endif |
2858 | 2868 | ||
2859 | skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); | 2869 | /* Handle special case of bridge or macvlan */ |
2860 | if (!skb) | 2870 | rx_handler = rcu_dereference(skb->dev->rx_handler); |
2861 | goto out; | 2871 | if (rx_handler) { |
2862 | skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); | 2872 | if (pt_prev) { |
2863 | if (!skb) | 2873 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2864 | goto out; | 2874 | pt_prev = NULL; |
2875 | } | ||
2876 | skb = rx_handler(skb); | ||
2877 | if (!skb) | ||
2878 | goto out; | ||
2879 | } | ||
2865 | 2880 | ||
2866 | /* | 2881 | /* |
2867 | * Make sure frames received on VLAN interfaces stacked on | 2882 | * Make sure frames received on VLAN interfaces stacked on |
@@ -2869,10 +2884,10 @@ ncls: | |||
2869 | * device that may have registered for a specific ptype. The | 2884 | * device that may have registered for a specific ptype. The |
2870 | * handler may have to adjust skb->dev and orig_dev. | 2885 | * handler may have to adjust skb->dev and orig_dev. |
2871 | */ | 2886 | */ |
2872 | null_or_bond = NULL; | 2887 | orig_or_bond = orig_dev; |
2873 | if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && | 2888 | if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && |
2874 | (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { | 2889 | (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { |
2875 | null_or_bond = vlan_dev_real_dev(skb->dev); | 2890 | orig_or_bond = vlan_dev_real_dev(skb->dev); |
2876 | } | 2891 | } |
2877 | 2892 | ||
2878 | type = skb->protocol; | 2893 | type = skb->protocol; |
@@ -2880,7 +2895,7 @@ ncls: | |||
2880 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { | 2895 | &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { |
2881 | if (ptype->type == type && (ptype->dev == null_or_orig || | 2896 | if (ptype->type == type && (ptype->dev == null_or_orig || |
2882 | ptype->dev == skb->dev || ptype->dev == orig_dev || | 2897 | ptype->dev == skb->dev || ptype->dev == orig_dev || |
2883 | ptype->dev == null_or_bond)) { | 2898 | ptype->dev == orig_or_bond)) { |
2884 | if (pt_prev) | 2899 | if (pt_prev) |
2885 | ret = deliver_skb(skb, pt_prev, orig_dev); | 2900 | ret = deliver_skb(skb, pt_prev, orig_dev); |
2886 | pt_prev = ptype; | 2901 | pt_prev = ptype; |
@@ -3686,10 +3701,10 @@ void dev_seq_stop(struct seq_file *seq, void *v) | |||
3686 | 3701 | ||
3687 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) | 3702 | static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) |
3688 | { | 3703 | { |
3689 | const struct net_device_stats *stats = dev_get_stats(dev); | 3704 | const struct rtnl_link_stats64 *stats = dev_get_stats(dev); |
3690 | 3705 | ||
3691 | seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " | 3706 | seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " |
3692 | "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", | 3707 | "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", |
3693 | dev->name, stats->rx_bytes, stats->rx_packets, | 3708 | dev->name, stats->rx_bytes, stats->rx_packets, |
3694 | stats->rx_errors, | 3709 | stats->rx_errors, |
3695 | stats->rx_dropped + stats->rx_missed_errors, | 3710 | stats->rx_dropped + stats->rx_missed_errors, |
@@ -5266,18 +5281,21 @@ EXPORT_SYMBOL(dev_txq_stats_fold); | |||
5266 | * @dev: device to get statistics from | 5281 | * @dev: device to get statistics from |
5267 | * | 5282 | * |
5268 | * Get network statistics from device. The device driver may provide | 5283 | * Get network statistics from device. The device driver may provide |
5269 | * its own method by setting dev->netdev_ops->get_stats; otherwise | 5284 | * its own method by setting dev->netdev_ops->get_stats64 or |
5270 | * the internal statistics structure is used. | 5285 | * dev->netdev_ops->get_stats; otherwise the internal statistics |
5286 | * structure is used. | ||
5271 | */ | 5287 | */ |
5272 | const struct net_device_stats *dev_get_stats(struct net_device *dev) | 5288 | const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev) |
5273 | { | 5289 | { |
5274 | const struct net_device_ops *ops = dev->netdev_ops; | 5290 | const struct net_device_ops *ops = dev->netdev_ops; |
5275 | 5291 | ||
5292 | if (ops->ndo_get_stats64) | ||
5293 | return ops->ndo_get_stats64(dev); | ||
5276 | if (ops->ndo_get_stats) | 5294 | if (ops->ndo_get_stats) |
5277 | return ops->ndo_get_stats(dev); | 5295 | return (struct rtnl_link_stats64 *)ops->ndo_get_stats(dev); |
5278 | 5296 | ||
5279 | dev_txq_stats_fold(dev, &dev->stats); | 5297 | dev_txq_stats_fold(dev, &dev->stats); |
5280 | return &dev->stats; | 5298 | return &dev->stats64; |
5281 | } | 5299 | } |
5282 | EXPORT_SYMBOL(dev_get_stats); | 5300 | EXPORT_SYMBOL(dev_get_stats); |
5283 | 5301 | ||
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index cf8e70392fe0..9fbe7f7429b0 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock); | |||
107 | 107 | ||
108 | /* Protects against soft lockup during large deletion */ | 108 | /* Protects against soft lockup during large deletion */ |
109 | static struct rb_root est_root = RB_ROOT; | 109 | static struct rb_root est_root = RB_ROOT; |
110 | static DEFINE_SPINLOCK(est_tree_lock); | ||
110 | 111 | ||
111 | static void est_timer(unsigned long arg) | 112 | static void est_timer(unsigned long arg) |
112 | { | 113 | { |
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats | |||
201 | * | 202 | * |
202 | * Returns 0 on success or a negative error code. | 203 | * Returns 0 on success or a negative error code. |
203 | * | 204 | * |
204 | * NOTE: Called under rtnl_mutex | ||
205 | */ | 205 | */ |
206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | 206 | int gen_new_estimator(struct gnet_stats_basic_packed *bstats, |
207 | struct gnet_stats_rate_est *rate_est, | 207 | struct gnet_stats_rate_est *rate_est, |
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
232 | est->last_packets = bstats->packets; | 232 | est->last_packets = bstats->packets; |
233 | est->avpps = rate_est->pps<<10; | 233 | est->avpps = rate_est->pps<<10; |
234 | 234 | ||
235 | spin_lock(&est_tree_lock); | ||
235 | if (!elist[idx].timer.function) { | 236 | if (!elist[idx].timer.function) { |
236 | INIT_LIST_HEAD(&elist[idx].list); | 237 | INIT_LIST_HEAD(&elist[idx].list); |
237 | setup_timer(&elist[idx].timer, est_timer, idx); | 238 | setup_timer(&elist[idx].timer, est_timer, idx); |
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, | |||
242 | 243 | ||
243 | list_add_rcu(&est->list, &elist[idx].list); | 244 | list_add_rcu(&est->list, &elist[idx].list); |
244 | gen_add_node(est); | 245 | gen_add_node(est); |
246 | spin_unlock(&est_tree_lock); | ||
245 | 247 | ||
246 | return 0; | 248 | return 0; |
247 | } | 249 | } |
@@ -261,13 +263,14 @@ static void __gen_kill_estimator(struct rcu_head *head) | |||
261 | * | 263 | * |
262 | * Removes the rate estimator specified by &bstats and &rate_est. | 264 | * Removes the rate estimator specified by &bstats and &rate_est. |
263 | * | 265 | * |
264 | * NOTE: Called under rtnl_mutex | 266 | * Note : Caller should respect an RCU grace period before freeing stats_lock |
265 | */ | 267 | */ |
266 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | 268 | void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, |
267 | struct gnet_stats_rate_est *rate_est) | 269 | struct gnet_stats_rate_est *rate_est) |
268 | { | 270 | { |
269 | struct gen_estimator *e; | 271 | struct gen_estimator *e; |
270 | 272 | ||
273 | spin_lock(&est_tree_lock); | ||
271 | while ((e = gen_find_node(bstats, rate_est))) { | 274 | while ((e = gen_find_node(bstats, rate_est))) { |
272 | rb_erase(&e->node, &est_root); | 275 | rb_erase(&e->node, &est_root); |
273 | 276 | ||
@@ -278,6 +281,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, | |||
278 | list_del_rcu(&e->list); | 281 | list_del_rcu(&e->list); |
279 | call_rcu(&e->e_rcu, __gen_kill_estimator); | 282 | call_rcu(&e->e_rcu, __gen_kill_estimator); |
280 | } | 283 | } |
284 | spin_unlock(&est_tree_lock); | ||
281 | } | 285 | } |
282 | EXPORT_SYMBOL(gen_kill_estimator); | 286 | EXPORT_SYMBOL(gen_kill_estimator); |
283 | 287 | ||
@@ -312,8 +316,14 @@ EXPORT_SYMBOL(gen_replace_estimator); | |||
312 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, | 316 | bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, |
313 | const struct gnet_stats_rate_est *rate_est) | 317 | const struct gnet_stats_rate_est *rate_est) |
314 | { | 318 | { |
319 | bool res; | ||
320 | |||
315 | ASSERT_RTNL(); | 321 | ASSERT_RTNL(); |
316 | 322 | ||
317 | return gen_find_node(bstats, rate_est) != NULL; | 323 | spin_lock(&est_tree_lock); |
324 | res = gen_find_node(bstats, rate_est) != NULL; | ||
325 | spin_unlock(&est_tree_lock); | ||
326 | |||
327 | return res; | ||
318 | } | 328 | } |
319 | EXPORT_SYMBOL(gen_estimator_active); | 329 | EXPORT_SYMBOL(gen_estimator_active); |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 99e7052d7323..ea3bb4c3b87d 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
@@ -29,6 +29,7 @@ static const char fmt_hex[] = "%#x\n"; | |||
29 | static const char fmt_long_hex[] = "%#lx\n"; | 29 | static const char fmt_long_hex[] = "%#lx\n"; |
30 | static const char fmt_dec[] = "%d\n"; | 30 | static const char fmt_dec[] = "%d\n"; |
31 | static const char fmt_ulong[] = "%lu\n"; | 31 | static const char fmt_ulong[] = "%lu\n"; |
32 | static const char fmt_u64[] = "%llu\n"; | ||
32 | 33 | ||
33 | static inline int dev_isalive(const struct net_device *dev) | 34 | static inline int dev_isalive(const struct net_device *dev) |
34 | { | 35 | { |
@@ -324,14 +325,13 @@ static ssize_t netstat_show(const struct device *d, | |||
324 | struct net_device *dev = to_net_dev(d); | 325 | struct net_device *dev = to_net_dev(d); |
325 | ssize_t ret = -EINVAL; | 326 | ssize_t ret = -EINVAL; |
326 | 327 | ||
327 | WARN_ON(offset > sizeof(struct net_device_stats) || | 328 | WARN_ON(offset > sizeof(struct rtnl_link_stats64) || |
328 | offset % sizeof(unsigned long) != 0); | 329 | offset % sizeof(u64) != 0); |
329 | 330 | ||
330 | read_lock(&dev_base_lock); | 331 | read_lock(&dev_base_lock); |
331 | if (dev_isalive(dev)) { | 332 | if (dev_isalive(dev)) { |
332 | const struct net_device_stats *stats = dev_get_stats(dev); | 333 | const struct rtnl_link_stats64 *stats = dev_get_stats(dev); |
333 | ret = sprintf(buf, fmt_ulong, | 334 | ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); |
334 | *(unsigned long *)(((u8 *) stats) + offset)); | ||
335 | } | 335 | } |
336 | read_unlock(&dev_base_lock); | 336 | read_unlock(&dev_base_lock); |
337 | return ret; | 337 | return ret; |
@@ -343,7 +343,7 @@ static ssize_t show_##name(struct device *d, \ | |||
343 | struct device_attribute *attr, char *buf) \ | 343 | struct device_attribute *attr, char *buf) \ |
344 | { \ | 344 | { \ |
345 | return netstat_show(d, attr, buf, \ | 345 | return netstat_show(d, attr, buf, \ |
346 | offsetof(struct net_device_stats, name)); \ | 346 | offsetof(struct rtnl_link_stats64, name)); \ |
347 | } \ | 347 | } \ |
348 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | 348 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
349 | 349 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2ad68da418df..6428653e9498 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -169,7 +169,7 @@ | |||
169 | #include <asm/dma.h> | 169 | #include <asm/dma.h> |
170 | #include <asm/div64.h> /* do_div */ | 170 | #include <asm/div64.h> /* do_div */ |
171 | 171 | ||
172 | #define VERSION "2.73" | 172 | #define VERSION "2.74" |
173 | #define IP_NAME_SZ 32 | 173 | #define IP_NAME_SZ 32 |
174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ | 174 | #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ |
175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) | 175 | #define MPLS_STACK_BOTTOM htonl(0x00000100) |
@@ -980,6 +980,40 @@ static ssize_t pktgen_if_write(struct file *file, | |||
980 | (unsigned long long) pkt_dev->delay); | 980 | (unsigned long long) pkt_dev->delay); |
981 | return count; | 981 | return count; |
982 | } | 982 | } |
983 | if (!strcmp(name, "rate")) { | ||
984 | len = num_arg(&user_buffer[i], 10, &value); | ||
985 | if (len < 0) | ||
986 | return len; | ||
987 | |||
988 | i += len; | ||
989 | if (!value) | ||
990 | return len; | ||
991 | pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; | ||
992 | if (debug) | ||
993 | printk(KERN_INFO | ||
994 | "pktgen: Delay set at: %llu ns\n", | ||
995 | pkt_dev->delay); | ||
996 | |||
997 | sprintf(pg_result, "OK: rate=%lu", value); | ||
998 | return count; | ||
999 | } | ||
1000 | if (!strcmp(name, "ratep")) { | ||
1001 | len = num_arg(&user_buffer[i], 10, &value); | ||
1002 | if (len < 0) | ||
1003 | return len; | ||
1004 | |||
1005 | i += len; | ||
1006 | if (!value) | ||
1007 | return len; | ||
1008 | pkt_dev->delay = NSEC_PER_SEC/value; | ||
1009 | if (debug) | ||
1010 | printk(KERN_INFO | ||
1011 | "pktgen: Delay set at: %llu ns\n", | ||
1012 | pkt_dev->delay); | ||
1013 | |||
1014 | sprintf(pg_result, "OK: rate=%lu", value); | ||
1015 | return count; | ||
1016 | } | ||
983 | if (!strcmp(name, "udp_src_min")) { | 1017 | if (!strcmp(name, "udp_src_min")) { |
984 | len = num_arg(&user_buffer[i], 10, &value); | 1018 | len = num_arg(&user_buffer[i], 10, &value); |
985 | if (len < 0) | 1019 | if (len < 0) |
@@ -2142,15 +2176,15 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2142 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 2176 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
2143 | hrtimer_set_expires(&t.timer, spin_until); | 2177 | hrtimer_set_expires(&t.timer, spin_until); |
2144 | 2178 | ||
2145 | remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); | 2179 | remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); |
2146 | if (remaining <= 0) { | 2180 | if (remaining <= 0) { |
2147 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); | 2181 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
2148 | return; | 2182 | return; |
2149 | } | 2183 | } |
2150 | 2184 | ||
2151 | start_time = ktime_now(); | 2185 | start_time = ktime_now(); |
2152 | if (remaining < 100) | 2186 | if (remaining < 100000) |
2153 | udelay(remaining); /* really small just spin */ | 2187 | ndelay(remaining); /* really small just spin */ |
2154 | else { | 2188 | else { |
2155 | /* see do_nanosleep */ | 2189 | /* see do_nanosleep */ |
2156 | hrtimer_init_sleeper(&t, current); | 2190 | hrtimer_init_sleeper(&t, current); |
@@ -2170,7 +2204,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
2170 | end_time = ktime_now(); | 2204 | end_time = ktime_now(); |
2171 | 2205 | ||
2172 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2206 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
2173 | pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay); | 2207 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
2174 | } | 2208 | } |
2175 | 2209 | ||
2176 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2210 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a2af24e9e3d..e645778e9b7e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -579,7 +579,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev, | |||
579 | } | 579 | } |
580 | 580 | ||
581 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | 581 | static void copy_rtnl_link_stats(struct rtnl_link_stats *a, |
582 | const struct net_device_stats *b) | 582 | const struct rtnl_link_stats64 *b) |
583 | { | 583 | { |
584 | a->rx_packets = b->rx_packets; | 584 | a->rx_packets = b->rx_packets; |
585 | a->tx_packets = b->tx_packets; | 585 | a->tx_packets = b->tx_packets; |
@@ -610,7 +610,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a, | |||
610 | a->tx_compressed = b->tx_compressed; | 610 | a->tx_compressed = b->tx_compressed; |
611 | } | 611 | } |
612 | 612 | ||
613 | static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) | 613 | static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) |
614 | { | 614 | { |
615 | struct rtnl_link_stats64 a; | 615 | struct rtnl_link_stats64 a; |
616 | 616 | ||
@@ -791,7 +791,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, | |||
791 | { | 791 | { |
792 | struct ifinfomsg *ifm; | 792 | struct ifinfomsg *ifm; |
793 | struct nlmsghdr *nlh; | 793 | struct nlmsghdr *nlh; |
794 | const struct net_device_stats *stats; | 794 | const struct rtnl_link_stats64 *stats; |
795 | struct nlattr *attr; | 795 | struct nlattr *attr; |
796 | 796 | ||
797 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); | 797 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e7ac09c281a..34432b4e96bb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) | |||
532 | new->ip_summed = old->ip_summed; | 532 | new->ip_summed = old->ip_summed; |
533 | skb_copy_queue_mapping(new, old); | 533 | skb_copy_queue_mapping(new, old); |
534 | new->priority = old->priority; | 534 | new->priority = old->priority; |
535 | new->deliver_no_wcard = old->deliver_no_wcard; | ||
535 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) | 536 | #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) |
536 | new->ipvs_property = old->ipvs_property; | 537 | new->ipvs_property = old->ipvs_property; |
537 | #endif | 538 | #endif |
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) | |||
569 | C(len); | 570 | C(len); |
570 | C(data_len); | 571 | C(data_len); |
571 | C(mac_len); | 572 | C(mac_len); |
572 | C(rxhash); | ||
573 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; | 573 | n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; |
574 | n->cloned = 1; | 574 | n->cloned = 1; |
575 | n->nohdr = 0; | 575 | n->nohdr = 0; |
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) | |||
2965 | } | 2965 | } |
2966 | EXPORT_SYMBOL_GPL(skb_cow_data); | 2966 | EXPORT_SYMBOL_GPL(skb_cow_data); |
2967 | 2967 | ||
2968 | static void sock_rmem_free(struct sk_buff *skb) | ||
2969 | { | ||
2970 | struct sock *sk = skb->sk; | ||
2971 | |||
2972 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | ||
2973 | } | ||
2974 | |||
2975 | /* | ||
2976 | * Note: We dont mem charge error packets (no sk_forward_alloc changes) | ||
2977 | */ | ||
2978 | int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | ||
2979 | { | ||
2980 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | ||
2981 | (unsigned)sk->sk_rcvbuf) | ||
2982 | return -ENOMEM; | ||
2983 | |||
2984 | skb_orphan(skb); | ||
2985 | skb->sk = sk; | ||
2986 | skb->destructor = sock_rmem_free; | ||
2987 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | ||
2988 | |||
2989 | skb_queue_tail(&sk->sk_error_queue, skb); | ||
2990 | if (!sock_flag(sk, SOCK_DEAD)) | ||
2991 | sk->sk_data_ready(sk, skb->len); | ||
2992 | return 0; | ||
2993 | } | ||
2994 | EXPORT_SYMBOL(sock_queue_err_skb); | ||
2995 | |||
2968 | void skb_tstamp_tx(struct sk_buff *orig_skb, | 2996 | void skb_tstamp_tx(struct sk_buff *orig_skb, |
2969 | struct skb_shared_hwtstamps *hwtstamps) | 2997 | struct skb_shared_hwtstamps *hwtstamps) |
2970 | { | 2998 | { |
@@ -2997,9 +3025,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, | |||
2997 | serr->ee.ee_errno = ENOMSG; | 3025 | serr->ee.ee_errno = ENOMSG; |
2998 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; | 3026 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
2999 | 3027 | ||
3000 | bh_lock_sock(sk); | ||
3001 | err = sock_queue_err_skb(sk, skb); | 3028 | err = sock_queue_err_skb(sk, skb); |
3002 | bh_unlock_sock(sk); | ||
3003 | 3029 | ||
3004 | if (err) | 3030 | if (err) |
3005 | kfree_skb(skb); | 3031 | kfree_skb(skb); |
diff --git a/net/core/sock.c b/net/core/sock.c index 2cf7f9f7e775..f9ce0db41cd6 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -156,7 +156,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { | |||
156 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , | 156 | "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , |
157 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , | 157 | "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , |
158 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , | 158 | "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , |
159 | "sk_lock-AF_IEEE802154", | 159 | "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , |
160 | "sk_lock-AF_MAX" | 160 | "sk_lock-AF_MAX" |
161 | }; | 161 | }; |
162 | static const char *const af_family_slock_key_strings[AF_MAX+1] = { | 162 | static const char *const af_family_slock_key_strings[AF_MAX+1] = { |
@@ -172,7 +172,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { | |||
172 | "slock-27" , "slock-28" , "slock-AF_CAN" , | 172 | "slock-27" , "slock-28" , "slock-AF_CAN" , |
173 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , | 173 | "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , |
174 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , | 174 | "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , |
175 | "slock-AF_IEEE802154", | 175 | "slock-AF_IEEE802154", "slock-AF_CAIF" , |
176 | "slock-AF_MAX" | 176 | "slock-AF_MAX" |
177 | }; | 177 | }; |
178 | static const char *const af_family_clock_key_strings[AF_MAX+1] = { | 178 | static const char *const af_family_clock_key_strings[AF_MAX+1] = { |
@@ -188,7 +188,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { | |||
188 | "clock-27" , "clock-28" , "clock-AF_CAN" , | 188 | "clock-27" , "clock-28" , "clock-AF_CAN" , |
189 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , | 189 | "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , |
190 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , | 190 | "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , |
191 | "clock-AF_IEEE802154", | 191 | "clock-AF_IEEE802154", "clock-AF_CAIF" , |
192 | "clock-AF_MAX" | 192 | "clock-AF_MAX" |
193 | }; | 193 | }; |
194 | 194 | ||
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index d9b11ef8694c..d4a166f0f391 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -105,7 +105,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
105 | goto failure; | 105 | goto failure; |
106 | 106 | ||
107 | /* OK, now commit destination to socket. */ | 107 | /* OK, now commit destination to socket. */ |
108 | sk_setup_caps(sk, &rt->u.dst); | 108 | sk_setup_caps(sk, &rt->dst); |
109 | 109 | ||
110 | dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, | 110 | dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, |
111 | inet->inet_daddr, | 111 | inet->inet_daddr, |
@@ -475,7 +475,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, | |||
475 | return NULL; | 475 | return NULL; |
476 | } | 476 | } |
477 | 477 | ||
478 | return &rt->u.dst; | 478 | return &rt->dst; |
479 | } | 479 | } |
480 | 480 | ||
481 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, | 481 | static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 091698899594..6e3f32575df7 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -248,7 +248,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
248 | struct ipv6_pinfo *np = inet6_sk(sk); | 248 | struct ipv6_pinfo *np = inet6_sk(sk); |
249 | struct sk_buff *skb; | 249 | struct sk_buff *skb; |
250 | struct ipv6_txoptions *opt = NULL; | 250 | struct ipv6_txoptions *opt = NULL; |
251 | struct in6_addr *final_p = NULL, final; | 251 | struct in6_addr *final_p, final; |
252 | struct flowi fl; | 252 | struct flowi fl; |
253 | int err = -1; | 253 | int err = -1; |
254 | struct dst_entry *dst; | 254 | struct dst_entry *dst; |
@@ -265,13 +265,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, | |||
265 | 265 | ||
266 | opt = np->opt; | 266 | opt = np->opt; |
267 | 267 | ||
268 | if (opt != NULL && opt->srcrt != NULL) { | 268 | final_p = fl6_update_dst(&fl, opt, &final); |
269 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; | ||
270 | |||
271 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
272 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
273 | final_p = &final; | ||
274 | } | ||
275 | 269 | ||
276 | err = ip6_dst_lookup(sk, &dst, &fl); | 270 | err = ip6_dst_lookup(sk, &dst, &fl); |
277 | if (err) | 271 | if (err) |
@@ -545,19 +539,13 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, | |||
545 | goto out_overflow; | 539 | goto out_overflow; |
546 | 540 | ||
547 | if (dst == NULL) { | 541 | if (dst == NULL) { |
548 | struct in6_addr *final_p = NULL, final; | 542 | struct in6_addr *final_p, final; |
549 | struct flowi fl; | 543 | struct flowi fl; |
550 | 544 | ||
551 | memset(&fl, 0, sizeof(fl)); | 545 | memset(&fl, 0, sizeof(fl)); |
552 | fl.proto = IPPROTO_DCCP; | 546 | fl.proto = IPPROTO_DCCP; |
553 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 547 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
554 | if (opt != NULL && opt->srcrt != NULL) { | 548 | final_p = fl6_update_dst(&fl, opt, &final); |
555 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt; | ||
556 | |||
557 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
558 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
559 | final_p = &final; | ||
560 | } | ||
561 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 549 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
562 | fl.oif = sk->sk_bound_dev_if; | 550 | fl.oif = sk->sk_bound_dev_if; |
563 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; | 551 | fl.fl_ip_dport = inet_rsk(req)->rmt_port; |
@@ -885,7 +873,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
885 | struct inet_sock *inet = inet_sk(sk); | 873 | struct inet_sock *inet = inet_sk(sk); |
886 | struct ipv6_pinfo *np = inet6_sk(sk); | 874 | struct ipv6_pinfo *np = inet6_sk(sk); |
887 | struct dccp_sock *dp = dccp_sk(sk); | 875 | struct dccp_sock *dp = dccp_sk(sk); |
888 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | 876 | struct in6_addr *saddr = NULL, *final_p, final; |
889 | struct flowi fl; | 877 | struct flowi fl; |
890 | struct dst_entry *dst; | 878 | struct dst_entry *dst; |
891 | int addr_type; | 879 | int addr_type; |
@@ -988,13 +976,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
988 | fl.fl_ip_sport = inet->inet_sport; | 976 | fl.fl_ip_sport = inet->inet_sport; |
989 | security_sk_classify_flow(sk, &fl); | 977 | security_sk_classify_flow(sk, &fl); |
990 | 978 | ||
991 | if (np->opt != NULL && np->opt->srcrt != NULL) { | 979 | final_p = fl6_update_dst(&fl, np->opt, &final); |
992 | const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
993 | |||
994 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
995 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
996 | final_p = &final; | ||
997 | } | ||
998 | 980 | ||
999 | err = ip6_dst_lookup(sk, &dst, &fl); | 981 | err = ip6_dst_lookup(sk, &dst, &fl); |
1000 | if (err) | 982 | if (err) |
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 812e6dff6067..6585ea6d1182 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c | |||
@@ -146,13 +146,13 @@ static __inline__ unsigned dn_hash(__le16 src, __le16 dst) | |||
146 | 146 | ||
147 | static inline void dnrt_free(struct dn_route *rt) | 147 | static inline void dnrt_free(struct dn_route *rt) |
148 | { | 148 | { |
149 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 149 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); |
150 | } | 150 | } |
151 | 151 | ||
152 | static inline void dnrt_drop(struct dn_route *rt) | 152 | static inline void dnrt_drop(struct dn_route *rt) |
153 | { | 153 | { |
154 | dst_release(&rt->u.dst); | 154 | dst_release(&rt->dst); |
155 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 155 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); |
156 | } | 156 | } |
157 | 157 | ||
158 | static void dn_dst_check_expire(unsigned long dummy) | 158 | static void dn_dst_check_expire(unsigned long dummy) |
@@ -167,13 +167,13 @@ static void dn_dst_check_expire(unsigned long dummy) | |||
167 | 167 | ||
168 | spin_lock(&dn_rt_hash_table[i].lock); | 168 | spin_lock(&dn_rt_hash_table[i].lock); |
169 | while((rt=*rtp) != NULL) { | 169 | while((rt=*rtp) != NULL) { |
170 | if (atomic_read(&rt->u.dst.__refcnt) || | 170 | if (atomic_read(&rt->dst.__refcnt) || |
171 | (now - rt->u.dst.lastuse) < expire) { | 171 | (now - rt->dst.lastuse) < expire) { |
172 | rtp = &rt->u.dst.dn_next; | 172 | rtp = &rt->dst.dn_next; |
173 | continue; | 173 | continue; |
174 | } | 174 | } |
175 | *rtp = rt->u.dst.dn_next; | 175 | *rtp = rt->dst.dn_next; |
176 | rt->u.dst.dn_next = NULL; | 176 | rt->dst.dn_next = NULL; |
177 | dnrt_free(rt); | 177 | dnrt_free(rt); |
178 | } | 178 | } |
179 | spin_unlock(&dn_rt_hash_table[i].lock); | 179 | spin_unlock(&dn_rt_hash_table[i].lock); |
@@ -198,13 +198,13 @@ static int dn_dst_gc(struct dst_ops *ops) | |||
198 | rtp = &dn_rt_hash_table[i].chain; | 198 | rtp = &dn_rt_hash_table[i].chain; |
199 | 199 | ||
200 | while((rt=*rtp) != NULL) { | 200 | while((rt=*rtp) != NULL) { |
201 | if (atomic_read(&rt->u.dst.__refcnt) || | 201 | if (atomic_read(&rt->dst.__refcnt) || |
202 | (now - rt->u.dst.lastuse) < expire) { | 202 | (now - rt->dst.lastuse) < expire) { |
203 | rtp = &rt->u.dst.dn_next; | 203 | rtp = &rt->dst.dn_next; |
204 | continue; | 204 | continue; |
205 | } | 205 | } |
206 | *rtp = rt->u.dst.dn_next; | 206 | *rtp = rt->dst.dn_next; |
207 | rt->u.dst.dn_next = NULL; | 207 | rt->dst.dn_next = NULL; |
208 | dnrt_drop(rt); | 208 | dnrt_drop(rt); |
209 | break; | 209 | break; |
210 | } | 210 | } |
@@ -287,25 +287,25 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route * | |||
287 | while((rth = *rthp) != NULL) { | 287 | while((rth = *rthp) != NULL) { |
288 | if (compare_keys(&rth->fl, &rt->fl)) { | 288 | if (compare_keys(&rth->fl, &rt->fl)) { |
289 | /* Put it first */ | 289 | /* Put it first */ |
290 | *rthp = rth->u.dst.dn_next; | 290 | *rthp = rth->dst.dn_next; |
291 | rcu_assign_pointer(rth->u.dst.dn_next, | 291 | rcu_assign_pointer(rth->dst.dn_next, |
292 | dn_rt_hash_table[hash].chain); | 292 | dn_rt_hash_table[hash].chain); |
293 | rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); | 293 | rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); |
294 | 294 | ||
295 | dst_use(&rth->u.dst, now); | 295 | dst_use(&rth->dst, now); |
296 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); | 296 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); |
297 | 297 | ||
298 | dnrt_drop(rt); | 298 | dnrt_drop(rt); |
299 | *rp = rth; | 299 | *rp = rth; |
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
302 | rthp = &rth->u.dst.dn_next; | 302 | rthp = &rth->dst.dn_next; |
303 | } | 303 | } |
304 | 304 | ||
305 | rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain); | 305 | rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain); |
306 | rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); | 306 | rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); |
307 | 307 | ||
308 | dst_use(&rt->u.dst, now); | 308 | dst_use(&rt->dst, now); |
309 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); | 309 | spin_unlock_bh(&dn_rt_hash_table[hash].lock); |
310 | *rp = rt; | 310 | *rp = rt; |
311 | return 0; | 311 | return 0; |
@@ -323,8 +323,8 @@ static void dn_run_flush(unsigned long dummy) | |||
323 | goto nothing_to_declare; | 323 | goto nothing_to_declare; |
324 | 324 | ||
325 | for(; rt; rt=next) { | 325 | for(; rt; rt=next) { |
326 | next = rt->u.dst.dn_next; | 326 | next = rt->dst.dn_next; |
327 | rt->u.dst.dn_next = NULL; | 327 | rt->dst.dn_next = NULL; |
328 | dst_free((struct dst_entry *)rt); | 328 | dst_free((struct dst_entry *)rt); |
329 | } | 329 | } |
330 | 330 | ||
@@ -743,7 +743,7 @@ static int dn_forward(struct sk_buff *skb) | |||
743 | /* Ensure that we have enough space for headers */ | 743 | /* Ensure that we have enough space for headers */ |
744 | rt = (struct dn_route *)skb_dst(skb); | 744 | rt = (struct dn_route *)skb_dst(skb); |
745 | header_len = dn_db->use_long ? 21 : 6; | 745 | header_len = dn_db->use_long ? 21 : 6; |
746 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) | 746 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) |
747 | goto drop; | 747 | goto drop; |
748 | 748 | ||
749 | /* | 749 | /* |
@@ -752,7 +752,7 @@ static int dn_forward(struct sk_buff *skb) | |||
752 | if (++cb->hops > 30) | 752 | if (++cb->hops > 30) |
753 | goto drop; | 753 | goto drop; |
754 | 754 | ||
755 | skb->dev = rt->u.dst.dev; | 755 | skb->dev = rt->dst.dev; |
756 | 756 | ||
757 | /* | 757 | /* |
758 | * If packet goes out same interface it came in on, then set | 758 | * If packet goes out same interface it came in on, then set |
@@ -792,7 +792,7 @@ static int dn_rt_bug(struct sk_buff *skb) | |||
792 | static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) | 792 | static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) |
793 | { | 793 | { |
794 | struct dn_fib_info *fi = res->fi; | 794 | struct dn_fib_info *fi = res->fi; |
795 | struct net_device *dev = rt->u.dst.dev; | 795 | struct net_device *dev = rt->dst.dev; |
796 | struct neighbour *n; | 796 | struct neighbour *n; |
797 | unsigned mss; | 797 | unsigned mss; |
798 | 798 | ||
@@ -800,25 +800,25 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) | |||
800 | if (DN_FIB_RES_GW(*res) && | 800 | if (DN_FIB_RES_GW(*res) && |
801 | DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) | 801 | DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) |
802 | rt->rt_gateway = DN_FIB_RES_GW(*res); | 802 | rt->rt_gateway = DN_FIB_RES_GW(*res); |
803 | memcpy(rt->u.dst.metrics, fi->fib_metrics, | 803 | memcpy(rt->dst.metrics, fi->fib_metrics, |
804 | sizeof(rt->u.dst.metrics)); | 804 | sizeof(rt->dst.metrics)); |
805 | } | 805 | } |
806 | rt->rt_type = res->type; | 806 | rt->rt_type = res->type; |
807 | 807 | ||
808 | if (dev != NULL && rt->u.dst.neighbour == NULL) { | 808 | if (dev != NULL && rt->dst.neighbour == NULL) { |
809 | n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); | 809 | n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); |
810 | if (IS_ERR(n)) | 810 | if (IS_ERR(n)) |
811 | return PTR_ERR(n); | 811 | return PTR_ERR(n); |
812 | rt->u.dst.neighbour = n; | 812 | rt->dst.neighbour = n; |
813 | } | 813 | } |
814 | 814 | ||
815 | if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 || | 815 | if (dst_metric(&rt->dst, RTAX_MTU) == 0 || |
816 | dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu) | 816 | dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) |
817 | rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; | 817 | rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; |
818 | mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); | 818 | mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); |
819 | if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 || | 819 | if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 || |
820 | dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss) | 820 | dst_metric(&rt->dst, RTAX_ADVMSS) > mss) |
821 | rt->u.dst.metrics[RTAX_ADVMSS-1] = mss; | 821 | rt->dst.metrics[RTAX_ADVMSS-1] = mss; |
822 | return 0; | 822 | return 0; |
823 | } | 823 | } |
824 | 824 | ||
@@ -1096,8 +1096,8 @@ make_route: | |||
1096 | if (rt == NULL) | 1096 | if (rt == NULL) |
1097 | goto e_nobufs; | 1097 | goto e_nobufs; |
1098 | 1098 | ||
1099 | atomic_set(&rt->u.dst.__refcnt, 1); | 1099 | atomic_set(&rt->dst.__refcnt, 1); |
1100 | rt->u.dst.flags = DST_HOST; | 1100 | rt->dst.flags = DST_HOST; |
1101 | 1101 | ||
1102 | rt->fl.fld_src = oldflp->fld_src; | 1102 | rt->fl.fld_src = oldflp->fld_src; |
1103 | rt->fl.fld_dst = oldflp->fld_dst; | 1103 | rt->fl.fld_dst = oldflp->fld_dst; |
@@ -1113,17 +1113,17 @@ make_route: | |||
1113 | rt->rt_dst_map = fl.fld_dst; | 1113 | rt->rt_dst_map = fl.fld_dst; |
1114 | rt->rt_src_map = fl.fld_src; | 1114 | rt->rt_src_map = fl.fld_src; |
1115 | 1115 | ||
1116 | rt->u.dst.dev = dev_out; | 1116 | rt->dst.dev = dev_out; |
1117 | dev_hold(dev_out); | 1117 | dev_hold(dev_out); |
1118 | rt->u.dst.neighbour = neigh; | 1118 | rt->dst.neighbour = neigh; |
1119 | neigh = NULL; | 1119 | neigh = NULL; |
1120 | 1120 | ||
1121 | rt->u.dst.lastuse = jiffies; | 1121 | rt->dst.lastuse = jiffies; |
1122 | rt->u.dst.output = dn_output; | 1122 | rt->dst.output = dn_output; |
1123 | rt->u.dst.input = dn_rt_bug; | 1123 | rt->dst.input = dn_rt_bug; |
1124 | rt->rt_flags = flags; | 1124 | rt->rt_flags = flags; |
1125 | if (flags & RTCF_LOCAL) | 1125 | if (flags & RTCF_LOCAL) |
1126 | rt->u.dst.input = dn_nsp_rx; | 1126 | rt->dst.input = dn_nsp_rx; |
1127 | 1127 | ||
1128 | err = dn_rt_set_next_hop(rt, &res); | 1128 | err = dn_rt_set_next_hop(rt, &res); |
1129 | if (err) | 1129 | if (err) |
@@ -1152,7 +1152,7 @@ e_nobufs: | |||
1152 | err = -ENOBUFS; | 1152 | err = -ENOBUFS; |
1153 | goto done; | 1153 | goto done; |
1154 | e_neighbour: | 1154 | e_neighbour: |
1155 | dst_free(&rt->u.dst); | 1155 | dst_free(&rt->dst); |
1156 | goto e_nobufs; | 1156 | goto e_nobufs; |
1157 | } | 1157 | } |
1158 | 1158 | ||
@@ -1168,15 +1168,15 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl | |||
1168 | if (!(flags & MSG_TRYHARD)) { | 1168 | if (!(flags & MSG_TRYHARD)) { |
1169 | rcu_read_lock_bh(); | 1169 | rcu_read_lock_bh(); |
1170 | for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; | 1170 | for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; |
1171 | rt = rcu_dereference_bh(rt->u.dst.dn_next)) { | 1171 | rt = rcu_dereference_bh(rt->dst.dn_next)) { |
1172 | if ((flp->fld_dst == rt->fl.fld_dst) && | 1172 | if ((flp->fld_dst == rt->fl.fld_dst) && |
1173 | (flp->fld_src == rt->fl.fld_src) && | 1173 | (flp->fld_src == rt->fl.fld_src) && |
1174 | (flp->mark == rt->fl.mark) && | 1174 | (flp->mark == rt->fl.mark) && |
1175 | (rt->fl.iif == 0) && | 1175 | (rt->fl.iif == 0) && |
1176 | (rt->fl.oif == flp->oif)) { | 1176 | (rt->fl.oif == flp->oif)) { |
1177 | dst_use(&rt->u.dst, jiffies); | 1177 | dst_use(&rt->dst, jiffies); |
1178 | rcu_read_unlock_bh(); | 1178 | rcu_read_unlock_bh(); |
1179 | *pprt = &rt->u.dst; | 1179 | *pprt = &rt->dst; |
1180 | return 0; | 1180 | return 0; |
1181 | } | 1181 | } |
1182 | } | 1182 | } |
@@ -1375,29 +1375,29 @@ make_route: | |||
1375 | rt->fl.iif = in_dev->ifindex; | 1375 | rt->fl.iif = in_dev->ifindex; |
1376 | rt->fl.mark = fl.mark; | 1376 | rt->fl.mark = fl.mark; |
1377 | 1377 | ||
1378 | rt->u.dst.flags = DST_HOST; | 1378 | rt->dst.flags = DST_HOST; |
1379 | rt->u.dst.neighbour = neigh; | 1379 | rt->dst.neighbour = neigh; |
1380 | rt->u.dst.dev = out_dev; | 1380 | rt->dst.dev = out_dev; |
1381 | rt->u.dst.lastuse = jiffies; | 1381 | rt->dst.lastuse = jiffies; |
1382 | rt->u.dst.output = dn_rt_bug; | 1382 | rt->dst.output = dn_rt_bug; |
1383 | switch(res.type) { | 1383 | switch(res.type) { |
1384 | case RTN_UNICAST: | 1384 | case RTN_UNICAST: |
1385 | rt->u.dst.input = dn_forward; | 1385 | rt->dst.input = dn_forward; |
1386 | break; | 1386 | break; |
1387 | case RTN_LOCAL: | 1387 | case RTN_LOCAL: |
1388 | rt->u.dst.output = dn_output; | 1388 | rt->dst.output = dn_output; |
1389 | rt->u.dst.input = dn_nsp_rx; | 1389 | rt->dst.input = dn_nsp_rx; |
1390 | rt->u.dst.dev = in_dev; | 1390 | rt->dst.dev = in_dev; |
1391 | flags |= RTCF_LOCAL; | 1391 | flags |= RTCF_LOCAL; |
1392 | break; | 1392 | break; |
1393 | default: | 1393 | default: |
1394 | case RTN_UNREACHABLE: | 1394 | case RTN_UNREACHABLE: |
1395 | case RTN_BLACKHOLE: | 1395 | case RTN_BLACKHOLE: |
1396 | rt->u.dst.input = dst_discard; | 1396 | rt->dst.input = dst_discard; |
1397 | } | 1397 | } |
1398 | rt->rt_flags = flags; | 1398 | rt->rt_flags = flags; |
1399 | if (rt->u.dst.dev) | 1399 | if (rt->dst.dev) |
1400 | dev_hold(rt->u.dst.dev); | 1400 | dev_hold(rt->dst.dev); |
1401 | 1401 | ||
1402 | err = dn_rt_set_next_hop(rt, &res); | 1402 | err = dn_rt_set_next_hop(rt, &res); |
1403 | if (err) | 1403 | if (err) |
@@ -1405,7 +1405,7 @@ make_route: | |||
1405 | 1405 | ||
1406 | hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); | 1406 | hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); |
1407 | dn_insert_route(rt, hash, &rt); | 1407 | dn_insert_route(rt, hash, &rt); |
1408 | skb_dst_set(skb, &rt->u.dst); | 1408 | skb_dst_set(skb, &rt->dst); |
1409 | 1409 | ||
1410 | done: | 1410 | done: |
1411 | if (neigh) | 1411 | if (neigh) |
@@ -1427,7 +1427,7 @@ e_nobufs: | |||
1427 | goto done; | 1427 | goto done; |
1428 | 1428 | ||
1429 | e_neighbour: | 1429 | e_neighbour: |
1430 | dst_free(&rt->u.dst); | 1430 | dst_free(&rt->dst); |
1431 | goto done; | 1431 | goto done; |
1432 | } | 1432 | } |
1433 | 1433 | ||
@@ -1442,13 +1442,13 @@ static int dn_route_input(struct sk_buff *skb) | |||
1442 | 1442 | ||
1443 | rcu_read_lock(); | 1443 | rcu_read_lock(); |
1444 | for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; | 1444 | for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; |
1445 | rt = rcu_dereference(rt->u.dst.dn_next)) { | 1445 | rt = rcu_dereference(rt->dst.dn_next)) { |
1446 | if ((rt->fl.fld_src == cb->src) && | 1446 | if ((rt->fl.fld_src == cb->src) && |
1447 | (rt->fl.fld_dst == cb->dst) && | 1447 | (rt->fl.fld_dst == cb->dst) && |
1448 | (rt->fl.oif == 0) && | 1448 | (rt->fl.oif == 0) && |
1449 | (rt->fl.mark == skb->mark) && | 1449 | (rt->fl.mark == skb->mark) && |
1450 | (rt->fl.iif == cb->iif)) { | 1450 | (rt->fl.iif == cb->iif)) { |
1451 | dst_use(&rt->u.dst, jiffies); | 1451 | dst_use(&rt->dst, jiffies); |
1452 | rcu_read_unlock(); | 1452 | rcu_read_unlock(); |
1453 | skb_dst_set(skb, (struct dst_entry *)rt); | 1453 | skb_dst_set(skb, (struct dst_entry *)rt); |
1454 | return 0; | 1454 | return 0; |
@@ -1487,8 +1487,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
1487 | r->rtm_src_len = 16; | 1487 | r->rtm_src_len = 16; |
1488 | RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src); | 1488 | RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src); |
1489 | } | 1489 | } |
1490 | if (rt->u.dst.dev) | 1490 | if (rt->dst.dev) |
1491 | RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex); | 1491 | RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex); |
1492 | /* | 1492 | /* |
1493 | * Note to self - change this if input routes reverse direction when | 1493 | * Note to self - change this if input routes reverse direction when |
1494 | * they deal only with inputs and not with replies like they do | 1494 | * they deal only with inputs and not with replies like they do |
@@ -1497,11 +1497,11 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, | |||
1497 | RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); | 1497 | RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); |
1498 | if (rt->rt_daddr != rt->rt_gateway) | 1498 | if (rt->rt_daddr != rt->rt_gateway) |
1499 | RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); | 1499 | RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); |
1500 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) | 1500 | if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) |
1501 | goto rtattr_failure; | 1501 | goto rtattr_failure; |
1502 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; | 1502 | expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; |
1503 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires, | 1503 | if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires, |
1504 | rt->u.dst.error) < 0) | 1504 | rt->dst.error) < 0) |
1505 | goto rtattr_failure; | 1505 | goto rtattr_failure; |
1506 | if (rt->fl.iif) | 1506 | if (rt->fl.iif) |
1507 | RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); | 1507 | RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); |
@@ -1568,8 +1568,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void | |||
1568 | local_bh_enable(); | 1568 | local_bh_enable(); |
1569 | memset(cb, 0, sizeof(struct dn_skb_cb)); | 1569 | memset(cb, 0, sizeof(struct dn_skb_cb)); |
1570 | rt = (struct dn_route *)skb_dst(skb); | 1570 | rt = (struct dn_route *)skb_dst(skb); |
1571 | if (!err && -rt->u.dst.error) | 1571 | if (!err && -rt->dst.error) |
1572 | err = rt->u.dst.error; | 1572 | err = rt->dst.error; |
1573 | } else { | 1573 | } else { |
1574 | int oif = 0; | 1574 | int oif = 0; |
1575 | if (rta[RTA_OIF - 1]) | 1575 | if (rta[RTA_OIF - 1]) |
@@ -1583,7 +1583,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void | |||
1583 | skb->dev = NULL; | 1583 | skb->dev = NULL; |
1584 | if (err) | 1584 | if (err) |
1585 | goto out_free; | 1585 | goto out_free; |
1586 | skb_dst_set(skb, &rt->u.dst); | 1586 | skb_dst_set(skb, &rt->dst); |
1587 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 1587 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
1588 | rt->rt_flags |= RTCF_NOTIFY; | 1588 | rt->rt_flags |= RTCF_NOTIFY; |
1589 | 1589 | ||
@@ -1632,10 +1632,10 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1632 | rcu_read_lock_bh(); | 1632 | rcu_read_lock_bh(); |
1633 | for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; | 1633 | for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; |
1634 | rt; | 1634 | rt; |
1635 | rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) { | 1635 | rt = rcu_dereference_bh(rt->dst.dn_next), idx++) { |
1636 | if (idx < s_idx) | 1636 | if (idx < s_idx) |
1637 | continue; | 1637 | continue; |
1638 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 1638 | skb_dst_set(skb, dst_clone(&rt->dst)); |
1639 | if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, | 1639 | if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, |
1640 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 1640 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
1641 | 1, NLM_F_MULTI) <= 0) { | 1641 | 1, NLM_F_MULTI) <= 0) { |
@@ -1678,7 +1678,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou | |||
1678 | { | 1678 | { |
1679 | struct dn_rt_cache_iter_state *s = seq->private; | 1679 | struct dn_rt_cache_iter_state *s = seq->private; |
1680 | 1680 | ||
1681 | rt = rt->u.dst.dn_next; | 1681 | rt = rt->dst.dn_next; |
1682 | while(!rt) { | 1682 | while(!rt) { |
1683 | rcu_read_unlock_bh(); | 1683 | rcu_read_unlock_bh(); |
1684 | if (--s->bucket < 0) | 1684 | if (--s->bucket < 0) |
@@ -1719,12 +1719,12 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v) | |||
1719 | char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; | 1719 | char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; |
1720 | 1720 | ||
1721 | seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", | 1721 | seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", |
1722 | rt->u.dst.dev ? rt->u.dst.dev->name : "*", | 1722 | rt->dst.dev ? rt->dst.dev->name : "*", |
1723 | dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), | 1723 | dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), |
1724 | dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), | 1724 | dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), |
1725 | atomic_read(&rt->u.dst.__refcnt), | 1725 | atomic_read(&rt->dst.__refcnt), |
1726 | rt->u.dst.__use, | 1726 | rt->dst.__use, |
1727 | (int) dst_metric(&rt->u.dst, RTAX_RTT)); | 1727 | (int) dst_metric(&rt->dst, RTAX_RTT)); |
1728 | return 0; | 1728 | return 0; |
1729 | } | 1729 | } |
1730 | 1730 | ||
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 2a5a8053e000..dc54bd0d083b 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | static const struct proto_ops econet_ops; | 49 | static const struct proto_ops econet_ops; |
50 | static struct hlist_head econet_sklist; | 50 | static struct hlist_head econet_sklist; |
51 | static DEFINE_RWLOCK(econet_lock); | 51 | static DEFINE_SPINLOCK(econet_lock); |
52 | static DEFINE_MUTEX(econet_mutex); | 52 | static DEFINE_MUTEX(econet_mutex); |
53 | 53 | ||
54 | /* Since there are only 256 possible network numbers (or fewer, depends | 54 | /* Since there are only 256 possible network numbers (or fewer, depends |
@@ -98,16 +98,16 @@ struct ec_cb | |||
98 | 98 | ||
99 | static void econet_remove_socket(struct hlist_head *list, struct sock *sk) | 99 | static void econet_remove_socket(struct hlist_head *list, struct sock *sk) |
100 | { | 100 | { |
101 | write_lock_bh(&econet_lock); | 101 | spin_lock_bh(&econet_lock); |
102 | sk_del_node_init(sk); | 102 | sk_del_node_init(sk); |
103 | write_unlock_bh(&econet_lock); | 103 | spin_unlock_bh(&econet_lock); |
104 | } | 104 | } |
105 | 105 | ||
106 | static void econet_insert_socket(struct hlist_head *list, struct sock *sk) | 106 | static void econet_insert_socket(struct hlist_head *list, struct sock *sk) |
107 | { | 107 | { |
108 | write_lock_bh(&econet_lock); | 108 | spin_lock_bh(&econet_lock); |
109 | sk_add_node(sk, list); | 109 | sk_add_node(sk, list); |
110 | write_unlock_bh(&econet_lock); | 110 | spin_unlock_bh(&econet_lock); |
111 | } | 111 | } |
112 | 112 | ||
113 | /* | 113 | /* |
@@ -782,15 +782,19 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char | |||
782 | struct sock *sk; | 782 | struct sock *sk; |
783 | struct hlist_node *node; | 783 | struct hlist_node *node; |
784 | 784 | ||
785 | spin_lock(&econet_lock); | ||
785 | sk_for_each(sk, node, &econet_sklist) { | 786 | sk_for_each(sk, node, &econet_sklist) { |
786 | struct econet_sock *opt = ec_sk(sk); | 787 | struct econet_sock *opt = ec_sk(sk); |
787 | if ((opt->port == port || opt->port == 0) && | 788 | if ((opt->port == port || opt->port == 0) && |
788 | (opt->station == station || opt->station == 0) && | 789 | (opt->station == station || opt->station == 0) && |
789 | (opt->net == net || opt->net == 0)) | 790 | (opt->net == net || opt->net == 0)) { |
791 | sock_hold(sk); | ||
790 | goto found; | 792 | goto found; |
793 | } | ||
791 | } | 794 | } |
792 | sk = NULL; | 795 | sk = NULL; |
793 | found: | 796 | found: |
797 | spin_unlock(&econet_lock); | ||
794 | return sk; | 798 | return sk; |
795 | } | 799 | } |
796 | 800 | ||
@@ -852,7 +856,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) | |||
852 | { | 856 | { |
853 | struct iphdr *ip = ip_hdr(skb); | 857 | struct iphdr *ip = ip_hdr(skb); |
854 | unsigned char stn = ntohl(ip->saddr) & 0xff; | 858 | unsigned char stn = ntohl(ip->saddr) & 0xff; |
855 | struct sock *sk; | 859 | struct sock *sk = NULL; |
856 | struct sk_buff *newskb; | 860 | struct sk_buff *newskb; |
857 | struct ec_device *edev = skb->dev->ec_ptr; | 861 | struct ec_device *edev = skb->dev->ec_ptr; |
858 | 862 | ||
@@ -882,10 +886,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) | |||
882 | } | 886 | } |
883 | 887 | ||
884 | aun_send_response(ip->saddr, ah->handle, 3, 0); | 888 | aun_send_response(ip->saddr, ah->handle, 3, 0); |
889 | sock_put(sk); | ||
885 | return; | 890 | return; |
886 | 891 | ||
887 | bad: | 892 | bad: |
888 | aun_send_response(ip->saddr, ah->handle, 4, 0); | 893 | aun_send_response(ip->saddr, ah->handle, 4, 0); |
894 | if (sk) | ||
895 | sock_put(sk); | ||
889 | } | 896 | } |
890 | 897 | ||
891 | /* | 898 | /* |
@@ -1050,7 +1057,7 @@ release: | |||
1050 | static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 1057 | static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) |
1051 | { | 1058 | { |
1052 | struct ec_framehdr *hdr; | 1059 | struct ec_framehdr *hdr; |
1053 | struct sock *sk; | 1060 | struct sock *sk = NULL; |
1054 | struct ec_device *edev = dev->ec_ptr; | 1061 | struct ec_device *edev = dev->ec_ptr; |
1055 | 1062 | ||
1056 | if (!net_eq(dev_net(dev), &init_net)) | 1063 | if (!net_eq(dev_net(dev), &init_net)) |
@@ -1085,10 +1092,12 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet | |||
1085 | if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, | 1092 | if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, |
1086 | hdr->port)) | 1093 | hdr->port)) |
1087 | goto drop; | 1094 | goto drop; |
1088 | 1095 | sock_put(sk); | |
1089 | return NET_RX_SUCCESS; | 1096 | return NET_RX_SUCCESS; |
1090 | 1097 | ||
1091 | drop: | 1098 | drop: |
1099 | if (sk) | ||
1100 | sock_put(sk); | ||
1092 | kfree_skb(skb); | 1101 | kfree_skb(skb); |
1093 | return NET_RX_DROP; | 1102 | return NET_RX_DROP; |
1094 | } | 1103 | } |
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 61ec0329316c..215c83986a9d 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c | |||
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(eth_rebuild_header); | |||
158 | __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) | 158 | __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) |
159 | { | 159 | { |
160 | struct ethhdr *eth; | 160 | struct ethhdr *eth; |
161 | unsigned char *rawp; | ||
162 | 161 | ||
163 | skb->dev = dev; | 162 | skb->dev = dev; |
164 | skb_reset_mac_header(skb); | 163 | skb_reset_mac_header(skb); |
@@ -199,15 +198,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
199 | if (ntohs(eth->h_proto) >= 1536) | 198 | if (ntohs(eth->h_proto) >= 1536) |
200 | return eth->h_proto; | 199 | return eth->h_proto; |
201 | 200 | ||
202 | rawp = skb->data; | ||
203 | |||
204 | /* | 201 | /* |
205 | * This is a magic hack to spot IPX packets. Older Novell breaks | 202 | * This is a magic hack to spot IPX packets. Older Novell breaks |
206 | * the protocol design and runs IPX over 802.3 without an 802.2 LLC | 203 | * the protocol design and runs IPX over 802.3 without an 802.2 LLC |
207 | * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This | 204 | * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This |
208 | * won't work for fault tolerant netware but does for the rest. | 205 | * won't work for fault tolerant netware but does for the rest. |
209 | */ | 206 | */ |
210 | if (*(unsigned short *)rawp == 0xFFFF) | 207 | if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF) |
211 | return htons(ETH_P_802_3); | 208 | return htons(ETH_P_802_3); |
212 | 209 | ||
213 | /* | 210 | /* |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 8e3a1fd938ab..7c3a7d191249 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -303,7 +303,7 @@ config ARPD | |||
303 | If unsure, say N. | 303 | If unsure, say N. |
304 | 304 | ||
305 | config SYN_COOKIES | 305 | config SYN_COOKIES |
306 | bool "IP: TCP syncookie support (disabled per default)" | 306 | bool "IP: TCP syncookie support" |
307 | ---help--- | 307 | ---help--- |
308 | Normal TCP/IP networking is open to an attack known as "SYN | 308 | Normal TCP/IP networking is open to an attack known as "SYN |
309 | flooding". This denial-of-service attack prevents legitimate remote | 309 | flooding". This denial-of-service attack prevents legitimate remote |
@@ -328,13 +328,13 @@ config SYN_COOKIES | |||
328 | server is really overloaded. If this happens frequently better turn | 328 | server is really overloaded. If this happens frequently better turn |
329 | them off. | 329 | them off. |
330 | 330 | ||
331 | If you say Y here, note that SYN cookies aren't enabled by default; | 331 | If you say Y here, you can disable SYN cookies at run time by |
332 | you can enable them by saying Y to "/proc file system support" and | 332 | saying Y to "/proc file system support" and |
333 | "Sysctl support" below and executing the command | 333 | "Sysctl support" below and executing the command |
334 | 334 | ||
335 | echo 1 >/proc/sys/net/ipv4/tcp_syncookies | 335 | echo 0 > /proc/sys/net/ipv4/tcp_syncookies |
336 | 336 | ||
337 | at boot time after the /proc file system has been mounted. | 337 | after the /proc file system has been mounted. |
338 | 338 | ||
339 | If unsure, say N. | 339 | If unsure, say N. |
340 | 340 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 551ce564b035..d99e7e020189 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1100,7 +1100,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) | |||
1100 | if (err) | 1100 | if (err) |
1101 | return err; | 1101 | return err; |
1102 | 1102 | ||
1103 | sk_setup_caps(sk, &rt->u.dst); | 1103 | sk_setup_caps(sk, &rt->dst); |
1104 | 1104 | ||
1105 | new_saddr = rt->rt_src; | 1105 | new_saddr = rt->rt_src; |
1106 | 1106 | ||
@@ -1166,7 +1166,7 @@ int inet_sk_rebuild_header(struct sock *sk) | |||
1166 | err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0); | 1166 | err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0); |
1167 | } | 1167 | } |
1168 | if (!err) | 1168 | if (!err) |
1169 | sk_setup_caps(sk, &rt->u.dst); | 1169 | sk_setup_caps(sk, &rt->dst); |
1170 | else { | 1170 | else { |
1171 | /* Routing failed... */ | 1171 | /* Routing failed... */ |
1172 | sk->sk_route_caps = 0; | 1172 | sk->sk_route_caps = 0; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index f094b75810db..cf78f41830ca 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -427,7 +427,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) | |||
427 | 427 | ||
428 | if (ip_route_output_key(net, &rt, &fl) < 0) | 428 | if (ip_route_output_key(net, &rt, &fl) < 0) |
429 | return 1; | 429 | return 1; |
430 | if (rt->u.dst.dev != dev) { | 430 | if (rt->dst.dev != dev) { |
431 | NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); | 431 | NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); |
432 | flag = 1; | 432 | flag = 1; |
433 | } | 433 | } |
@@ -532,7 +532,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, | |||
532 | struct in_device *out_dev; | 532 | struct in_device *out_dev; |
533 | int imi, omi = -1; | 533 | int imi, omi = -1; |
534 | 534 | ||
535 | if (rt->u.dst.dev == dev) | 535 | if (rt->dst.dev == dev) |
536 | return 0; | 536 | return 0; |
537 | 537 | ||
538 | if (!IN_DEV_PROXY_ARP(in_dev)) | 538 | if (!IN_DEV_PROXY_ARP(in_dev)) |
@@ -545,10 +545,10 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, | |||
545 | 545 | ||
546 | /* place to check for proxy_arp for routes */ | 546 | /* place to check for proxy_arp for routes */ |
547 | 547 | ||
548 | if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) { | 548 | out_dev = __in_dev_get_rcu(rt->dst.dev); |
549 | if (out_dev) | ||
549 | omi = IN_DEV_MEDIUM_ID(out_dev); | 550 | omi = IN_DEV_MEDIUM_ID(out_dev); |
550 | in_dev_put(out_dev); | 551 | |
551 | } | ||
552 | return (omi != imi && omi != -1); | 552 | return (omi != imi && omi != -1); |
553 | } | 553 | } |
554 | 554 | ||
@@ -576,7 +576,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev, | |||
576 | __be32 sip, __be32 tip) | 576 | __be32 sip, __be32 tip) |
577 | { | 577 | { |
578 | /* Private VLAN is only concerned about the same ethernet segment */ | 578 | /* Private VLAN is only concerned about the same ethernet segment */ |
579 | if (rt->u.dst.dev != dev) | 579 | if (rt->dst.dev != dev) |
580 | return 0; | 580 | return 0; |
581 | 581 | ||
582 | /* Don't reply on self probes (often done by windowz boxes)*/ | 582 | /* Don't reply on self probes (often done by windowz boxes)*/ |
@@ -741,7 +741,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, | |||
741 | static int arp_process(struct sk_buff *skb) | 741 | static int arp_process(struct sk_buff *skb) |
742 | { | 742 | { |
743 | struct net_device *dev = skb->dev; | 743 | struct net_device *dev = skb->dev; |
744 | struct in_device *in_dev = in_dev_get(dev); | 744 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
745 | struct arphdr *arp; | 745 | struct arphdr *arp; |
746 | unsigned char *arp_ptr; | 746 | unsigned char *arp_ptr; |
747 | struct rtable *rt; | 747 | struct rtable *rt; |
@@ -890,7 +890,6 @@ static int arp_process(struct sk_buff *skb) | |||
890 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); | 890 | arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); |
891 | } else { | 891 | } else { |
892 | pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); | 892 | pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); |
893 | in_dev_put(in_dev); | ||
894 | return 0; | 893 | return 0; |
895 | } | 894 | } |
896 | goto out; | 895 | goto out; |
@@ -936,8 +935,6 @@ static int arp_process(struct sk_buff *skb) | |||
936 | } | 935 | } |
937 | 936 | ||
938 | out: | 937 | out: |
939 | if (in_dev) | ||
940 | in_dev_put(in_dev); | ||
941 | consume_skb(skb); | 938 | consume_skb(skb); |
942 | return 0; | 939 | return 0; |
943 | } | 940 | } |
@@ -1045,7 +1042,7 @@ static int arp_req_set(struct net *net, struct arpreq *r, | |||
1045 | struct rtable * rt; | 1042 | struct rtable * rt; |
1046 | if ((err = ip_route_output_key(net, &rt, &fl)) != 0) | 1043 | if ((err = ip_route_output_key(net, &rt, &fl)) != 0) |
1047 | return err; | 1044 | return err; |
1048 | dev = rt->u.dst.dev; | 1045 | dev = rt->dst.dev; |
1049 | ip_rt_put(rt); | 1046 | ip_rt_put(rt); |
1050 | if (!dev) | 1047 | if (!dev) |
1051 | return -EINVAL; | 1048 | return -EINVAL; |
@@ -1152,7 +1149,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, | |||
1152 | struct rtable * rt; | 1149 | struct rtable * rt; |
1153 | if ((err = ip_route_output_key(net, &rt, &fl)) != 0) | 1150 | if ((err = ip_route_output_key(net, &rt, &fl)) != 0) |
1154 | return err; | 1151 | return err; |
1155 | dev = rt->u.dst.dev; | 1152 | dev = rt->dst.dev; |
1156 | ip_rt_put(rt); | 1153 | ip_rt_put(rt); |
1157 | if (!dev) | 1154 | if (!dev) |
1158 | return -EINVAL; | 1155 | return -EINVAL; |
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index fb2465811b48..fe3daa7f07a9 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -69,7 +69,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
69 | sk->sk_state = TCP_ESTABLISHED; | 69 | sk->sk_state = TCP_ESTABLISHED; |
70 | inet->inet_id = jiffies; | 70 | inet->inet_id = jiffies; |
71 | 71 | ||
72 | sk_dst_set(sk, &rt->u.dst); | 72 | sk_dst_set(sk, &rt->dst); |
73 | return(0); | 73 | return(0); |
74 | } | 74 | } |
75 | 75 | ||
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4f0ed458c883..e830f7a123bd 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -284,7 +284,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
284 | if (no_addr) | 284 | if (no_addr) |
285 | goto last_resort; | 285 | goto last_resort; |
286 | if (rpf == 1) | 286 | if (rpf == 1) |
287 | goto e_inval; | 287 | goto e_rpf; |
288 | fl.oif = dev->ifindex; | 288 | fl.oif = dev->ifindex; |
289 | 289 | ||
290 | ret = 0; | 290 | ret = 0; |
@@ -299,7 +299,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | |||
299 | 299 | ||
300 | last_resort: | 300 | last_resort: |
301 | if (rpf) | 301 | if (rpf) |
302 | goto e_inval; | 302 | goto e_rpf; |
303 | *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); | 303 | *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); |
304 | *itag = 0; | 304 | *itag = 0; |
305 | return 0; | 305 | return 0; |
@@ -308,6 +308,8 @@ e_inval_res: | |||
308 | fib_res_put(&res); | 308 | fib_res_put(&res); |
309 | e_inval: | 309 | e_inval: |
310 | return -EINVAL; | 310 | return -EINVAL; |
311 | e_rpf: | ||
312 | return -EXDEV; | ||
311 | } | 313 | } |
312 | 314 | ||
313 | static inline __be32 sk_extract_addr(struct sockaddr *addr) | 315 | static inline __be32 sk_extract_addr(struct sockaddr *addr) |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index d65e9215bcd7..7569b21a3a2d 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -271,7 +271,7 @@ int xrlim_allow(struct dst_entry *dst, int timeout) | |||
271 | static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt, | 271 | static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt, |
272 | int type, int code) | 272 | int type, int code) |
273 | { | 273 | { |
274 | struct dst_entry *dst = &rt->u.dst; | 274 | struct dst_entry *dst = &rt->dst; |
275 | int rc = 1; | 275 | int rc = 1; |
276 | 276 | ||
277 | if (type > NR_ICMP_TYPES) | 277 | if (type > NR_ICMP_TYPES) |
@@ -327,7 +327,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
327 | struct sock *sk; | 327 | struct sock *sk; |
328 | struct sk_buff *skb; | 328 | struct sk_buff *skb; |
329 | 329 | ||
330 | sk = icmp_sk(dev_net((*rt)->u.dst.dev)); | 330 | sk = icmp_sk(dev_net((*rt)->dst.dev)); |
331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, | 331 | if (ip_append_data(sk, icmp_glue_bits, icmp_param, |
332 | icmp_param->data_len+icmp_param->head_len, | 332 | icmp_param->data_len+icmp_param->head_len, |
333 | icmp_param->head_len, | 333 | icmp_param->head_len, |
@@ -359,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | |||
359 | { | 359 | { |
360 | struct ipcm_cookie ipc; | 360 | struct ipcm_cookie ipc; |
361 | struct rtable *rt = skb_rtable(skb); | 361 | struct rtable *rt = skb_rtable(skb); |
362 | struct net *net = dev_net(rt->u.dst.dev); | 362 | struct net *net = dev_net(rt->dst.dev); |
363 | struct sock *sk; | 363 | struct sock *sk; |
364 | struct inet_sock *inet; | 364 | struct inet_sock *inet; |
365 | __be32 daddr; | 365 | __be32 daddr; |
@@ -427,7 +427,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
427 | 427 | ||
428 | if (!rt) | 428 | if (!rt) |
429 | goto out; | 429 | goto out; |
430 | net = dev_net(rt->u.dst.dev); | 430 | net = dev_net(rt->dst.dev); |
431 | 431 | ||
432 | /* | 432 | /* |
433 | * Find the original header. It is expected to be valid, of course. | 433 | * Find the original header. It is expected to be valid, of course. |
@@ -596,9 +596,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
596 | /* Ugh! */ | 596 | /* Ugh! */ |
597 | orefdst = skb_in->_skb_refdst; /* save old refdst */ | 597 | orefdst = skb_in->_skb_refdst; /* save old refdst */ |
598 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, | 598 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, |
599 | RT_TOS(tos), rt2->u.dst.dev); | 599 | RT_TOS(tos), rt2->dst.dev); |
600 | 600 | ||
601 | dst_release(&rt2->u.dst); | 601 | dst_release(&rt2->dst); |
602 | rt2 = skb_rtable(skb_in); | 602 | rt2 = skb_rtable(skb_in); |
603 | skb_in->_skb_refdst = orefdst; /* restore old refdst */ | 603 | skb_in->_skb_refdst = orefdst; /* restore old refdst */ |
604 | } | 604 | } |
@@ -610,7 +610,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
610 | XFRM_LOOKUP_ICMP); | 610 | XFRM_LOOKUP_ICMP); |
611 | switch (err) { | 611 | switch (err) { |
612 | case 0: | 612 | case 0: |
613 | dst_release(&rt->u.dst); | 613 | dst_release(&rt->dst); |
614 | rt = rt2; | 614 | rt = rt2; |
615 | break; | 615 | break; |
616 | case -EPERM: | 616 | case -EPERM: |
@@ -629,7 +629,7 @@ route_done: | |||
629 | 629 | ||
630 | /* RFC says return as much as we can without exceeding 576 bytes. */ | 630 | /* RFC says return as much as we can without exceeding 576 bytes. */ |
631 | 631 | ||
632 | room = dst_mtu(&rt->u.dst); | 632 | room = dst_mtu(&rt->dst); |
633 | if (room > 576) | 633 | if (room > 576) |
634 | room = 576; | 634 | room = 576; |
635 | room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; | 635 | room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; |
@@ -925,6 +925,7 @@ static void icmp_address(struct sk_buff *skb) | |||
925 | /* | 925 | /* |
926 | * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain | 926 | * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain |
927 | * loudly if an inconsistency is found. | 927 | * loudly if an inconsistency is found. |
928 | * called with rcu_read_lock() | ||
928 | */ | 929 | */ |
929 | 930 | ||
930 | static void icmp_address_reply(struct sk_buff *skb) | 931 | static void icmp_address_reply(struct sk_buff *skb) |
@@ -935,12 +936,12 @@ static void icmp_address_reply(struct sk_buff *skb) | |||
935 | struct in_ifaddr *ifa; | 936 | struct in_ifaddr *ifa; |
936 | 937 | ||
937 | if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) | 938 | if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) |
938 | goto out; | 939 | return; |
939 | 940 | ||
940 | in_dev = in_dev_get(dev); | 941 | in_dev = __in_dev_get_rcu(dev); |
941 | if (!in_dev) | 942 | if (!in_dev) |
942 | goto out; | 943 | return; |
943 | rcu_read_lock(); | 944 | |
944 | if (in_dev->ifa_list && | 945 | if (in_dev->ifa_list && |
945 | IN_DEV_LOG_MARTIANS(in_dev) && | 946 | IN_DEV_LOG_MARTIANS(in_dev) && |
946 | IN_DEV_FORWARD(in_dev)) { | 947 | IN_DEV_FORWARD(in_dev)) { |
@@ -958,9 +959,6 @@ static void icmp_address_reply(struct sk_buff *skb) | |||
958 | mp, dev->name, &rt->rt_src); | 959 | mp, dev->name, &rt->rt_src); |
959 | } | 960 | } |
960 | } | 961 | } |
961 | rcu_read_unlock(); | ||
962 | in_dev_put(in_dev); | ||
963 | out:; | ||
964 | } | 962 | } |
965 | 963 | ||
966 | static void icmp_discard(struct sk_buff *skb) | 964 | static void icmp_discard(struct sk_buff *skb) |
@@ -974,7 +972,7 @@ int icmp_rcv(struct sk_buff *skb) | |||
974 | { | 972 | { |
975 | struct icmphdr *icmph; | 973 | struct icmphdr *icmph; |
976 | struct rtable *rt = skb_rtable(skb); | 974 | struct rtable *rt = skb_rtable(skb); |
977 | struct net *net = dev_net(rt->u.dst.dev); | 975 | struct net *net = dev_net(rt->dst.dev); |
978 | 976 | ||
979 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 977 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
980 | struct sec_path *sp = skb_sec_path(skb); | 978 | struct sec_path *sp = skb_sec_path(skb); |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 5fff865a4fa7..b5580d422994 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -312,7 +312,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
312 | return NULL; | 312 | return NULL; |
313 | } | 313 | } |
314 | 314 | ||
315 | skb_dst_set(skb, &rt->u.dst); | 315 | skb_dst_set(skb, &rt->dst); |
316 | skb->dev = dev; | 316 | skb->dev = dev; |
317 | 317 | ||
318 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 318 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
@@ -330,7 +330,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
330 | pip->saddr = rt->rt_src; | 330 | pip->saddr = rt->rt_src; |
331 | pip->protocol = IPPROTO_IGMP; | 331 | pip->protocol = IPPROTO_IGMP; |
332 | pip->tot_len = 0; /* filled in later */ | 332 | pip->tot_len = 0; /* filled in later */ |
333 | ip_select_ident(pip, &rt->u.dst, NULL); | 333 | ip_select_ident(pip, &rt->dst, NULL); |
334 | ((u8*)&pip[1])[0] = IPOPT_RA; | 334 | ((u8*)&pip[1])[0] = IPOPT_RA; |
335 | ((u8*)&pip[1])[1] = 4; | 335 | ((u8*)&pip[1])[1] = 4; |
336 | ((u8*)&pip[1])[2] = 0; | 336 | ((u8*)&pip[1])[2] = 0; |
@@ -660,7 +660,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
660 | return -1; | 660 | return -1; |
661 | } | 661 | } |
662 | 662 | ||
663 | skb_dst_set(skb, &rt->u.dst); | 663 | skb_dst_set(skb, &rt->dst); |
664 | 664 | ||
665 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 665 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
666 | 666 | ||
@@ -676,7 +676,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
676 | iph->daddr = dst; | 676 | iph->daddr = dst; |
677 | iph->saddr = rt->rt_src; | 677 | iph->saddr = rt->rt_src; |
678 | iph->protocol = IPPROTO_IGMP; | 678 | iph->protocol = IPPROTO_IGMP; |
679 | ip_select_ident(iph, &rt->u.dst, NULL); | 679 | ip_select_ident(iph, &rt->dst, NULL); |
680 | ((u8*)&iph[1])[0] = IPOPT_RA; | 680 | ((u8*)&iph[1])[0] = IPOPT_RA; |
681 | ((u8*)&iph[1])[1] = 4; | 681 | ((u8*)&iph[1])[1] = 4; |
682 | ((u8*)&iph[1])[2] = 0; | 682 | ((u8*)&iph[1])[2] = 0; |
@@ -916,18 +916,19 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, | |||
916 | read_unlock(&in_dev->mc_list_lock); | 916 | read_unlock(&in_dev->mc_list_lock); |
917 | } | 917 | } |
918 | 918 | ||
919 | /* called in rcu_read_lock() section */ | ||
919 | int igmp_rcv(struct sk_buff *skb) | 920 | int igmp_rcv(struct sk_buff *skb) |
920 | { | 921 | { |
921 | /* This basically follows the spec line by line -- see RFC1112 */ | 922 | /* This basically follows the spec line by line -- see RFC1112 */ |
922 | struct igmphdr *ih; | 923 | struct igmphdr *ih; |
923 | struct in_device *in_dev = in_dev_get(skb->dev); | 924 | struct in_device *in_dev = __in_dev_get_rcu(skb->dev); |
924 | int len = skb->len; | 925 | int len = skb->len; |
925 | 926 | ||
926 | if (in_dev == NULL) | 927 | if (in_dev == NULL) |
927 | goto drop; | 928 | goto drop; |
928 | 929 | ||
929 | if (!pskb_may_pull(skb, sizeof(struct igmphdr))) | 930 | if (!pskb_may_pull(skb, sizeof(struct igmphdr))) |
930 | goto drop_ref; | 931 | goto drop; |
931 | 932 | ||
932 | switch (skb->ip_summed) { | 933 | switch (skb->ip_summed) { |
933 | case CHECKSUM_COMPLETE: | 934 | case CHECKSUM_COMPLETE: |
@@ -937,7 +938,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
937 | case CHECKSUM_NONE: | 938 | case CHECKSUM_NONE: |
938 | skb->csum = 0; | 939 | skb->csum = 0; |
939 | if (__skb_checksum_complete(skb)) | 940 | if (__skb_checksum_complete(skb)) |
940 | goto drop_ref; | 941 | goto drop; |
941 | } | 942 | } |
942 | 943 | ||
943 | ih = igmp_hdr(skb); | 944 | ih = igmp_hdr(skb); |
@@ -957,7 +958,6 @@ int igmp_rcv(struct sk_buff *skb) | |||
957 | break; | 958 | break; |
958 | case IGMP_PIM: | 959 | case IGMP_PIM: |
959 | #ifdef CONFIG_IP_PIMSM_V1 | 960 | #ifdef CONFIG_IP_PIMSM_V1 |
960 | in_dev_put(in_dev); | ||
961 | return pim_rcv_v1(skb); | 961 | return pim_rcv_v1(skb); |
962 | #endif | 962 | #endif |
963 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 963 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
@@ -971,8 +971,6 @@ int igmp_rcv(struct sk_buff *skb) | |||
971 | break; | 971 | break; |
972 | } | 972 | } |
973 | 973 | ||
974 | drop_ref: | ||
975 | in_dev_put(in_dev); | ||
976 | drop: | 974 | drop: |
977 | kfree_skb(skb); | 975 | kfree_skb(skb); |
978 | return 0; | 976 | return 0; |
@@ -1427,7 +1425,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) | |||
1427 | } | 1425 | } |
1428 | 1426 | ||
1429 | if (!dev && !ip_route_output_key(net, &rt, &fl)) { | 1427 | if (!dev && !ip_route_output_key(net, &rt, &fl)) { |
1430 | dev = rt->u.dst.dev; | 1428 | dev = rt->dst.dev; |
1431 | ip_rt_put(rt); | 1429 | ip_rt_put(rt); |
1432 | } | 1430 | } |
1433 | if (dev) { | 1431 | if (dev) { |
@@ -1646,8 +1644,7 @@ static int sf_setstate(struct ip_mc_list *pmc) | |||
1646 | if (dpsf->sf_inaddr == psf->sf_inaddr) | 1644 | if (dpsf->sf_inaddr == psf->sf_inaddr) |
1647 | break; | 1645 | break; |
1648 | if (!dpsf) { | 1646 | if (!dpsf) { |
1649 | dpsf = (struct ip_sf_list *) | 1647 | dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); |
1650 | kmalloc(sizeof(*dpsf), GFP_ATOMIC); | ||
1651 | if (!dpsf) | 1648 | if (!dpsf) |
1652 | continue; | 1649 | continue; |
1653 | *dpsf = *psf; | 1650 | *dpsf = *psf; |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 70eb3507c406..57c9e4d7b805 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -383,7 +383,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, | |||
383 | goto no_route; | 383 | goto no_route; |
384 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 384 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
385 | goto route_err; | 385 | goto route_err; |
386 | return &rt->u.dst; | 386 | return &rt->dst; |
387 | 387 | ||
388 | route_err: | 388 | route_err: |
389 | ip_rt_put(rt); | 389 | ip_rt_put(rt); |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 6bcfe52a9c87..035673fd42d4 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -70,17 +70,25 @@ | |||
70 | static struct kmem_cache *peer_cachep __read_mostly; | 70 | static struct kmem_cache *peer_cachep __read_mostly; |
71 | 71 | ||
72 | #define node_height(x) x->avl_height | 72 | #define node_height(x) x->avl_height |
73 | static struct inet_peer peer_fake_node = { | 73 | |
74 | .avl_left = &peer_fake_node, | 74 | #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) |
75 | .avl_right = &peer_fake_node, | 75 | static const struct inet_peer peer_fake_node = { |
76 | .avl_left = peer_avl_empty, | ||
77 | .avl_right = peer_avl_empty, | ||
76 | .avl_height = 0 | 78 | .avl_height = 0 |
77 | }; | 79 | }; |
78 | #define peer_avl_empty (&peer_fake_node) | 80 | |
79 | static struct inet_peer *peer_root = peer_avl_empty; | 81 | static struct { |
80 | static DEFINE_RWLOCK(peer_pool_lock); | 82 | struct inet_peer *root; |
83 | rwlock_t lock; | ||
84 | int total; | ||
85 | } peers = { | ||
86 | .root = peer_avl_empty, | ||
87 | .lock = __RW_LOCK_UNLOCKED(peers.lock), | ||
88 | .total = 0, | ||
89 | }; | ||
81 | #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ | 90 | #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ |
82 | 91 | ||
83 | static int peer_total; | ||
84 | /* Exported for sysctl_net_ipv4. */ | 92 | /* Exported for sysctl_net_ipv4. */ |
85 | int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more | 93 | int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more |
86 | * aggressively at this stage */ | 94 | * aggressively at this stage */ |
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min | |||
89 | int inet_peer_gc_mintime __read_mostly = 10 * HZ; | 97 | int inet_peer_gc_mintime __read_mostly = 10 * HZ; |
90 | int inet_peer_gc_maxtime __read_mostly = 120 * HZ; | 98 | int inet_peer_gc_maxtime __read_mostly = 120 * HZ; |
91 | 99 | ||
92 | static LIST_HEAD(unused_peers); | 100 | static struct { |
93 | static DEFINE_SPINLOCK(inet_peer_unused_lock); | 101 | struct list_head list; |
102 | spinlock_t lock; | ||
103 | } unused_peers = { | ||
104 | .list = LIST_HEAD_INIT(unused_peers.list), | ||
105 | .lock = __SPIN_LOCK_UNLOCKED(unused_peers.lock), | ||
106 | }; | ||
94 | 107 | ||
95 | static void peer_check_expire(unsigned long dummy); | 108 | static void peer_check_expire(unsigned long dummy); |
96 | static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0); | 109 | static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0); |
@@ -131,9 +144,11 @@ void __init inet_initpeers(void) | |||
131 | /* Called with or without local BH being disabled. */ | 144 | /* Called with or without local BH being disabled. */ |
132 | static void unlink_from_unused(struct inet_peer *p) | 145 | static void unlink_from_unused(struct inet_peer *p) |
133 | { | 146 | { |
134 | spin_lock_bh(&inet_peer_unused_lock); | 147 | if (!list_empty(&p->unused)) { |
135 | list_del_init(&p->unused); | 148 | spin_lock_bh(&unused_peers.lock); |
136 | spin_unlock_bh(&inet_peer_unused_lock); | 149 | list_del_init(&p->unused); |
150 | spin_unlock_bh(&unused_peers.lock); | ||
151 | } | ||
137 | } | 152 | } |
138 | 153 | ||
139 | /* | 154 | /* |
@@ -146,9 +161,9 @@ static void unlink_from_unused(struct inet_peer *p) | |||
146 | struct inet_peer *u, **v; \ | 161 | struct inet_peer *u, **v; \ |
147 | if (_stack != NULL) { \ | 162 | if (_stack != NULL) { \ |
148 | stackptr = _stack; \ | 163 | stackptr = _stack; \ |
149 | *stackptr++ = &peer_root; \ | 164 | *stackptr++ = &peers.root; \ |
150 | } \ | 165 | } \ |
151 | for (u = peer_root; u != peer_avl_empty; ) { \ | 166 | for (u = peers.root; u != peer_avl_empty; ) { \ |
152 | if (_daddr == u->v4daddr) \ | 167 | if (_daddr == u->v4daddr) \ |
153 | break; \ | 168 | break; \ |
154 | if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ | 169 | if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ |
@@ -262,7 +277,7 @@ do { \ | |||
262 | n->avl_right = peer_avl_empty; \ | 277 | n->avl_right = peer_avl_empty; \ |
263 | **--stackptr = n; \ | 278 | **--stackptr = n; \ |
264 | peer_avl_rebalance(stack, stackptr); \ | 279 | peer_avl_rebalance(stack, stackptr); \ |
265 | } while(0) | 280 | } while (0) |
266 | 281 | ||
267 | /* May be called with local BH enabled. */ | 282 | /* May be called with local BH enabled. */ |
268 | static void unlink_from_pool(struct inet_peer *p) | 283 | static void unlink_from_pool(struct inet_peer *p) |
@@ -271,7 +286,7 @@ static void unlink_from_pool(struct inet_peer *p) | |||
271 | 286 | ||
272 | do_free = 0; | 287 | do_free = 0; |
273 | 288 | ||
274 | write_lock_bh(&peer_pool_lock); | 289 | write_lock_bh(&peers.lock); |
275 | /* Check the reference counter. It was artificially incremented by 1 | 290 | /* Check the reference counter. It was artificially incremented by 1 |
276 | * in cleanup() function to prevent sudden disappearing. If the | 291 | * in cleanup() function to prevent sudden disappearing. If the |
277 | * reference count is still 1 then the node is referenced only as `p' | 292 | * reference count is still 1 then the node is referenced only as `p' |
@@ -303,10 +318,10 @@ static void unlink_from_pool(struct inet_peer *p) | |||
303 | delp[1] = &t->avl_left; /* was &p->avl_left */ | 318 | delp[1] = &t->avl_left; /* was &p->avl_left */ |
304 | } | 319 | } |
305 | peer_avl_rebalance(stack, stackptr); | 320 | peer_avl_rebalance(stack, stackptr); |
306 | peer_total--; | 321 | peers.total--; |
307 | do_free = 1; | 322 | do_free = 1; |
308 | } | 323 | } |
309 | write_unlock_bh(&peer_pool_lock); | 324 | write_unlock_bh(&peers.lock); |
310 | 325 | ||
311 | if (do_free) | 326 | if (do_free) |
312 | kmem_cache_free(peer_cachep, p); | 327 | kmem_cache_free(peer_cachep, p); |
@@ -326,16 +341,16 @@ static int cleanup_once(unsigned long ttl) | |||
326 | struct inet_peer *p = NULL; | 341 | struct inet_peer *p = NULL; |
327 | 342 | ||
328 | /* Remove the first entry from the list of unused nodes. */ | 343 | /* Remove the first entry from the list of unused nodes. */ |
329 | spin_lock_bh(&inet_peer_unused_lock); | 344 | spin_lock_bh(&unused_peers.lock); |
330 | if (!list_empty(&unused_peers)) { | 345 | if (!list_empty(&unused_peers.list)) { |
331 | __u32 delta; | 346 | __u32 delta; |
332 | 347 | ||
333 | p = list_first_entry(&unused_peers, struct inet_peer, unused); | 348 | p = list_first_entry(&unused_peers.list, struct inet_peer, unused); |
334 | delta = (__u32)jiffies - p->dtime; | 349 | delta = (__u32)jiffies - p->dtime; |
335 | 350 | ||
336 | if (delta < ttl) { | 351 | if (delta < ttl) { |
337 | /* Do not prune fresh entries. */ | 352 | /* Do not prune fresh entries. */ |
338 | spin_unlock_bh(&inet_peer_unused_lock); | 353 | spin_unlock_bh(&unused_peers.lock); |
339 | return -1; | 354 | return -1; |
340 | } | 355 | } |
341 | 356 | ||
@@ -345,7 +360,7 @@ static int cleanup_once(unsigned long ttl) | |||
345 | * before unlink_from_pool() call. */ | 360 | * before unlink_from_pool() call. */ |
346 | atomic_inc(&p->refcnt); | 361 | atomic_inc(&p->refcnt); |
347 | } | 362 | } |
348 | spin_unlock_bh(&inet_peer_unused_lock); | 363 | spin_unlock_bh(&unused_peers.lock); |
349 | 364 | ||
350 | if (p == NULL) | 365 | if (p == NULL) |
351 | /* It means that the total number of USED entries has | 366 | /* It means that the total number of USED entries has |
@@ -364,11 +379,11 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) | |||
364 | struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; | 379 | struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; |
365 | 380 | ||
366 | /* Look up for the address quickly. */ | 381 | /* Look up for the address quickly. */ |
367 | read_lock_bh(&peer_pool_lock); | 382 | read_lock_bh(&peers.lock); |
368 | p = lookup(daddr, NULL); | 383 | p = lookup(daddr, NULL); |
369 | if (p != peer_avl_empty) | 384 | if (p != peer_avl_empty) |
370 | atomic_inc(&p->refcnt); | 385 | atomic_inc(&p->refcnt); |
371 | read_unlock_bh(&peer_pool_lock); | 386 | read_unlock_bh(&peers.lock); |
372 | 387 | ||
373 | if (p != peer_avl_empty) { | 388 | if (p != peer_avl_empty) { |
374 | /* The existing node has been found. */ | 389 | /* The existing node has been found. */ |
@@ -390,7 +405,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) | |||
390 | atomic_set(&n->ip_id_count, secure_ip_id(daddr)); | 405 | atomic_set(&n->ip_id_count, secure_ip_id(daddr)); |
391 | n->tcp_ts_stamp = 0; | 406 | n->tcp_ts_stamp = 0; |
392 | 407 | ||
393 | write_lock_bh(&peer_pool_lock); | 408 | write_lock_bh(&peers.lock); |
394 | /* Check if an entry has suddenly appeared. */ | 409 | /* Check if an entry has suddenly appeared. */ |
395 | p = lookup(daddr, stack); | 410 | p = lookup(daddr, stack); |
396 | if (p != peer_avl_empty) | 411 | if (p != peer_avl_empty) |
@@ -399,10 +414,10 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) | |||
399 | /* Link the node. */ | 414 | /* Link the node. */ |
400 | link_to_pool(n); | 415 | link_to_pool(n); |
401 | INIT_LIST_HEAD(&n->unused); | 416 | INIT_LIST_HEAD(&n->unused); |
402 | peer_total++; | 417 | peers.total++; |
403 | write_unlock_bh(&peer_pool_lock); | 418 | write_unlock_bh(&peers.lock); |
404 | 419 | ||
405 | if (peer_total >= inet_peer_threshold) | 420 | if (peers.total >= inet_peer_threshold) |
406 | /* Remove one less-recently-used entry. */ | 421 | /* Remove one less-recently-used entry. */ |
407 | cleanup_once(0); | 422 | cleanup_once(0); |
408 | 423 | ||
@@ -411,7 +426,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create) | |||
411 | out_free: | 426 | out_free: |
412 | /* The appropriate node is already in the pool. */ | 427 | /* The appropriate node is already in the pool. */ |
413 | atomic_inc(&p->refcnt); | 428 | atomic_inc(&p->refcnt); |
414 | write_unlock_bh(&peer_pool_lock); | 429 | write_unlock_bh(&peers.lock); |
415 | /* Remove the entry from unused list if it was there. */ | 430 | /* Remove the entry from unused list if it was there. */ |
416 | unlink_from_unused(p); | 431 | unlink_from_unused(p); |
417 | /* Free preallocated the preallocated node. */ | 432 | /* Free preallocated the preallocated node. */ |
@@ -425,12 +440,12 @@ static void peer_check_expire(unsigned long dummy) | |||
425 | unsigned long now = jiffies; | 440 | unsigned long now = jiffies; |
426 | int ttl; | 441 | int ttl; |
427 | 442 | ||
428 | if (peer_total >= inet_peer_threshold) | 443 | if (peers.total >= inet_peer_threshold) |
429 | ttl = inet_peer_minttl; | 444 | ttl = inet_peer_minttl; |
430 | else | 445 | else |
431 | ttl = inet_peer_maxttl | 446 | ttl = inet_peer_maxttl |
432 | - (inet_peer_maxttl - inet_peer_minttl) / HZ * | 447 | - (inet_peer_maxttl - inet_peer_minttl) / HZ * |
433 | peer_total / inet_peer_threshold * HZ; | 448 | peers.total / inet_peer_threshold * HZ; |
434 | while (!cleanup_once(ttl)) { | 449 | while (!cleanup_once(ttl)) { |
435 | if (jiffies != now) | 450 | if (jiffies != now) |
436 | break; | 451 | break; |
@@ -439,22 +454,25 @@ static void peer_check_expire(unsigned long dummy) | |||
439 | /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime | 454 | /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime |
440 | * interval depending on the total number of entries (more entries, | 455 | * interval depending on the total number of entries (more entries, |
441 | * less interval). */ | 456 | * less interval). */ |
442 | if (peer_total >= inet_peer_threshold) | 457 | if (peers.total >= inet_peer_threshold) |
443 | peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; | 458 | peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; |
444 | else | 459 | else |
445 | peer_periodic_timer.expires = jiffies | 460 | peer_periodic_timer.expires = jiffies |
446 | + inet_peer_gc_maxtime | 461 | + inet_peer_gc_maxtime |
447 | - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * | 462 | - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * |
448 | peer_total / inet_peer_threshold * HZ; | 463 | peers.total / inet_peer_threshold * HZ; |
449 | add_timer(&peer_periodic_timer); | 464 | add_timer(&peer_periodic_timer); |
450 | } | 465 | } |
451 | 466 | ||
452 | void inet_putpeer(struct inet_peer *p) | 467 | void inet_putpeer(struct inet_peer *p) |
453 | { | 468 | { |
454 | spin_lock_bh(&inet_peer_unused_lock); | 469 | local_bh_disable(); |
455 | if (atomic_dec_and_test(&p->refcnt)) { | 470 | |
456 | list_add_tail(&p->unused, &unused_peers); | 471 | if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) { |
472 | list_add_tail(&p->unused, &unused_peers.list); | ||
457 | p->dtime = (__u32)jiffies; | 473 | p->dtime = (__u32)jiffies; |
474 | spin_unlock(&unused_peers.lock); | ||
458 | } | 475 | } |
459 | spin_unlock_bh(&inet_peer_unused_lock); | 476 | |
477 | local_bh_enable(); | ||
460 | } | 478 | } |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 56cdf68a074c..99461f09320f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -87,16 +87,16 @@ int ip_forward(struct sk_buff *skb) | |||
87 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 87 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
88 | goto sr_failed; | 88 | goto sr_failed; |
89 | 89 | ||
90 | if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && | 90 | if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) && |
91 | (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { | 91 | (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { |
92 | IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS); | 92 | IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS); |
93 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, | 93 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, |
94 | htonl(dst_mtu(&rt->u.dst))); | 94 | htonl(dst_mtu(&rt->dst))); |
95 | goto drop; | 95 | goto drop; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* We are about to mangle packet. Copy it! */ | 98 | /* We are about to mangle packet. Copy it! */ |
99 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len)) | 99 | if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len)) |
100 | goto drop; | 100 | goto drop; |
101 | iph = ip_hdr(skb); | 101 | iph = ip_hdr(skb); |
102 | 102 | ||
@@ -113,7 +113,7 @@ int ip_forward(struct sk_buff *skb) | |||
113 | skb->priority = rt_tos2priority(iph->tos); | 113 | skb->priority = rt_tos2priority(iph->tos); |
114 | 114 | ||
115 | return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, | 115 | return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, |
116 | rt->u.dst.dev, ip_forward_finish); | 116 | rt->dst.dev, ip_forward_finish); |
117 | 117 | ||
118 | sr_failed: | 118 | sr_failed: |
119 | /* | 119 | /* |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 32618e11076d..749e54889e82 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -745,7 +745,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
745 | goto tx_error; | 745 | goto tx_error; |
746 | } | 746 | } |
747 | } | 747 | } |
748 | tdev = rt->u.dst.dev; | 748 | tdev = rt->dst.dev; |
749 | 749 | ||
750 | if (tdev == dev) { | 750 | if (tdev == dev) { |
751 | ip_rt_put(rt); | 751 | ip_rt_put(rt); |
@@ -755,7 +755,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
755 | 755 | ||
756 | df = tiph->frag_off; | 756 | df = tiph->frag_off; |
757 | if (df) | 757 | if (df) |
758 | mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; | 758 | mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen; |
759 | else | 759 | else |
760 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; | 760 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
761 | 761 | ||
@@ -803,7 +803,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
803 | tunnel->err_count = 0; | 803 | tunnel->err_count = 0; |
804 | } | 804 | } |
805 | 805 | ||
806 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; | 806 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len; |
807 | 807 | ||
808 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| | 808 | if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| |
809 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { | 809 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
@@ -830,7 +830,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
830 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 830 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
831 | IPSKB_REROUTED); | 831 | IPSKB_REROUTED); |
832 | skb_dst_drop(skb); | 832 | skb_dst_drop(skb); |
833 | skb_dst_set(skb, &rt->u.dst); | 833 | skb_dst_set(skb, &rt->dst); |
834 | 834 | ||
835 | /* | 835 | /* |
836 | * Push down and install the IPIP header. | 836 | * Push down and install the IPIP header. |
@@ -853,7 +853,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
853 | iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; | 853 | iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; |
854 | #endif | 854 | #endif |
855 | else | 855 | else |
856 | iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); | 856 | iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT); |
857 | } | 857 | } |
858 | 858 | ||
859 | ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags; | 859 | ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags; |
@@ -915,7 +915,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev) | |||
915 | .proto = IPPROTO_GRE }; | 915 | .proto = IPPROTO_GRE }; |
916 | struct rtable *rt; | 916 | struct rtable *rt; |
917 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { | 917 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { |
918 | tdev = rt->u.dst.dev; | 918 | tdev = rt->dst.dev; |
919 | ip_rt_put(rt); | 919 | ip_rt_put(rt); |
920 | } | 920 | } |
921 | 921 | ||
@@ -1174,7 +1174,7 @@ static int ipgre_open(struct net_device *dev) | |||
1174 | struct rtable *rt; | 1174 | struct rtable *rt; |
1175 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) | 1175 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) |
1176 | return -EADDRNOTAVAIL; | 1176 | return -EADDRNOTAVAIL; |
1177 | dev = rt->u.dst.dev; | 1177 | dev = rt->dst.dev; |
1178 | ip_rt_put(rt); | 1178 | ip_rt_put(rt); |
1179 | if (__in_dev_get_rtnl(dev) == NULL) | 1179 | if (__in_dev_get_rtnl(dev) == NULL) |
1180 | return -EADDRNOTAVAIL; | 1180 | return -EADDRNOTAVAIL; |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index d930dc5e4d85..db47a5a00ed2 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
@@ -146,7 +146,7 @@ | |||
146 | #include <linux/netlink.h> | 146 | #include <linux/netlink.h> |
147 | 147 | ||
148 | /* | 148 | /* |
149 | * Process Router Attention IP option | 149 | * Process Router Attention IP option (RFC 2113) |
150 | */ | 150 | */ |
151 | int ip_call_ra_chain(struct sk_buff *skb) | 151 | int ip_call_ra_chain(struct sk_buff *skb) |
152 | { | 152 | { |
@@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
155 | struct sock *last = NULL; | 155 | struct sock *last = NULL; |
156 | struct net_device *dev = skb->dev; | 156 | struct net_device *dev = skb->dev; |
157 | 157 | ||
158 | read_lock(&ip_ra_lock); | 158 | for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) { |
159 | for (ra = ip_ra_chain; ra; ra = ra->next) { | ||
160 | struct sock *sk = ra->sk; | 159 | struct sock *sk = ra->sk; |
161 | 160 | ||
162 | /* If socket is bound to an interface, only report | 161 | /* If socket is bound to an interface, only report |
@@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
167 | sk->sk_bound_dev_if == dev->ifindex) && | 166 | sk->sk_bound_dev_if == dev->ifindex) && |
168 | net_eq(sock_net(sk), dev_net(dev))) { | 167 | net_eq(sock_net(sk), dev_net(dev))) { |
169 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | 168 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { |
170 | if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { | 169 | if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) |
171 | read_unlock(&ip_ra_lock); | ||
172 | return 1; | 170 | return 1; |
173 | } | ||
174 | } | 171 | } |
175 | if (last) { | 172 | if (last) { |
176 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); | 173 | struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); |
@@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb) | |||
183 | 180 | ||
184 | if (last) { | 181 | if (last) { |
185 | raw_rcv(last, skb); | 182 | raw_rcv(last, skb); |
186 | read_unlock(&ip_ra_lock); | ||
187 | return 1; | 183 | return 1; |
188 | } | 184 | } |
189 | read_unlock(&ip_ra_lock); | ||
190 | return 0; | 185 | return 0; |
191 | } | 186 | } |
192 | 187 | ||
@@ -298,18 +293,16 @@ static inline int ip_rcv_options(struct sk_buff *skb) | |||
298 | } | 293 | } |
299 | 294 | ||
300 | if (unlikely(opt->srr)) { | 295 | if (unlikely(opt->srr)) { |
301 | struct in_device *in_dev = in_dev_get(dev); | 296 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
297 | |||
302 | if (in_dev) { | 298 | if (in_dev) { |
303 | if (!IN_DEV_SOURCE_ROUTE(in_dev)) { | 299 | if (!IN_DEV_SOURCE_ROUTE(in_dev)) { |
304 | if (IN_DEV_LOG_MARTIANS(in_dev) && | 300 | if (IN_DEV_LOG_MARTIANS(in_dev) && |
305 | net_ratelimit()) | 301 | net_ratelimit()) |
306 | printk(KERN_INFO "source route option %pI4 -> %pI4\n", | 302 | printk(KERN_INFO "source route option %pI4 -> %pI4\n", |
307 | &iph->saddr, &iph->daddr); | 303 | &iph->saddr, &iph->daddr); |
308 | in_dev_put(in_dev); | ||
309 | goto drop; | 304 | goto drop; |
310 | } | 305 | } |
311 | |||
312 | in_dev_put(in_dev); | ||
313 | } | 306 | } |
314 | 307 | ||
315 | if (ip_options_rcv_srr(skb)) | 308 | if (ip_options_rcv_srr(skb)) |
@@ -340,6 +333,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
340 | else if (err == -ENETUNREACH) | 333 | else if (err == -ENETUNREACH) |
341 | IP_INC_STATS_BH(dev_net(skb->dev), | 334 | IP_INC_STATS_BH(dev_net(skb->dev), |
342 | IPSTATS_MIB_INNOROUTES); | 335 | IPSTATS_MIB_INNOROUTES); |
336 | else if (err == -EXDEV) | ||
337 | NET_INC_STATS_BH(dev_net(skb->dev), | ||
338 | LINUX_MIB_IPRPFILTER); | ||
343 | goto drop; | 339 | goto drop; |
344 | } | 340 | } |
345 | } | 341 | } |
@@ -360,10 +356,10 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
360 | 356 | ||
361 | rt = skb_rtable(skb); | 357 | rt = skb_rtable(skb); |
362 | if (rt->rt_type == RTN_MULTICAST) { | 358 | if (rt->rt_type == RTN_MULTICAST) { |
363 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, | 359 | IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST, |
364 | skb->len); | 360 | skb->len); |
365 | } else if (rt->rt_type == RTN_BROADCAST) | 361 | } else if (rt->rt_type == RTN_BROADCAST) |
366 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST, | 362 | IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST, |
367 | skb->len); | 363 | skb->len); |
368 | 364 | ||
369 | return dst_input(skb); | 365 | return dst_input(skb); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9a4a6c96cb0d..6cbeb2e108de 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -151,15 +151,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, | |||
151 | iph->version = 4; | 151 | iph->version = 4; |
152 | iph->ihl = 5; | 152 | iph->ihl = 5; |
153 | iph->tos = inet->tos; | 153 | iph->tos = inet->tos; |
154 | if (ip_dont_fragment(sk, &rt->u.dst)) | 154 | if (ip_dont_fragment(sk, &rt->dst)) |
155 | iph->frag_off = htons(IP_DF); | 155 | iph->frag_off = htons(IP_DF); |
156 | else | 156 | else |
157 | iph->frag_off = 0; | 157 | iph->frag_off = 0; |
158 | iph->ttl = ip_select_ttl(inet, &rt->u.dst); | 158 | iph->ttl = ip_select_ttl(inet, &rt->dst); |
159 | iph->daddr = rt->rt_dst; | 159 | iph->daddr = rt->rt_dst; |
160 | iph->saddr = rt->rt_src; | 160 | iph->saddr = rt->rt_src; |
161 | iph->protocol = sk->sk_protocol; | 161 | iph->protocol = sk->sk_protocol; |
162 | ip_select_ident(iph, &rt->u.dst, sk); | 162 | ip_select_ident(iph, &rt->dst, sk); |
163 | 163 | ||
164 | if (opt && opt->optlen) { | 164 | if (opt && opt->optlen) { |
165 | iph->ihl += opt->optlen>>2; | 165 | iph->ihl += opt->optlen>>2; |
@@ -240,7 +240,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
240 | { | 240 | { |
241 | struct sock *sk = skb->sk; | 241 | struct sock *sk = skb->sk; |
242 | struct rtable *rt = skb_rtable(skb); | 242 | struct rtable *rt = skb_rtable(skb); |
243 | struct net_device *dev = rt->u.dst.dev; | 243 | struct net_device *dev = rt->dst.dev; |
244 | 244 | ||
245 | /* | 245 | /* |
246 | * If the indicated interface is up and running, send the packet. | 246 | * If the indicated interface is up and running, send the packet. |
@@ -359,9 +359,9 @@ int ip_queue_xmit(struct sk_buff *skb) | |||
359 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) | 359 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) |
360 | goto no_route; | 360 | goto no_route; |
361 | } | 361 | } |
362 | sk_setup_caps(sk, &rt->u.dst); | 362 | sk_setup_caps(sk, &rt->dst); |
363 | } | 363 | } |
364 | skb_dst_set_noref(skb, &rt->u.dst); | 364 | skb_dst_set_noref(skb, &rt->dst); |
365 | 365 | ||
366 | packet_routed: | 366 | packet_routed: |
367 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 367 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
@@ -372,11 +372,11 @@ packet_routed: | |||
372 | skb_reset_network_header(skb); | 372 | skb_reset_network_header(skb); |
373 | iph = ip_hdr(skb); | 373 | iph = ip_hdr(skb); |
374 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); | 374 | *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); |
375 | if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df) | 375 | if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) |
376 | iph->frag_off = htons(IP_DF); | 376 | iph->frag_off = htons(IP_DF); |
377 | else | 377 | else |
378 | iph->frag_off = 0; | 378 | iph->frag_off = 0; |
379 | iph->ttl = ip_select_ttl(inet, &rt->u.dst); | 379 | iph->ttl = ip_select_ttl(inet, &rt->dst); |
380 | iph->protocol = sk->sk_protocol; | 380 | iph->protocol = sk->sk_protocol; |
381 | iph->saddr = rt->rt_src; | 381 | iph->saddr = rt->rt_src; |
382 | iph->daddr = rt->rt_dst; | 382 | iph->daddr = rt->rt_dst; |
@@ -387,7 +387,7 @@ packet_routed: | |||
387 | ip_options_build(skb, opt, inet->inet_daddr, rt, 0); | 387 | ip_options_build(skb, opt, inet->inet_daddr, rt, 0); |
388 | } | 388 | } |
389 | 389 | ||
390 | ip_select_ident_more(iph, &rt->u.dst, sk, | 390 | ip_select_ident_more(iph, &rt->dst, sk, |
391 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); | 391 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
392 | 392 | ||
393 | skb->priority = sk->sk_priority; | 393 | skb->priority = sk->sk_priority; |
@@ -452,7 +452,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
452 | struct rtable *rt = skb_rtable(skb); | 452 | struct rtable *rt = skb_rtable(skb); |
453 | int err = 0; | 453 | int err = 0; |
454 | 454 | ||
455 | dev = rt->u.dst.dev; | 455 | dev = rt->dst.dev; |
456 | 456 | ||
457 | /* | 457 | /* |
458 | * Point into the IP datagram header. | 458 | * Point into the IP datagram header. |
@@ -473,7 +473,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
473 | */ | 473 | */ |
474 | 474 | ||
475 | hlen = iph->ihl * 4; | 475 | hlen = iph->ihl * 4; |
476 | mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */ | 476 | mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */ |
477 | #ifdef CONFIG_BRIDGE_NETFILTER | 477 | #ifdef CONFIG_BRIDGE_NETFILTER |
478 | if (skb->nf_bridge) | 478 | if (skb->nf_bridge) |
479 | mtu -= nf_bridge_mtu_reduction(skb); | 479 | mtu -= nf_bridge_mtu_reduction(skb); |
@@ -586,7 +586,7 @@ slow_path: | |||
586 | * we need to make room for the encapsulating header | 586 | * we need to make room for the encapsulating header |
587 | */ | 587 | */ |
588 | pad = nf_bridge_pad(skb); | 588 | pad = nf_bridge_pad(skb); |
589 | ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad); | 589 | ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, pad); |
590 | mtu -= pad; | 590 | mtu -= pad; |
591 | 591 | ||
592 | /* | 592 | /* |
@@ -833,13 +833,13 @@ int ip_append_data(struct sock *sk, | |||
833 | */ | 833 | */ |
834 | *rtp = NULL; | 834 | *rtp = NULL; |
835 | inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? | 835 | inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? |
836 | rt->u.dst.dev->mtu : | 836 | rt->dst.dev->mtu : |
837 | dst_mtu(rt->u.dst.path); | 837 | dst_mtu(rt->dst.path); |
838 | inet->cork.dst = &rt->u.dst; | 838 | inet->cork.dst = &rt->dst; |
839 | inet->cork.length = 0; | 839 | inet->cork.length = 0; |
840 | sk->sk_sndmsg_page = NULL; | 840 | sk->sk_sndmsg_page = NULL; |
841 | sk->sk_sndmsg_off = 0; | 841 | sk->sk_sndmsg_off = 0; |
842 | if ((exthdrlen = rt->u.dst.header_len) != 0) { | 842 | if ((exthdrlen = rt->dst.header_len) != 0) { |
843 | length += exthdrlen; | 843 | length += exthdrlen; |
844 | transhdrlen += exthdrlen; | 844 | transhdrlen += exthdrlen; |
845 | } | 845 | } |
@@ -852,7 +852,7 @@ int ip_append_data(struct sock *sk, | |||
852 | exthdrlen = 0; | 852 | exthdrlen = 0; |
853 | mtu = inet->cork.fragsize; | 853 | mtu = inet->cork.fragsize; |
854 | } | 854 | } |
855 | hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); | 855 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); |
856 | 856 | ||
857 | fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); | 857 | fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); |
858 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; | 858 | maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; |
@@ -869,14 +869,14 @@ int ip_append_data(struct sock *sk, | |||
869 | */ | 869 | */ |
870 | if (transhdrlen && | 870 | if (transhdrlen && |
871 | length + fragheaderlen <= mtu && | 871 | length + fragheaderlen <= mtu && |
872 | rt->u.dst.dev->features & NETIF_F_V4_CSUM && | 872 | rt->dst.dev->features & NETIF_F_V4_CSUM && |
873 | !exthdrlen) | 873 | !exthdrlen) |
874 | csummode = CHECKSUM_PARTIAL; | 874 | csummode = CHECKSUM_PARTIAL; |
875 | 875 | ||
876 | inet->cork.length += length; | 876 | inet->cork.length += length; |
877 | if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) && | 877 | if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) && |
878 | (sk->sk_protocol == IPPROTO_UDP) && | 878 | (sk->sk_protocol == IPPROTO_UDP) && |
879 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 879 | (rt->dst.dev->features & NETIF_F_UFO)) { |
880 | err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, | 880 | err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, |
881 | fragheaderlen, transhdrlen, mtu, | 881 | fragheaderlen, transhdrlen, mtu, |
882 | flags); | 882 | flags); |
@@ -924,7 +924,7 @@ alloc_new_skb: | |||
924 | fraglen = datalen + fragheaderlen; | 924 | fraglen = datalen + fragheaderlen; |
925 | 925 | ||
926 | if ((flags & MSG_MORE) && | 926 | if ((flags & MSG_MORE) && |
927 | !(rt->u.dst.dev->features&NETIF_F_SG)) | 927 | !(rt->dst.dev->features&NETIF_F_SG)) |
928 | alloclen = mtu; | 928 | alloclen = mtu; |
929 | else | 929 | else |
930 | alloclen = datalen + fragheaderlen; | 930 | alloclen = datalen + fragheaderlen; |
@@ -935,7 +935,7 @@ alloc_new_skb: | |||
935 | * the last. | 935 | * the last. |
936 | */ | 936 | */ |
937 | if (datalen == length + fraggap) | 937 | if (datalen == length + fraggap) |
938 | alloclen += rt->u.dst.trailer_len; | 938 | alloclen += rt->dst.trailer_len; |
939 | 939 | ||
940 | if (transhdrlen) { | 940 | if (transhdrlen) { |
941 | skb = sock_alloc_send_skb(sk, | 941 | skb = sock_alloc_send_skb(sk, |
@@ -1008,7 +1008,7 @@ alloc_new_skb: | |||
1008 | if (copy > length) | 1008 | if (copy > length) |
1009 | copy = length; | 1009 | copy = length; |
1010 | 1010 | ||
1011 | if (!(rt->u.dst.dev->features&NETIF_F_SG)) { | 1011 | if (!(rt->dst.dev->features&NETIF_F_SG)) { |
1012 | unsigned int off; | 1012 | unsigned int off; |
1013 | 1013 | ||
1014 | off = skb->len; | 1014 | off = skb->len; |
@@ -1103,10 +1103,10 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1103 | if (inet->cork.flags & IPCORK_OPT) | 1103 | if (inet->cork.flags & IPCORK_OPT) |
1104 | opt = inet->cork.opt; | 1104 | opt = inet->cork.opt; |
1105 | 1105 | ||
1106 | if (!(rt->u.dst.dev->features&NETIF_F_SG)) | 1106 | if (!(rt->dst.dev->features&NETIF_F_SG)) |
1107 | return -EOPNOTSUPP; | 1107 | return -EOPNOTSUPP; |
1108 | 1108 | ||
1109 | hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); | 1109 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); |
1110 | mtu = inet->cork.fragsize; | 1110 | mtu = inet->cork.fragsize; |
1111 | 1111 | ||
1112 | fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); | 1112 | fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); |
@@ -1122,7 +1122,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, | |||
1122 | 1122 | ||
1123 | inet->cork.length += size; | 1123 | inet->cork.length += size; |
1124 | if ((sk->sk_protocol == IPPROTO_UDP) && | 1124 | if ((sk->sk_protocol == IPPROTO_UDP) && |
1125 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 1125 | (rt->dst.dev->features & NETIF_F_UFO)) { |
1126 | skb_shinfo(skb)->gso_size = mtu - fragheaderlen; | 1126 | skb_shinfo(skb)->gso_size = mtu - fragheaderlen; |
1127 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | 1127 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
1128 | } | 1128 | } |
@@ -1274,8 +1274,8 @@ int ip_push_pending_frames(struct sock *sk) | |||
1274 | * If local_df is set too, we still allow to fragment this frame | 1274 | * If local_df is set too, we still allow to fragment this frame |
1275 | * locally. */ | 1275 | * locally. */ |
1276 | if (inet->pmtudisc >= IP_PMTUDISC_DO || | 1276 | if (inet->pmtudisc >= IP_PMTUDISC_DO || |
1277 | (skb->len <= dst_mtu(&rt->u.dst) && | 1277 | (skb->len <= dst_mtu(&rt->dst) && |
1278 | ip_dont_fragment(sk, &rt->u.dst))) | 1278 | ip_dont_fragment(sk, &rt->dst))) |
1279 | df = htons(IP_DF); | 1279 | df = htons(IP_DF); |
1280 | 1280 | ||
1281 | if (inet->cork.flags & IPCORK_OPT) | 1281 | if (inet->cork.flags & IPCORK_OPT) |
@@ -1284,7 +1284,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1284 | if (rt->rt_type == RTN_MULTICAST) | 1284 | if (rt->rt_type == RTN_MULTICAST) |
1285 | ttl = inet->mc_ttl; | 1285 | ttl = inet->mc_ttl; |
1286 | else | 1286 | else |
1287 | ttl = ip_select_ttl(inet, &rt->u.dst); | 1287 | ttl = ip_select_ttl(inet, &rt->dst); |
1288 | 1288 | ||
1289 | iph = (struct iphdr *)skb->data; | 1289 | iph = (struct iphdr *)skb->data; |
1290 | iph->version = 4; | 1290 | iph->version = 4; |
@@ -1295,7 +1295,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1295 | } | 1295 | } |
1296 | iph->tos = inet->tos; | 1296 | iph->tos = inet->tos; |
1297 | iph->frag_off = df; | 1297 | iph->frag_off = df; |
1298 | ip_select_ident(iph, &rt->u.dst, sk); | 1298 | ip_select_ident(iph, &rt->dst, sk); |
1299 | iph->ttl = ttl; | 1299 | iph->ttl = ttl; |
1300 | iph->protocol = sk->sk_protocol; | 1300 | iph->protocol = sk->sk_protocol; |
1301 | iph->saddr = rt->rt_src; | 1301 | iph->saddr = rt->rt_src; |
@@ -1308,7 +1308,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1308 | * on dst refcount | 1308 | * on dst refcount |
1309 | */ | 1309 | */ |
1310 | inet->cork.dst = NULL; | 1310 | inet->cork.dst = NULL; |
1311 | skb_dst_set(skb, &rt->u.dst); | 1311 | skb_dst_set(skb, &rt->dst); |
1312 | 1312 | ||
1313 | if (iph->protocol == IPPROTO_ICMP) | 1313 | if (iph->protocol == IPPROTO_ICMP) |
1314 | icmp_out_count(net, ((struct icmphdr *) | 1314 | icmp_out_count(net, ((struct icmphdr *) |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ce231780a2b1..47fff528ff39 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -239,7 +239,16 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
239 | sent to multicast group to reach destination designated router. | 239 | sent to multicast group to reach destination designated router. |
240 | */ | 240 | */ |
241 | struct ip_ra_chain *ip_ra_chain; | 241 | struct ip_ra_chain *ip_ra_chain; |
242 | DEFINE_RWLOCK(ip_ra_lock); | 242 | static DEFINE_SPINLOCK(ip_ra_lock); |
243 | |||
244 | |||
245 | static void ip_ra_destroy_rcu(struct rcu_head *head) | ||
246 | { | ||
247 | struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); | ||
248 | |||
249 | sock_put(ra->saved_sk); | ||
250 | kfree(ra); | ||
251 | } | ||
243 | 252 | ||
244 | int ip_ra_control(struct sock *sk, unsigned char on, | 253 | int ip_ra_control(struct sock *sk, unsigned char on, |
245 | void (*destructor)(struct sock *)) | 254 | void (*destructor)(struct sock *)) |
@@ -251,35 +260,42 @@ int ip_ra_control(struct sock *sk, unsigned char on, | |||
251 | 260 | ||
252 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 261 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
253 | 262 | ||
254 | write_lock_bh(&ip_ra_lock); | 263 | spin_lock_bh(&ip_ra_lock); |
255 | for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { | 264 | for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { |
256 | if (ra->sk == sk) { | 265 | if (ra->sk == sk) { |
257 | if (on) { | 266 | if (on) { |
258 | write_unlock_bh(&ip_ra_lock); | 267 | spin_unlock_bh(&ip_ra_lock); |
259 | kfree(new_ra); | 268 | kfree(new_ra); |
260 | return -EADDRINUSE; | 269 | return -EADDRINUSE; |
261 | } | 270 | } |
262 | *rap = ra->next; | 271 | /* dont let ip_call_ra_chain() use sk again */ |
263 | write_unlock_bh(&ip_ra_lock); | 272 | ra->sk = NULL; |
273 | rcu_assign_pointer(*rap, ra->next); | ||
274 | spin_unlock_bh(&ip_ra_lock); | ||
264 | 275 | ||
265 | if (ra->destructor) | 276 | if (ra->destructor) |
266 | ra->destructor(sk); | 277 | ra->destructor(sk); |
267 | sock_put(sk); | 278 | /* |
268 | kfree(ra); | 279 | * Delay sock_put(sk) and kfree(ra) after one rcu grace |
280 | * period. This guarantee ip_call_ra_chain() dont need | ||
281 | * to mess with socket refcounts. | ||
282 | */ | ||
283 | ra->saved_sk = sk; | ||
284 | call_rcu(&ra->rcu, ip_ra_destroy_rcu); | ||
269 | return 0; | 285 | return 0; |
270 | } | 286 | } |
271 | } | 287 | } |
272 | if (new_ra == NULL) { | 288 | if (new_ra == NULL) { |
273 | write_unlock_bh(&ip_ra_lock); | 289 | spin_unlock_bh(&ip_ra_lock); |
274 | return -ENOBUFS; | 290 | return -ENOBUFS; |
275 | } | 291 | } |
276 | new_ra->sk = sk; | 292 | new_ra->sk = sk; |
277 | new_ra->destructor = destructor; | 293 | new_ra->destructor = destructor; |
278 | 294 | ||
279 | new_ra->next = ra; | 295 | new_ra->next = ra; |
280 | *rap = new_ra; | 296 | rcu_assign_pointer(*rap, new_ra); |
281 | sock_hold(sk); | 297 | sock_hold(sk); |
282 | write_unlock_bh(&ip_ra_lock); | 298 | spin_unlock_bh(&ip_ra_lock); |
283 | 299 | ||
284 | return 0; | 300 | return 0; |
285 | } | 301 | } |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index b9d84e800cf4..3a6e1ec5e9ae 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -665,6 +665,13 @@ ic_dhcp_init_options(u8 *options) | |||
665 | memcpy(e, ic_req_params, sizeof(ic_req_params)); | 665 | memcpy(e, ic_req_params, sizeof(ic_req_params)); |
666 | e += sizeof(ic_req_params); | 666 | e += sizeof(ic_req_params); |
667 | 667 | ||
668 | if (ic_host_name_set) { | ||
669 | *e++ = 12; /* host-name */ | ||
670 | len = strlen(utsname()->nodename); | ||
671 | *e++ = len; | ||
672 | memcpy(e, utsname()->nodename, len); | ||
673 | e += len; | ||
674 | } | ||
668 | if (*vendor_class_identifier) { | 675 | if (*vendor_class_identifier) { |
669 | printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", | 676 | printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", |
670 | vendor_class_identifier); | 677 | vendor_class_identifier); |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 7fd636711037..ec036731a70b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -435,7 +435,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
435 | goto tx_error_icmp; | 435 | goto tx_error_icmp; |
436 | } | 436 | } |
437 | } | 437 | } |
438 | tdev = rt->u.dst.dev; | 438 | tdev = rt->dst.dev; |
439 | 439 | ||
440 | if (tdev == dev) { | 440 | if (tdev == dev) { |
441 | ip_rt_put(rt); | 441 | ip_rt_put(rt); |
@@ -446,7 +446,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
446 | df |= old_iph->frag_off & htons(IP_DF); | 446 | df |= old_iph->frag_off & htons(IP_DF); |
447 | 447 | ||
448 | if (df) { | 448 | if (df) { |
449 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 449 | mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); |
450 | 450 | ||
451 | if (mtu < 68) { | 451 | if (mtu < 68) { |
452 | stats->collisions++; | 452 | stats->collisions++; |
@@ -503,7 +503,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
503 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 503 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
504 | IPSKB_REROUTED); | 504 | IPSKB_REROUTED); |
505 | skb_dst_drop(skb); | 505 | skb_dst_drop(skb); |
506 | skb_dst_set(skb, &rt->u.dst); | 506 | skb_dst_set(skb, &rt->dst); |
507 | 507 | ||
508 | /* | 508 | /* |
509 | * Push down and install the IPIP header. | 509 | * Push down and install the IPIP header. |
@@ -552,7 +552,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev) | |||
552 | .proto = IPPROTO_IPIP }; | 552 | .proto = IPPROTO_IPIP }; |
553 | struct rtable *rt; | 553 | struct rtable *rt; |
554 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { | 554 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { |
555 | tdev = rt->u.dst.dev; | 555 | tdev = rt->dst.dev; |
556 | ip_rt_put(rt); | 556 | ip_rt_put(rt); |
557 | } | 557 | } |
558 | dev->flags |= IFF_POINTOPOINT; | 558 | dev->flags |= IFF_POINTOPOINT; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 856123fe32f9..539592294f45 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net) | |||
267 | { | 267 | { |
268 | struct mr_table *mrt, *next; | 268 | struct mr_table *mrt, *next; |
269 | 269 | ||
270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) | 270 | list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { |
271 | list_del(&mrt->list); | ||
271 | kfree(mrt); | 272 | kfree(mrt); |
273 | } | ||
272 | fib_rules_unregister(net->ipv4.mr_rules_ops); | 274 | fib_rules_unregister(net->ipv4.mr_rules_ops); |
273 | } | 275 | } |
274 | #else | 276 | #else |
@@ -1551,9 +1553,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1551 | goto out_free; | 1553 | goto out_free; |
1552 | } | 1554 | } |
1553 | 1555 | ||
1554 | dev = rt->u.dst.dev; | 1556 | dev = rt->dst.dev; |
1555 | 1557 | ||
1556 | if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) { | 1558 | if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { |
1557 | /* Do not fragment multicasts. Alas, IPv4 does not | 1559 | /* Do not fragment multicasts. Alas, IPv4 does not |
1558 | allow to send ICMP, so that packets will disappear | 1560 | allow to send ICMP, so that packets will disappear |
1559 | to blackhole. | 1561 | to blackhole. |
@@ -1564,7 +1566,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1564 | goto out_free; | 1566 | goto out_free; |
1565 | } | 1567 | } |
1566 | 1568 | ||
1567 | encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; | 1569 | encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; |
1568 | 1570 | ||
1569 | if (skb_cow(skb, encap)) { | 1571 | if (skb_cow(skb, encap)) { |
1570 | ip_rt_put(rt); | 1572 | ip_rt_put(rt); |
@@ -1575,7 +1577,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1575 | vif->bytes_out += skb->len; | 1577 | vif->bytes_out += skb->len; |
1576 | 1578 | ||
1577 | skb_dst_drop(skb); | 1579 | skb_dst_drop(skb); |
1578 | skb_dst_set(skb, &rt->u.dst); | 1580 | skb_dst_set(skb, &rt->dst); |
1579 | ip_decrease_ttl(ip_hdr(skb)); | 1581 | ip_decrease_ttl(ip_hdr(skb)); |
1580 | 1582 | ||
1581 | /* FIXME: forward and output firewalls used to be called here. | 1583 | /* FIXME: forward and output firewalls used to be called here. |
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index acd1ea87ba51..d88a46c54fd1 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -43,7 +43,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
43 | 43 | ||
44 | /* Drop old route. */ | 44 | /* Drop old route. */ |
45 | skb_dst_drop(skb); | 45 | skb_dst_drop(skb); |
46 | skb_dst_set(skb, &rt->u.dst); | 46 | skb_dst_set(skb, &rt->dst); |
47 | } else { | 47 | } else { |
48 | /* non-local src, find valid iif to satisfy | 48 | /* non-local src, find valid iif to satisfy |
49 | * rp-filter when calling ip_route_input. */ | 49 | * rp-filter when calling ip_route_input. */ |
@@ -53,11 +53,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
53 | 53 | ||
54 | orefdst = skb->_skb_refdst; | 54 | orefdst = skb->_skb_refdst; |
55 | if (ip_route_input(skb, iph->daddr, iph->saddr, | 55 | if (ip_route_input(skb, iph->daddr, iph->saddr, |
56 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { | 56 | RT_TOS(iph->tos), rt->dst.dev) != 0) { |
57 | dst_release(&rt->u.dst); | 57 | dst_release(&rt->dst); |
58 | return -1; | 58 | return -1; |
59 | } | 59 | } |
60 | dst_release(&rt->u.dst); | 60 | dst_release(&rt->dst); |
61 | refdst_drop(orefdst); | 61 | refdst_drop(orefdst); |
62 | } | 62 | } |
63 | 63 | ||
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 7c0b8ad61f9d..b38c11810c65 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb, | |||
336 | cpu = smp_processor_id(); | 336 | cpu = smp_processor_id(); |
337 | table_base = private->entries[cpu]; | 337 | table_base = private->entries[cpu]; |
338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; | 338 | jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; |
339 | stackptr = &private->stackptr[cpu]; | 339 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
340 | origptr = *stackptr; | 340 | origptr = *stackptr; |
341 | 341 | ||
342 | e = get_entry(table_base, private->hook_entry[hook]); | 342 | e = get_entry(table_base, private->hook_entry[hook]); |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3dc9914c1dce..e320ca6b3ef3 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -252,6 +252,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), | 252 | SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), |
253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), | 253 | SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), |
254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), | 254 | SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), |
255 | SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), | ||
255 | SNMP_MIB_SENTINEL | 256 | SNMP_MIB_SENTINEL |
256 | }; | 257 | }; |
257 | 258 | ||
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2c7a1639388a..009a7b2aa1ef 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -314,7 +314,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) | |||
314 | } | 314 | } |
315 | 315 | ||
316 | static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | 316 | static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, |
317 | struct rtable *rt, | 317 | struct rtable **rtp, |
318 | unsigned int flags) | 318 | unsigned int flags) |
319 | { | 319 | { |
320 | struct inet_sock *inet = inet_sk(sk); | 320 | struct inet_sock *inet = inet_sk(sk); |
@@ -323,25 +323,27 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
323 | struct sk_buff *skb; | 323 | struct sk_buff *skb; |
324 | unsigned int iphlen; | 324 | unsigned int iphlen; |
325 | int err; | 325 | int err; |
326 | struct rtable *rt = *rtp; | ||
326 | 327 | ||
327 | if (length > rt->u.dst.dev->mtu) { | 328 | if (length > rt->dst.dev->mtu) { |
328 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, | 329 | ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, |
329 | rt->u.dst.dev->mtu); | 330 | rt->dst.dev->mtu); |
330 | return -EMSGSIZE; | 331 | return -EMSGSIZE; |
331 | } | 332 | } |
332 | if (flags&MSG_PROBE) | 333 | if (flags&MSG_PROBE) |
333 | goto out; | 334 | goto out; |
334 | 335 | ||
335 | skb = sock_alloc_send_skb(sk, | 336 | skb = sock_alloc_send_skb(sk, |
336 | length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, | 337 | length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, |
337 | flags & MSG_DONTWAIT, &err); | 338 | flags & MSG_DONTWAIT, &err); |
338 | if (skb == NULL) | 339 | if (skb == NULL) |
339 | goto error; | 340 | goto error; |
340 | skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); | 341 | skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); |
341 | 342 | ||
342 | skb->priority = sk->sk_priority; | 343 | skb->priority = sk->sk_priority; |
343 | skb->mark = sk->sk_mark; | 344 | skb->mark = sk->sk_mark; |
344 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 345 | skb_dst_set(skb, &rt->dst); |
346 | *rtp = NULL; | ||
345 | 347 | ||
346 | skb_reset_network_header(skb); | 348 | skb_reset_network_header(skb); |
347 | iph = ip_hdr(skb); | 349 | iph = ip_hdr(skb); |
@@ -373,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
373 | iph->check = 0; | 375 | iph->check = 0; |
374 | iph->tot_len = htons(length); | 376 | iph->tot_len = htons(length); |
375 | if (!iph->id) | 377 | if (!iph->id) |
376 | ip_select_ident(iph, &rt->u.dst, NULL); | 378 | ip_select_ident(iph, &rt->dst, NULL); |
377 | 379 | ||
378 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); | 380 | iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); |
379 | } | 381 | } |
@@ -382,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
382 | skb_transport_header(skb))->type); | 384 | skb_transport_header(skb))->type); |
383 | 385 | ||
384 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, | 386 | err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL, |
385 | rt->u.dst.dev, dst_output); | 387 | rt->dst.dev, dst_output); |
386 | if (err > 0) | 388 | if (err > 0) |
387 | err = net_xmit_errno(err); | 389 | err = net_xmit_errno(err); |
388 | if (err) | 390 | if (err) |
@@ -576,7 +578,7 @@ back_from_confirm: | |||
576 | 578 | ||
577 | if (inet->hdrincl) | 579 | if (inet->hdrincl) |
578 | err = raw_send_hdrinc(sk, msg->msg_iov, len, | 580 | err = raw_send_hdrinc(sk, msg->msg_iov, len, |
579 | rt, msg->msg_flags); | 581 | &rt, msg->msg_flags); |
580 | 582 | ||
581 | else { | 583 | else { |
582 | if (!ipc.addr) | 584 | if (!ipc.addr) |
@@ -604,7 +606,7 @@ out: | |||
604 | return len; | 606 | return len; |
605 | 607 | ||
606 | do_confirm: | 608 | do_confirm: |
607 | dst_confirm(&rt->u.dst); | 609 | dst_confirm(&rt->dst); |
608 | if (!(msg->msg_flags & MSG_PROBE) || len) | 610 | if (!(msg->msg_flags & MSG_PROBE) || len) |
609 | goto back_from_confirm; | 611 | goto back_from_confirm; |
610 | err = 0; | 612 | err = 0; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8495bceec764..a291edbbc97f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -286,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq) | |||
286 | rcu_read_lock_bh(); | 286 | rcu_read_lock_bh(); |
287 | r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); | 287 | r = rcu_dereference_bh(rt_hash_table[st->bucket].chain); |
288 | while (r) { | 288 | while (r) { |
289 | if (dev_net(r->u.dst.dev) == seq_file_net(seq) && | 289 | if (dev_net(r->dst.dev) == seq_file_net(seq) && |
290 | r->rt_genid == st->genid) | 290 | r->rt_genid == st->genid) |
291 | return r; | 291 | return r; |
292 | r = rcu_dereference_bh(r->u.dst.rt_next); | 292 | r = rcu_dereference_bh(r->dst.rt_next); |
293 | } | 293 | } |
294 | rcu_read_unlock_bh(); | 294 | rcu_read_unlock_bh(); |
295 | } | 295 | } |
@@ -301,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq, | |||
301 | { | 301 | { |
302 | struct rt_cache_iter_state *st = seq->private; | 302 | struct rt_cache_iter_state *st = seq->private; |
303 | 303 | ||
304 | r = r->u.dst.rt_next; | 304 | r = r->dst.rt_next; |
305 | while (!r) { | 305 | while (!r) { |
306 | rcu_read_unlock_bh(); | 306 | rcu_read_unlock_bh(); |
307 | do { | 307 | do { |
@@ -319,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, | |||
319 | { | 319 | { |
320 | struct rt_cache_iter_state *st = seq->private; | 320 | struct rt_cache_iter_state *st = seq->private; |
321 | while ((r = __rt_cache_get_next(seq, r)) != NULL) { | 321 | while ((r = __rt_cache_get_next(seq, r)) != NULL) { |
322 | if (dev_net(r->u.dst.dev) != seq_file_net(seq)) | 322 | if (dev_net(r->dst.dev) != seq_file_net(seq)) |
323 | continue; | 323 | continue; |
324 | if (r->rt_genid == st->genid) | 324 | if (r->rt_genid == st->genid) |
325 | break; | 325 | break; |
@@ -377,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v) | |||
377 | 377 | ||
378 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" | 378 | seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" |
379 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", | 379 | "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", |
380 | r->u.dst.dev ? r->u.dst.dev->name : "*", | 380 | r->dst.dev ? r->dst.dev->name : "*", |
381 | (__force u32)r->rt_dst, | 381 | (__force u32)r->rt_dst, |
382 | (__force u32)r->rt_gateway, | 382 | (__force u32)r->rt_gateway, |
383 | r->rt_flags, atomic_read(&r->u.dst.__refcnt), | 383 | r->rt_flags, atomic_read(&r->dst.__refcnt), |
384 | r->u.dst.__use, 0, (__force u32)r->rt_src, | 384 | r->dst.__use, 0, (__force u32)r->rt_src, |
385 | (dst_metric(&r->u.dst, RTAX_ADVMSS) ? | 385 | (dst_metric(&r->dst, RTAX_ADVMSS) ? |
386 | (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), | 386 | (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0), |
387 | dst_metric(&r->u.dst, RTAX_WINDOW), | 387 | dst_metric(&r->dst, RTAX_WINDOW), |
388 | (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) + | 388 | (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + |
389 | dst_metric(&r->u.dst, RTAX_RTTVAR)), | 389 | dst_metric(&r->dst, RTAX_RTTVAR)), |
390 | r->fl.fl4_tos, | 390 | r->fl.fl4_tos, |
391 | r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1, | 391 | r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, |
392 | r->u.dst.hh ? (r->u.dst.hh->hh_output == | 392 | r->dst.hh ? (r->dst.hh->hh_output == |
393 | dev_queue_xmit) : 0, | 393 | dev_queue_xmit) : 0, |
394 | r->rt_spec_dst, &len); | 394 | r->rt_spec_dst, &len); |
395 | 395 | ||
@@ -608,13 +608,13 @@ static inline int ip_rt_proc_init(void) | |||
608 | 608 | ||
609 | static inline void rt_free(struct rtable *rt) | 609 | static inline void rt_free(struct rtable *rt) |
610 | { | 610 | { |
611 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 611 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); |
612 | } | 612 | } |
613 | 613 | ||
614 | static inline void rt_drop(struct rtable *rt) | 614 | static inline void rt_drop(struct rtable *rt) |
615 | { | 615 | { |
616 | ip_rt_put(rt); | 616 | ip_rt_put(rt); |
617 | call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); | 617 | call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); |
618 | } | 618 | } |
619 | 619 | ||
620 | static inline int rt_fast_clean(struct rtable *rth) | 620 | static inline int rt_fast_clean(struct rtable *rth) |
@@ -622,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth) | |||
622 | /* Kill broadcast/multicast entries very aggresively, if they | 622 | /* Kill broadcast/multicast entries very aggresively, if they |
623 | collide in hash table with more useful entries */ | 623 | collide in hash table with more useful entries */ |
624 | return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && | 624 | return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) && |
625 | rth->fl.iif && rth->u.dst.rt_next; | 625 | rth->fl.iif && rth->dst.rt_next; |
626 | } | 626 | } |
627 | 627 | ||
628 | static inline int rt_valuable(struct rtable *rth) | 628 | static inline int rt_valuable(struct rtable *rth) |
629 | { | 629 | { |
630 | return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) || | 630 | return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) || |
631 | rth->u.dst.expires; | 631 | rth->dst.expires; |
632 | } | 632 | } |
633 | 633 | ||
634 | static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2) | 634 | static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2) |
@@ -636,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t | |||
636 | unsigned long age; | 636 | unsigned long age; |
637 | int ret = 0; | 637 | int ret = 0; |
638 | 638 | ||
639 | if (atomic_read(&rth->u.dst.__refcnt)) | 639 | if (atomic_read(&rth->dst.__refcnt)) |
640 | goto out; | 640 | goto out; |
641 | 641 | ||
642 | ret = 1; | 642 | ret = 1; |
643 | if (rth->u.dst.expires && | 643 | if (rth->dst.expires && |
644 | time_after_eq(jiffies, rth->u.dst.expires)) | 644 | time_after_eq(jiffies, rth->dst.expires)) |
645 | goto out; | 645 | goto out; |
646 | 646 | ||
647 | age = jiffies - rth->u.dst.lastuse; | 647 | age = jiffies - rth->dst.lastuse; |
648 | ret = 0; | 648 | ret = 0; |
649 | if ((age <= tmo1 && !rt_fast_clean(rth)) || | 649 | if ((age <= tmo1 && !rt_fast_clean(rth)) || |
650 | (age <= tmo2 && rt_valuable(rth))) | 650 | (age <= tmo2 && rt_valuable(rth))) |
@@ -660,7 +660,7 @@ out: return ret; | |||
660 | */ | 660 | */ |
661 | static inline u32 rt_score(struct rtable *rt) | 661 | static inline u32 rt_score(struct rtable *rt) |
662 | { | 662 | { |
663 | u32 score = jiffies - rt->u.dst.lastuse; | 663 | u32 score = jiffies - rt->dst.lastuse; |
664 | 664 | ||
665 | score = ~score & ~(3<<30); | 665 | score = ~score & ~(3<<30); |
666 | 666 | ||
@@ -700,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) | |||
700 | 700 | ||
701 | static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) | 701 | static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) |
702 | { | 702 | { |
703 | return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev)); | 703 | return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev)); |
704 | } | 704 | } |
705 | 705 | ||
706 | static inline int rt_is_expired(struct rtable *rth) | 706 | static inline int rt_is_expired(struct rtable *rth) |
707 | { | 707 | { |
708 | return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev)); | 708 | return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); |
709 | } | 709 | } |
710 | 710 | ||
711 | /* | 711 | /* |
@@ -734,7 +734,7 @@ static void rt_do_flush(int process_context) | |||
734 | rth = rt_hash_table[i].chain; | 734 | rth = rt_hash_table[i].chain; |
735 | 735 | ||
736 | /* defer releasing the head of the list after spin_unlock */ | 736 | /* defer releasing the head of the list after spin_unlock */ |
737 | for (tail = rth; tail; tail = tail->u.dst.rt_next) | 737 | for (tail = rth; tail; tail = tail->dst.rt_next) |
738 | if (!rt_is_expired(tail)) | 738 | if (!rt_is_expired(tail)) |
739 | break; | 739 | break; |
740 | if (rth != tail) | 740 | if (rth != tail) |
@@ -743,9 +743,9 @@ static void rt_do_flush(int process_context) | |||
743 | /* call rt_free on entries after the tail requiring flush */ | 743 | /* call rt_free on entries after the tail requiring flush */ |
744 | prev = &rt_hash_table[i].chain; | 744 | prev = &rt_hash_table[i].chain; |
745 | for (p = *prev; p; p = next) { | 745 | for (p = *prev; p; p = next) { |
746 | next = p->u.dst.rt_next; | 746 | next = p->dst.rt_next; |
747 | if (!rt_is_expired(p)) { | 747 | if (!rt_is_expired(p)) { |
748 | prev = &p->u.dst.rt_next; | 748 | prev = &p->dst.rt_next; |
749 | } else { | 749 | } else { |
750 | *prev = next; | 750 | *prev = next; |
751 | rt_free(p); | 751 | rt_free(p); |
@@ -760,7 +760,7 @@ static void rt_do_flush(int process_context) | |||
760 | spin_unlock_bh(rt_hash_lock_addr(i)); | 760 | spin_unlock_bh(rt_hash_lock_addr(i)); |
761 | 761 | ||
762 | for (; rth != tail; rth = next) { | 762 | for (; rth != tail; rth = next) { |
763 | next = rth->u.dst.rt_next; | 763 | next = rth->dst.rt_next; |
764 | rt_free(rth); | 764 | rt_free(rth); |
765 | } | 765 | } |
766 | } | 766 | } |
@@ -791,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) | |||
791 | while (aux != rth) { | 791 | while (aux != rth) { |
792 | if (compare_hash_inputs(&aux->fl, &rth->fl)) | 792 | if (compare_hash_inputs(&aux->fl, &rth->fl)) |
793 | return 0; | 793 | return 0; |
794 | aux = aux->u.dst.rt_next; | 794 | aux = aux->dst.rt_next; |
795 | } | 795 | } |
796 | return ONE; | 796 | return ONE; |
797 | } | 797 | } |
@@ -831,18 +831,18 @@ static void rt_check_expire(void) | |||
831 | length = 0; | 831 | length = 0; |
832 | spin_lock_bh(rt_hash_lock_addr(i)); | 832 | spin_lock_bh(rt_hash_lock_addr(i)); |
833 | while ((rth = *rthp) != NULL) { | 833 | while ((rth = *rthp) != NULL) { |
834 | prefetch(rth->u.dst.rt_next); | 834 | prefetch(rth->dst.rt_next); |
835 | if (rt_is_expired(rth)) { | 835 | if (rt_is_expired(rth)) { |
836 | *rthp = rth->u.dst.rt_next; | 836 | *rthp = rth->dst.rt_next; |
837 | rt_free(rth); | 837 | rt_free(rth); |
838 | continue; | 838 | continue; |
839 | } | 839 | } |
840 | if (rth->u.dst.expires) { | 840 | if (rth->dst.expires) { |
841 | /* Entry is expired even if it is in use */ | 841 | /* Entry is expired even if it is in use */ |
842 | if (time_before_eq(jiffies, rth->u.dst.expires)) { | 842 | if (time_before_eq(jiffies, rth->dst.expires)) { |
843 | nofree: | 843 | nofree: |
844 | tmo >>= 1; | 844 | tmo >>= 1; |
845 | rthp = &rth->u.dst.rt_next; | 845 | rthp = &rth->dst.rt_next; |
846 | /* | 846 | /* |
847 | * We only count entries on | 847 | * We only count entries on |
848 | * a chain with equal hash inputs once | 848 | * a chain with equal hash inputs once |
@@ -858,7 +858,7 @@ nofree: | |||
858 | goto nofree; | 858 | goto nofree; |
859 | 859 | ||
860 | /* Cleanup aged off entries. */ | 860 | /* Cleanup aged off entries. */ |
861 | *rthp = rth->u.dst.rt_next; | 861 | *rthp = rth->dst.rt_next; |
862 | rt_free(rth); | 862 | rt_free(rth); |
863 | } | 863 | } |
864 | spin_unlock_bh(rt_hash_lock_addr(i)); | 864 | spin_unlock_bh(rt_hash_lock_addr(i)); |
@@ -999,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops) | |||
999 | if (!rt_is_expired(rth) && | 999 | if (!rt_is_expired(rth) && |
1000 | !rt_may_expire(rth, tmo, expire)) { | 1000 | !rt_may_expire(rth, tmo, expire)) { |
1001 | tmo >>= 1; | 1001 | tmo >>= 1; |
1002 | rthp = &rth->u.dst.rt_next; | 1002 | rthp = &rth->dst.rt_next; |
1003 | continue; | 1003 | continue; |
1004 | } | 1004 | } |
1005 | *rthp = rth->u.dst.rt_next; | 1005 | *rthp = rth->dst.rt_next; |
1006 | rt_free(rth); | 1006 | rt_free(rth); |
1007 | goal--; | 1007 | goal--; |
1008 | } | 1008 | } |
@@ -1068,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head) | |||
1068 | 1068 | ||
1069 | while (rth) { | 1069 | while (rth) { |
1070 | length += has_noalias(head, rth); | 1070 | length += has_noalias(head, rth); |
1071 | rth = rth->u.dst.rt_next; | 1071 | rth = rth->dst.rt_next; |
1072 | } | 1072 | } |
1073 | return length >> FRACT_BITS; | 1073 | return length >> FRACT_BITS; |
1074 | } | 1074 | } |
@@ -1090,7 +1090,7 @@ restart: | |||
1090 | candp = NULL; | 1090 | candp = NULL; |
1091 | now = jiffies; | 1091 | now = jiffies; |
1092 | 1092 | ||
1093 | if (!rt_caching(dev_net(rt->u.dst.dev))) { | 1093 | if (!rt_caching(dev_net(rt->dst.dev))) { |
1094 | /* | 1094 | /* |
1095 | * If we're not caching, just tell the caller we | 1095 | * If we're not caching, just tell the caller we |
1096 | * were successful and don't touch the route. The | 1096 | * were successful and don't touch the route. The |
@@ -1108,7 +1108,7 @@ restart: | |||
1108 | */ | 1108 | */ |
1109 | 1109 | ||
1110 | if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { | 1110 | if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { |
1111 | int err = arp_bind_neighbour(&rt->u.dst); | 1111 | int err = arp_bind_neighbour(&rt->dst); |
1112 | if (err) { | 1112 | if (err) { |
1113 | if (net_ratelimit()) | 1113 | if (net_ratelimit()) |
1114 | printk(KERN_WARNING | 1114 | printk(KERN_WARNING |
@@ -1127,19 +1127,19 @@ restart: | |||
1127 | spin_lock_bh(rt_hash_lock_addr(hash)); | 1127 | spin_lock_bh(rt_hash_lock_addr(hash)); |
1128 | while ((rth = *rthp) != NULL) { | 1128 | while ((rth = *rthp) != NULL) { |
1129 | if (rt_is_expired(rth)) { | 1129 | if (rt_is_expired(rth)) { |
1130 | *rthp = rth->u.dst.rt_next; | 1130 | *rthp = rth->dst.rt_next; |
1131 | rt_free(rth); | 1131 | rt_free(rth); |
1132 | continue; | 1132 | continue; |
1133 | } | 1133 | } |
1134 | if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { | 1134 | if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) { |
1135 | /* Put it first */ | 1135 | /* Put it first */ |
1136 | *rthp = rth->u.dst.rt_next; | 1136 | *rthp = rth->dst.rt_next; |
1137 | /* | 1137 | /* |
1138 | * Since lookup is lockfree, the deletion | 1138 | * Since lookup is lockfree, the deletion |
1139 | * must be visible to another weakly ordered CPU before | 1139 | * must be visible to another weakly ordered CPU before |
1140 | * the insertion at the start of the hash chain. | 1140 | * the insertion at the start of the hash chain. |
1141 | */ | 1141 | */ |
1142 | rcu_assign_pointer(rth->u.dst.rt_next, | 1142 | rcu_assign_pointer(rth->dst.rt_next, |
1143 | rt_hash_table[hash].chain); | 1143 | rt_hash_table[hash].chain); |
1144 | /* | 1144 | /* |
1145 | * Since lookup is lockfree, the update writes | 1145 | * Since lookup is lockfree, the update writes |
@@ -1147,18 +1147,18 @@ restart: | |||
1147 | */ | 1147 | */ |
1148 | rcu_assign_pointer(rt_hash_table[hash].chain, rth); | 1148 | rcu_assign_pointer(rt_hash_table[hash].chain, rth); |
1149 | 1149 | ||
1150 | dst_use(&rth->u.dst, now); | 1150 | dst_use(&rth->dst, now); |
1151 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1151 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1152 | 1152 | ||
1153 | rt_drop(rt); | 1153 | rt_drop(rt); |
1154 | if (rp) | 1154 | if (rp) |
1155 | *rp = rth; | 1155 | *rp = rth; |
1156 | else | 1156 | else |
1157 | skb_dst_set(skb, &rth->u.dst); | 1157 | skb_dst_set(skb, &rth->dst); |
1158 | return 0; | 1158 | return 0; |
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | if (!atomic_read(&rth->u.dst.__refcnt)) { | 1161 | if (!atomic_read(&rth->dst.__refcnt)) { |
1162 | u32 score = rt_score(rth); | 1162 | u32 score = rt_score(rth); |
1163 | 1163 | ||
1164 | if (score <= min_score) { | 1164 | if (score <= min_score) { |
@@ -1170,7 +1170,7 @@ restart: | |||
1170 | 1170 | ||
1171 | chain_length++; | 1171 | chain_length++; |
1172 | 1172 | ||
1173 | rthp = &rth->u.dst.rt_next; | 1173 | rthp = &rth->dst.rt_next; |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | if (cand) { | 1176 | if (cand) { |
@@ -1181,17 +1181,17 @@ restart: | |||
1181 | * only 2 entries per bucket. We will see. | 1181 | * only 2 entries per bucket. We will see. |
1182 | */ | 1182 | */ |
1183 | if (chain_length > ip_rt_gc_elasticity) { | 1183 | if (chain_length > ip_rt_gc_elasticity) { |
1184 | *candp = cand->u.dst.rt_next; | 1184 | *candp = cand->dst.rt_next; |
1185 | rt_free(cand); | 1185 | rt_free(cand); |
1186 | } | 1186 | } |
1187 | } else { | 1187 | } else { |
1188 | if (chain_length > rt_chain_length_max && | 1188 | if (chain_length > rt_chain_length_max && |
1189 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { | 1189 | slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { |
1190 | struct net *net = dev_net(rt->u.dst.dev); | 1190 | struct net *net = dev_net(rt->dst.dev); |
1191 | int num = ++net->ipv4.current_rt_cache_rebuild_count; | 1191 | int num = ++net->ipv4.current_rt_cache_rebuild_count; |
1192 | if (!rt_caching(net)) { | 1192 | if (!rt_caching(net)) { |
1193 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", | 1193 | printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", |
1194 | rt->u.dst.dev->name, num); | 1194 | rt->dst.dev->name, num); |
1195 | } | 1195 | } |
1196 | rt_emergency_hash_rebuild(net); | 1196 | rt_emergency_hash_rebuild(net); |
1197 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1197 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
@@ -1206,7 +1206,7 @@ restart: | |||
1206 | route or unicast forwarding path. | 1206 | route or unicast forwarding path. |
1207 | */ | 1207 | */ |
1208 | if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { | 1208 | if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) { |
1209 | int err = arp_bind_neighbour(&rt->u.dst); | 1209 | int err = arp_bind_neighbour(&rt->dst); |
1210 | if (err) { | 1210 | if (err) { |
1211 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1211 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1212 | 1212 | ||
@@ -1237,14 +1237,14 @@ restart: | |||
1237 | } | 1237 | } |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | rt->u.dst.rt_next = rt_hash_table[hash].chain; | 1240 | rt->dst.rt_next = rt_hash_table[hash].chain; |
1241 | 1241 | ||
1242 | #if RT_CACHE_DEBUG >= 2 | 1242 | #if RT_CACHE_DEBUG >= 2 |
1243 | if (rt->u.dst.rt_next) { | 1243 | if (rt->dst.rt_next) { |
1244 | struct rtable *trt; | 1244 | struct rtable *trt; |
1245 | printk(KERN_DEBUG "rt_cache @%02x: %pI4", | 1245 | printk(KERN_DEBUG "rt_cache @%02x: %pI4", |
1246 | hash, &rt->rt_dst); | 1246 | hash, &rt->rt_dst); |
1247 | for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next) | 1247 | for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next) |
1248 | printk(" . %pI4", &trt->rt_dst); | 1248 | printk(" . %pI4", &trt->rt_dst); |
1249 | printk("\n"); | 1249 | printk("\n"); |
1250 | } | 1250 | } |
@@ -1262,7 +1262,7 @@ skip_hashing: | |||
1262 | if (rp) | 1262 | if (rp) |
1263 | *rp = rt; | 1263 | *rp = rt; |
1264 | else | 1264 | else |
1265 | skb_dst_set(skb, &rt->u.dst); | 1265 | skb_dst_set(skb, &rt->dst); |
1266 | return 0; | 1266 | return 0; |
1267 | } | 1267 | } |
1268 | 1268 | ||
@@ -1334,20 +1334,21 @@ static void rt_del(unsigned hash, struct rtable *rt) | |||
1334 | ip_rt_put(rt); | 1334 | ip_rt_put(rt); |
1335 | while ((aux = *rthp) != NULL) { | 1335 | while ((aux = *rthp) != NULL) { |
1336 | if (aux == rt || rt_is_expired(aux)) { | 1336 | if (aux == rt || rt_is_expired(aux)) { |
1337 | *rthp = aux->u.dst.rt_next; | 1337 | *rthp = aux->dst.rt_next; |
1338 | rt_free(aux); | 1338 | rt_free(aux); |
1339 | continue; | 1339 | continue; |
1340 | } | 1340 | } |
1341 | rthp = &aux->u.dst.rt_next; | 1341 | rthp = &aux->dst.rt_next; |
1342 | } | 1342 | } |
1343 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1343 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | /* called in rcu_read_lock() section */ | ||
1346 | void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | 1347 | void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, |
1347 | __be32 saddr, struct net_device *dev) | 1348 | __be32 saddr, struct net_device *dev) |
1348 | { | 1349 | { |
1349 | int i, k; | 1350 | int i, k; |
1350 | struct in_device *in_dev = in_dev_get(dev); | 1351 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
1351 | struct rtable *rth, **rthp; | 1352 | struct rtable *rth, **rthp; |
1352 | __be32 skeys[2] = { saddr, 0 }; | 1353 | __be32 skeys[2] = { saddr, 0 }; |
1353 | int ikeys[2] = { dev->ifindex, 0 }; | 1354 | int ikeys[2] = { dev->ifindex, 0 }; |
@@ -1383,7 +1384,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1383 | 1384 | ||
1384 | rthp=&rt_hash_table[hash].chain; | 1385 | rthp=&rt_hash_table[hash].chain; |
1385 | 1386 | ||
1386 | rcu_read_lock(); | ||
1387 | while ((rth = rcu_dereference(*rthp)) != NULL) { | 1387 | while ((rth = rcu_dereference(*rthp)) != NULL) { |
1388 | struct rtable *rt; | 1388 | struct rtable *rt; |
1389 | 1389 | ||
@@ -1392,44 +1392,42 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1392 | rth->fl.oif != ikeys[k] || | 1392 | rth->fl.oif != ikeys[k] || |
1393 | rth->fl.iif != 0 || | 1393 | rth->fl.iif != 0 || |
1394 | rt_is_expired(rth) || | 1394 | rt_is_expired(rth) || |
1395 | !net_eq(dev_net(rth->u.dst.dev), net)) { | 1395 | !net_eq(dev_net(rth->dst.dev), net)) { |
1396 | rthp = &rth->u.dst.rt_next; | 1396 | rthp = &rth->dst.rt_next; |
1397 | continue; | 1397 | continue; |
1398 | } | 1398 | } |
1399 | 1399 | ||
1400 | if (rth->rt_dst != daddr || | 1400 | if (rth->rt_dst != daddr || |
1401 | rth->rt_src != saddr || | 1401 | rth->rt_src != saddr || |
1402 | rth->u.dst.error || | 1402 | rth->dst.error || |
1403 | rth->rt_gateway != old_gw || | 1403 | rth->rt_gateway != old_gw || |
1404 | rth->u.dst.dev != dev) | 1404 | rth->dst.dev != dev) |
1405 | break; | 1405 | break; |
1406 | 1406 | ||
1407 | dst_hold(&rth->u.dst); | 1407 | dst_hold(&rth->dst); |
1408 | rcu_read_unlock(); | ||
1409 | 1408 | ||
1410 | rt = dst_alloc(&ipv4_dst_ops); | 1409 | rt = dst_alloc(&ipv4_dst_ops); |
1411 | if (rt == NULL) { | 1410 | if (rt == NULL) { |
1412 | ip_rt_put(rth); | 1411 | ip_rt_put(rth); |
1413 | in_dev_put(in_dev); | ||
1414 | return; | 1412 | return; |
1415 | } | 1413 | } |
1416 | 1414 | ||
1417 | /* Copy all the information. */ | 1415 | /* Copy all the information. */ |
1418 | *rt = *rth; | 1416 | *rt = *rth; |
1419 | rt->u.dst.__use = 1; | 1417 | rt->dst.__use = 1; |
1420 | atomic_set(&rt->u.dst.__refcnt, 1); | 1418 | atomic_set(&rt->dst.__refcnt, 1); |
1421 | rt->u.dst.child = NULL; | 1419 | rt->dst.child = NULL; |
1422 | if (rt->u.dst.dev) | 1420 | if (rt->dst.dev) |
1423 | dev_hold(rt->u.dst.dev); | 1421 | dev_hold(rt->dst.dev); |
1424 | if (rt->idev) | 1422 | if (rt->idev) |
1425 | in_dev_hold(rt->idev); | 1423 | in_dev_hold(rt->idev); |
1426 | rt->u.dst.obsolete = -1; | 1424 | rt->dst.obsolete = -1; |
1427 | rt->u.dst.lastuse = jiffies; | 1425 | rt->dst.lastuse = jiffies; |
1428 | rt->u.dst.path = &rt->u.dst; | 1426 | rt->dst.path = &rt->dst; |
1429 | rt->u.dst.neighbour = NULL; | 1427 | rt->dst.neighbour = NULL; |
1430 | rt->u.dst.hh = NULL; | 1428 | rt->dst.hh = NULL; |
1431 | #ifdef CONFIG_XFRM | 1429 | #ifdef CONFIG_XFRM |
1432 | rt->u.dst.xfrm = NULL; | 1430 | rt->dst.xfrm = NULL; |
1433 | #endif | 1431 | #endif |
1434 | rt->rt_genid = rt_genid(net); | 1432 | rt->rt_genid = rt_genid(net); |
1435 | rt->rt_flags |= RTCF_REDIRECTED; | 1433 | rt->rt_flags |= RTCF_REDIRECTED; |
@@ -1438,23 +1436,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1438 | rt->rt_gateway = new_gw; | 1436 | rt->rt_gateway = new_gw; |
1439 | 1437 | ||
1440 | /* Redirect received -> path was valid */ | 1438 | /* Redirect received -> path was valid */ |
1441 | dst_confirm(&rth->u.dst); | 1439 | dst_confirm(&rth->dst); |
1442 | 1440 | ||
1443 | if (rt->peer) | 1441 | if (rt->peer) |
1444 | atomic_inc(&rt->peer->refcnt); | 1442 | atomic_inc(&rt->peer->refcnt); |
1445 | 1443 | ||
1446 | if (arp_bind_neighbour(&rt->u.dst) || | 1444 | if (arp_bind_neighbour(&rt->dst) || |
1447 | !(rt->u.dst.neighbour->nud_state & | 1445 | !(rt->dst.neighbour->nud_state & |
1448 | NUD_VALID)) { | 1446 | NUD_VALID)) { |
1449 | if (rt->u.dst.neighbour) | 1447 | if (rt->dst.neighbour) |
1450 | neigh_event_send(rt->u.dst.neighbour, NULL); | 1448 | neigh_event_send(rt->dst.neighbour, NULL); |
1451 | ip_rt_put(rth); | 1449 | ip_rt_put(rth); |
1452 | rt_drop(rt); | 1450 | rt_drop(rt); |
1453 | goto do_next; | 1451 | goto do_next; |
1454 | } | 1452 | } |
1455 | 1453 | ||
1456 | netevent.old = &rth->u.dst; | 1454 | netevent.old = &rth->dst; |
1457 | netevent.new = &rt->u.dst; | 1455 | netevent.new = &rt->dst; |
1458 | call_netevent_notifiers(NETEVENT_REDIRECT, | 1456 | call_netevent_notifiers(NETEVENT_REDIRECT, |
1459 | &netevent); | 1457 | &netevent); |
1460 | 1458 | ||
@@ -1463,12 +1461,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1463 | ip_rt_put(rt); | 1461 | ip_rt_put(rt); |
1464 | goto do_next; | 1462 | goto do_next; |
1465 | } | 1463 | } |
1466 | rcu_read_unlock(); | ||
1467 | do_next: | 1464 | do_next: |
1468 | ; | 1465 | ; |
1469 | } | 1466 | } |
1470 | } | 1467 | } |
1471 | in_dev_put(in_dev); | ||
1472 | return; | 1468 | return; |
1473 | 1469 | ||
1474 | reject_redirect: | 1470 | reject_redirect: |
@@ -1479,7 +1475,7 @@ reject_redirect: | |||
1479 | &old_gw, dev->name, &new_gw, | 1475 | &old_gw, dev->name, &new_gw, |
1480 | &saddr, &daddr); | 1476 | &saddr, &daddr); |
1481 | #endif | 1477 | #endif |
1482 | in_dev_put(in_dev); | 1478 | ; |
1483 | } | 1479 | } |
1484 | 1480 | ||
1485 | static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | 1481 | static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) |
@@ -1492,8 +1488,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1492 | ip_rt_put(rt); | 1488 | ip_rt_put(rt); |
1493 | ret = NULL; | 1489 | ret = NULL; |
1494 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || | 1490 | } else if ((rt->rt_flags & RTCF_REDIRECTED) || |
1495 | (rt->u.dst.expires && | 1491 | (rt->dst.expires && |
1496 | time_after_eq(jiffies, rt->u.dst.expires))) { | 1492 | time_after_eq(jiffies, rt->dst.expires))) { |
1497 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, | 1493 | unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, |
1498 | rt->fl.oif, | 1494 | rt->fl.oif, |
1499 | rt_genid(dev_net(dst->dev))); | 1495 | rt_genid(dev_net(dst->dev))); |
@@ -1531,7 +1527,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1531 | int log_martians; | 1527 | int log_martians; |
1532 | 1528 | ||
1533 | rcu_read_lock(); | 1529 | rcu_read_lock(); |
1534 | in_dev = __in_dev_get_rcu(rt->u.dst.dev); | 1530 | in_dev = __in_dev_get_rcu(rt->dst.dev); |
1535 | if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { | 1531 | if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) { |
1536 | rcu_read_unlock(); | 1532 | rcu_read_unlock(); |
1537 | return; | 1533 | return; |
@@ -1542,30 +1538,30 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
1542 | /* No redirected packets during ip_rt_redirect_silence; | 1538 | /* No redirected packets during ip_rt_redirect_silence; |
1543 | * reset the algorithm. | 1539 | * reset the algorithm. |
1544 | */ | 1540 | */ |
1545 | if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence)) | 1541 | if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence)) |
1546 | rt->u.dst.rate_tokens = 0; | 1542 | rt->dst.rate_tokens = 0; |
1547 | 1543 | ||
1548 | /* Too many ignored redirects; do not send anything | 1544 | /* Too many ignored redirects; do not send anything |
1549 | * set u.dst.rate_last to the last seen redirected packet. | 1545 | * set dst.rate_last to the last seen redirected packet. |
1550 | */ | 1546 | */ |
1551 | if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) { | 1547 | if (rt->dst.rate_tokens >= ip_rt_redirect_number) { |
1552 | rt->u.dst.rate_last = jiffies; | 1548 | rt->dst.rate_last = jiffies; |
1553 | return; | 1549 | return; |
1554 | } | 1550 | } |
1555 | 1551 | ||
1556 | /* Check for load limit; set rate_last to the latest sent | 1552 | /* Check for load limit; set rate_last to the latest sent |
1557 | * redirect. | 1553 | * redirect. |
1558 | */ | 1554 | */ |
1559 | if (rt->u.dst.rate_tokens == 0 || | 1555 | if (rt->dst.rate_tokens == 0 || |
1560 | time_after(jiffies, | 1556 | time_after(jiffies, |
1561 | (rt->u.dst.rate_last + | 1557 | (rt->dst.rate_last + |
1562 | (ip_rt_redirect_load << rt->u.dst.rate_tokens)))) { | 1558 | (ip_rt_redirect_load << rt->dst.rate_tokens)))) { |
1563 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); | 1559 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); |
1564 | rt->u.dst.rate_last = jiffies; | 1560 | rt->dst.rate_last = jiffies; |
1565 | ++rt->u.dst.rate_tokens; | 1561 | ++rt->dst.rate_tokens; |
1566 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 1562 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
1567 | if (log_martians && | 1563 | if (log_martians && |
1568 | rt->u.dst.rate_tokens == ip_rt_redirect_number && | 1564 | rt->dst.rate_tokens == ip_rt_redirect_number && |
1569 | net_ratelimit()) | 1565 | net_ratelimit()) |
1570 | printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", | 1566 | printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", |
1571 | &rt->rt_src, rt->rt_iif, | 1567 | &rt->rt_src, rt->rt_iif, |
@@ -1580,7 +1576,7 @@ static int ip_error(struct sk_buff *skb) | |||
1580 | unsigned long now; | 1576 | unsigned long now; |
1581 | int code; | 1577 | int code; |
1582 | 1578 | ||
1583 | switch (rt->u.dst.error) { | 1579 | switch (rt->dst.error) { |
1584 | case EINVAL: | 1580 | case EINVAL: |
1585 | default: | 1581 | default: |
1586 | goto out; | 1582 | goto out; |
@@ -1589,7 +1585,7 @@ static int ip_error(struct sk_buff *skb) | |||
1589 | break; | 1585 | break; |
1590 | case ENETUNREACH: | 1586 | case ENETUNREACH: |
1591 | code = ICMP_NET_UNREACH; | 1587 | code = ICMP_NET_UNREACH; |
1592 | IP_INC_STATS_BH(dev_net(rt->u.dst.dev), | 1588 | IP_INC_STATS_BH(dev_net(rt->dst.dev), |
1593 | IPSTATS_MIB_INNOROUTES); | 1589 | IPSTATS_MIB_INNOROUTES); |
1594 | break; | 1590 | break; |
1595 | case EACCES: | 1591 | case EACCES: |
@@ -1598,12 +1594,12 @@ static int ip_error(struct sk_buff *skb) | |||
1598 | } | 1594 | } |
1599 | 1595 | ||
1600 | now = jiffies; | 1596 | now = jiffies; |
1601 | rt->u.dst.rate_tokens += now - rt->u.dst.rate_last; | 1597 | rt->dst.rate_tokens += now - rt->dst.rate_last; |
1602 | if (rt->u.dst.rate_tokens > ip_rt_error_burst) | 1598 | if (rt->dst.rate_tokens > ip_rt_error_burst) |
1603 | rt->u.dst.rate_tokens = ip_rt_error_burst; | 1599 | rt->dst.rate_tokens = ip_rt_error_burst; |
1604 | rt->u.dst.rate_last = now; | 1600 | rt->dst.rate_last = now; |
1605 | if (rt->u.dst.rate_tokens >= ip_rt_error_cost) { | 1601 | if (rt->dst.rate_tokens >= ip_rt_error_cost) { |
1606 | rt->u.dst.rate_tokens -= ip_rt_error_cost; | 1602 | rt->dst.rate_tokens -= ip_rt_error_cost; |
1607 | icmp_send(skb, ICMP_DEST_UNREACH, code, 0); | 1603 | icmp_send(skb, ICMP_DEST_UNREACH, code, 0); |
1608 | } | 1604 | } |
1609 | 1605 | ||
@@ -1648,7 +1644,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1648 | 1644 | ||
1649 | rcu_read_lock(); | 1645 | rcu_read_lock(); |
1650 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 1646 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
1651 | rth = rcu_dereference(rth->u.dst.rt_next)) { | 1647 | rth = rcu_dereference(rth->dst.rt_next)) { |
1652 | unsigned short mtu = new_mtu; | 1648 | unsigned short mtu = new_mtu; |
1653 | 1649 | ||
1654 | if (rth->fl.fl4_dst != daddr || | 1650 | if (rth->fl.fl4_dst != daddr || |
@@ -1657,8 +1653,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1657 | rth->rt_src != iph->saddr || | 1653 | rth->rt_src != iph->saddr || |
1658 | rth->fl.oif != ikeys[k] || | 1654 | rth->fl.oif != ikeys[k] || |
1659 | rth->fl.iif != 0 || | 1655 | rth->fl.iif != 0 || |
1660 | dst_metric_locked(&rth->u.dst, RTAX_MTU) || | 1656 | dst_metric_locked(&rth->dst, RTAX_MTU) || |
1661 | !net_eq(dev_net(rth->u.dst.dev), net) || | 1657 | !net_eq(dev_net(rth->dst.dev), net) || |
1662 | rt_is_expired(rth)) | 1658 | rt_is_expired(rth)) |
1663 | continue; | 1659 | continue; |
1664 | 1660 | ||
@@ -1666,22 +1662,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, | |||
1666 | 1662 | ||
1667 | /* BSD 4.2 compatibility hack :-( */ | 1663 | /* BSD 4.2 compatibility hack :-( */ |
1668 | if (mtu == 0 && | 1664 | if (mtu == 0 && |
1669 | old_mtu >= dst_mtu(&rth->u.dst) && | 1665 | old_mtu >= dst_mtu(&rth->dst) && |
1670 | old_mtu >= 68 + (iph->ihl << 2)) | 1666 | old_mtu >= 68 + (iph->ihl << 2)) |
1671 | old_mtu -= iph->ihl << 2; | 1667 | old_mtu -= iph->ihl << 2; |
1672 | 1668 | ||
1673 | mtu = guess_mtu(old_mtu); | 1669 | mtu = guess_mtu(old_mtu); |
1674 | } | 1670 | } |
1675 | if (mtu <= dst_mtu(&rth->u.dst)) { | 1671 | if (mtu <= dst_mtu(&rth->dst)) { |
1676 | if (mtu < dst_mtu(&rth->u.dst)) { | 1672 | if (mtu < dst_mtu(&rth->dst)) { |
1677 | dst_confirm(&rth->u.dst); | 1673 | dst_confirm(&rth->dst); |
1678 | if (mtu < ip_rt_min_pmtu) { | 1674 | if (mtu < ip_rt_min_pmtu) { |
1679 | mtu = ip_rt_min_pmtu; | 1675 | mtu = ip_rt_min_pmtu; |
1680 | rth->u.dst.metrics[RTAX_LOCK-1] |= | 1676 | rth->dst.metrics[RTAX_LOCK-1] |= |
1681 | (1 << RTAX_MTU); | 1677 | (1 << RTAX_MTU); |
1682 | } | 1678 | } |
1683 | rth->u.dst.metrics[RTAX_MTU-1] = mtu; | 1679 | rth->dst.metrics[RTAX_MTU-1] = mtu; |
1684 | dst_set_expires(&rth->u.dst, | 1680 | dst_set_expires(&rth->dst, |
1685 | ip_rt_mtu_expires); | 1681 | ip_rt_mtu_expires); |
1686 | } | 1682 | } |
1687 | est_mtu = mtu; | 1683 | est_mtu = mtu; |
@@ -1754,7 +1750,7 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
1754 | 1750 | ||
1755 | rt = skb_rtable(skb); | 1751 | rt = skb_rtable(skb); |
1756 | if (rt) | 1752 | if (rt) |
1757 | dst_set_expires(&rt->u.dst, 0); | 1753 | dst_set_expires(&rt->dst, 0); |
1758 | } | 1754 | } |
1759 | 1755 | ||
1760 | static int ip_rt_bug(struct sk_buff *skb) | 1756 | static int ip_rt_bug(struct sk_buff *skb) |
@@ -1782,11 +1778,11 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt) | |||
1782 | 1778 | ||
1783 | if (rt->fl.iif == 0) | 1779 | if (rt->fl.iif == 0) |
1784 | src = rt->rt_src; | 1780 | src = rt->rt_src; |
1785 | else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) { | 1781 | else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) { |
1786 | src = FIB_RES_PREFSRC(res); | 1782 | src = FIB_RES_PREFSRC(res); |
1787 | fib_res_put(&res); | 1783 | fib_res_put(&res); |
1788 | } else | 1784 | } else |
1789 | src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway, | 1785 | src = inet_select_addr(rt->dst.dev, rt->rt_gateway, |
1790 | RT_SCOPE_UNIVERSE); | 1786 | RT_SCOPE_UNIVERSE); |
1791 | memcpy(addr, &src, 4); | 1787 | memcpy(addr, &src, 4); |
1792 | } | 1788 | } |
@@ -1794,10 +1790,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt) | |||
1794 | #ifdef CONFIG_NET_CLS_ROUTE | 1790 | #ifdef CONFIG_NET_CLS_ROUTE |
1795 | static void set_class_tag(struct rtable *rt, u32 tag) | 1791 | static void set_class_tag(struct rtable *rt, u32 tag) |
1796 | { | 1792 | { |
1797 | if (!(rt->u.dst.tclassid & 0xFFFF)) | 1793 | if (!(rt->dst.tclassid & 0xFFFF)) |
1798 | rt->u.dst.tclassid |= tag & 0xFFFF; | 1794 | rt->dst.tclassid |= tag & 0xFFFF; |
1799 | if (!(rt->u.dst.tclassid & 0xFFFF0000)) | 1795 | if (!(rt->dst.tclassid & 0xFFFF0000)) |
1800 | rt->u.dst.tclassid |= tag & 0xFFFF0000; | 1796 | rt->dst.tclassid |= tag & 0xFFFF0000; |
1801 | } | 1797 | } |
1802 | #endif | 1798 | #endif |
1803 | 1799 | ||
@@ -1809,30 +1805,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) | |||
1809 | if (FIB_RES_GW(*res) && | 1805 | if (FIB_RES_GW(*res) && |
1810 | FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) | 1806 | FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) |
1811 | rt->rt_gateway = FIB_RES_GW(*res); | 1807 | rt->rt_gateway = FIB_RES_GW(*res); |
1812 | memcpy(rt->u.dst.metrics, fi->fib_metrics, | 1808 | memcpy(rt->dst.metrics, fi->fib_metrics, |
1813 | sizeof(rt->u.dst.metrics)); | 1809 | sizeof(rt->dst.metrics)); |
1814 | if (fi->fib_mtu == 0) { | 1810 | if (fi->fib_mtu == 0) { |
1815 | rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; | 1811 | rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu; |
1816 | if (dst_metric_locked(&rt->u.dst, RTAX_MTU) && | 1812 | if (dst_metric_locked(&rt->dst, RTAX_MTU) && |
1817 | rt->rt_gateway != rt->rt_dst && | 1813 | rt->rt_gateway != rt->rt_dst && |
1818 | rt->u.dst.dev->mtu > 576) | 1814 | rt->dst.dev->mtu > 576) |
1819 | rt->u.dst.metrics[RTAX_MTU-1] = 576; | 1815 | rt->dst.metrics[RTAX_MTU-1] = 576; |
1820 | } | 1816 | } |
1821 | #ifdef CONFIG_NET_CLS_ROUTE | 1817 | #ifdef CONFIG_NET_CLS_ROUTE |
1822 | rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid; | 1818 | rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid; |
1823 | #endif | 1819 | #endif |
1824 | } else | 1820 | } else |
1825 | rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu; | 1821 | rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu; |
1826 | 1822 | ||
1827 | if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) | 1823 | if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0) |
1828 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; | 1824 | rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; |
1829 | if (dst_mtu(&rt->u.dst) > IP_MAX_MTU) | 1825 | if (dst_mtu(&rt->dst) > IP_MAX_MTU) |
1830 | rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; | 1826 | rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; |
1831 | if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0) | 1827 | if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0) |
1832 | rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40, | 1828 | rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40, |
1833 | ip_rt_min_advmss); | 1829 | ip_rt_min_advmss); |
1834 | if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40) | 1830 | if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40) |
1835 | rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; | 1831 | rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40; |
1836 | 1832 | ||
1837 | #ifdef CONFIG_NET_CLS_ROUTE | 1833 | #ifdef CONFIG_NET_CLS_ROUTE |
1838 | #ifdef CONFIG_IP_MULTIPLE_TABLES | 1834 | #ifdef CONFIG_IP_MULTIPLE_TABLES |
@@ -1843,14 +1839,16 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) | |||
1843 | rt->rt_type = res->type; | 1839 | rt->rt_type = res->type; |
1844 | } | 1840 | } |
1845 | 1841 | ||
1842 | /* called in rcu_read_lock() section */ | ||
1846 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 1843 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
1847 | u8 tos, struct net_device *dev, int our) | 1844 | u8 tos, struct net_device *dev, int our) |
1848 | { | 1845 | { |
1849 | unsigned hash; | 1846 | unsigned int hash; |
1850 | struct rtable *rth; | 1847 | struct rtable *rth; |
1851 | __be32 spec_dst; | 1848 | __be32 spec_dst; |
1852 | struct in_device *in_dev = in_dev_get(dev); | 1849 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
1853 | u32 itag = 0; | 1850 | u32 itag = 0; |
1851 | int err; | ||
1854 | 1852 | ||
1855 | /* Primary sanity checks. */ | 1853 | /* Primary sanity checks. */ |
1856 | 1854 | ||
@@ -1865,21 +1863,23 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1865 | if (!ipv4_is_local_multicast(daddr)) | 1863 | if (!ipv4_is_local_multicast(daddr)) |
1866 | goto e_inval; | 1864 | goto e_inval; |
1867 | spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); | 1865 | spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); |
1868 | } else if (fib_validate_source(saddr, 0, tos, 0, | 1866 | } else { |
1869 | dev, &spec_dst, &itag, 0) < 0) | 1867 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, |
1870 | goto e_inval; | 1868 | &itag, 0); |
1871 | 1869 | if (err < 0) | |
1870 | goto e_err; | ||
1871 | } | ||
1872 | rth = dst_alloc(&ipv4_dst_ops); | 1872 | rth = dst_alloc(&ipv4_dst_ops); |
1873 | if (!rth) | 1873 | if (!rth) |
1874 | goto e_nobufs; | 1874 | goto e_nobufs; |
1875 | 1875 | ||
1876 | rth->u.dst.output = ip_rt_bug; | 1876 | rth->dst.output = ip_rt_bug; |
1877 | rth->u.dst.obsolete = -1; | 1877 | rth->dst.obsolete = -1; |
1878 | 1878 | ||
1879 | atomic_set(&rth->u.dst.__refcnt, 1); | 1879 | atomic_set(&rth->dst.__refcnt, 1); |
1880 | rth->u.dst.flags= DST_HOST; | 1880 | rth->dst.flags= DST_HOST; |
1881 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 1881 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
1882 | rth->u.dst.flags |= DST_NOPOLICY; | 1882 | rth->dst.flags |= DST_NOPOLICY; |
1883 | rth->fl.fl4_dst = daddr; | 1883 | rth->fl.fl4_dst = daddr; |
1884 | rth->rt_dst = daddr; | 1884 | rth->rt_dst = daddr; |
1885 | rth->fl.fl4_tos = tos; | 1885 | rth->fl.fl4_tos = tos; |
@@ -1887,13 +1887,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1887 | rth->fl.fl4_src = saddr; | 1887 | rth->fl.fl4_src = saddr; |
1888 | rth->rt_src = saddr; | 1888 | rth->rt_src = saddr; |
1889 | #ifdef CONFIG_NET_CLS_ROUTE | 1889 | #ifdef CONFIG_NET_CLS_ROUTE |
1890 | rth->u.dst.tclassid = itag; | 1890 | rth->dst.tclassid = itag; |
1891 | #endif | 1891 | #endif |
1892 | rth->rt_iif = | 1892 | rth->rt_iif = |
1893 | rth->fl.iif = dev->ifindex; | 1893 | rth->fl.iif = dev->ifindex; |
1894 | rth->u.dst.dev = init_net.loopback_dev; | 1894 | rth->dst.dev = init_net.loopback_dev; |
1895 | dev_hold(rth->u.dst.dev); | 1895 | dev_hold(rth->dst.dev); |
1896 | rth->idev = in_dev_get(rth->u.dst.dev); | 1896 | rth->idev = in_dev_get(rth->dst.dev); |
1897 | rth->fl.oif = 0; | 1897 | rth->fl.oif = 0; |
1898 | rth->rt_gateway = daddr; | 1898 | rth->rt_gateway = daddr; |
1899 | rth->rt_spec_dst= spec_dst; | 1899 | rth->rt_spec_dst= spec_dst; |
@@ -1901,27 +1901,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1901 | rth->rt_flags = RTCF_MULTICAST; | 1901 | rth->rt_flags = RTCF_MULTICAST; |
1902 | rth->rt_type = RTN_MULTICAST; | 1902 | rth->rt_type = RTN_MULTICAST; |
1903 | if (our) { | 1903 | if (our) { |
1904 | rth->u.dst.input= ip_local_deliver; | 1904 | rth->dst.input= ip_local_deliver; |
1905 | rth->rt_flags |= RTCF_LOCAL; | 1905 | rth->rt_flags |= RTCF_LOCAL; |
1906 | } | 1906 | } |
1907 | 1907 | ||
1908 | #ifdef CONFIG_IP_MROUTE | 1908 | #ifdef CONFIG_IP_MROUTE |
1909 | if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) | 1909 | if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev)) |
1910 | rth->u.dst.input = ip_mr_input; | 1910 | rth->dst.input = ip_mr_input; |
1911 | #endif | 1911 | #endif |
1912 | RT_CACHE_STAT_INC(in_slow_mc); | 1912 | RT_CACHE_STAT_INC(in_slow_mc); |
1913 | 1913 | ||
1914 | in_dev_put(in_dev); | ||
1915 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1914 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1916 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); | 1915 | return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); |
1917 | 1916 | ||
1918 | e_nobufs: | 1917 | e_nobufs: |
1919 | in_dev_put(in_dev); | ||
1920 | return -ENOBUFS; | 1918 | return -ENOBUFS; |
1921 | |||
1922 | e_inval: | 1919 | e_inval: |
1923 | in_dev_put(in_dev); | ||
1924 | return -EINVAL; | 1920 | return -EINVAL; |
1921 | e_err: | ||
1922 | return err; | ||
1925 | } | 1923 | } |
1926 | 1924 | ||
1927 | 1925 | ||
@@ -1955,22 +1953,22 @@ static void ip_handle_martian_source(struct net_device *dev, | |||
1955 | #endif | 1953 | #endif |
1956 | } | 1954 | } |
1957 | 1955 | ||
1956 | /* called in rcu_read_lock() section */ | ||
1958 | static int __mkroute_input(struct sk_buff *skb, | 1957 | static int __mkroute_input(struct sk_buff *skb, |
1959 | struct fib_result *res, | 1958 | struct fib_result *res, |
1960 | struct in_device *in_dev, | 1959 | struct in_device *in_dev, |
1961 | __be32 daddr, __be32 saddr, u32 tos, | 1960 | __be32 daddr, __be32 saddr, u32 tos, |
1962 | struct rtable **result) | 1961 | struct rtable **result) |
1963 | { | 1962 | { |
1964 | |||
1965 | struct rtable *rth; | 1963 | struct rtable *rth; |
1966 | int err; | 1964 | int err; |
1967 | struct in_device *out_dev; | 1965 | struct in_device *out_dev; |
1968 | unsigned flags = 0; | 1966 | unsigned int flags = 0; |
1969 | __be32 spec_dst; | 1967 | __be32 spec_dst; |
1970 | u32 itag; | 1968 | u32 itag; |
1971 | 1969 | ||
1972 | /* get a working reference to the output device */ | 1970 | /* get a working reference to the output device */ |
1973 | out_dev = in_dev_get(FIB_RES_DEV(*res)); | 1971 | out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); |
1974 | if (out_dev == NULL) { | 1972 | if (out_dev == NULL) { |
1975 | if (net_ratelimit()) | 1973 | if (net_ratelimit()) |
1976 | printk(KERN_CRIT "Bug in ip_route_input" \ | 1974 | printk(KERN_CRIT "Bug in ip_route_input" \ |
@@ -1985,7 +1983,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
1985 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, | 1983 | ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, |
1986 | saddr); | 1984 | saddr); |
1987 | 1985 | ||
1988 | err = -EINVAL; | ||
1989 | goto cleanup; | 1986 | goto cleanup; |
1990 | } | 1987 | } |
1991 | 1988 | ||
@@ -2019,12 +2016,12 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2019 | goto cleanup; | 2016 | goto cleanup; |
2020 | } | 2017 | } |
2021 | 2018 | ||
2022 | atomic_set(&rth->u.dst.__refcnt, 1); | 2019 | atomic_set(&rth->dst.__refcnt, 1); |
2023 | rth->u.dst.flags= DST_HOST; | 2020 | rth->dst.flags= DST_HOST; |
2024 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 2021 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
2025 | rth->u.dst.flags |= DST_NOPOLICY; | 2022 | rth->dst.flags |= DST_NOPOLICY; |
2026 | if (IN_DEV_CONF_GET(out_dev, NOXFRM)) | 2023 | if (IN_DEV_CONF_GET(out_dev, NOXFRM)) |
2027 | rth->u.dst.flags |= DST_NOXFRM; | 2024 | rth->dst.flags |= DST_NOXFRM; |
2028 | rth->fl.fl4_dst = daddr; | 2025 | rth->fl.fl4_dst = daddr; |
2029 | rth->rt_dst = daddr; | 2026 | rth->rt_dst = daddr; |
2030 | rth->fl.fl4_tos = tos; | 2027 | rth->fl.fl4_tos = tos; |
@@ -2034,16 +2031,16 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2034 | rth->rt_gateway = daddr; | 2031 | rth->rt_gateway = daddr; |
2035 | rth->rt_iif = | 2032 | rth->rt_iif = |
2036 | rth->fl.iif = in_dev->dev->ifindex; | 2033 | rth->fl.iif = in_dev->dev->ifindex; |
2037 | rth->u.dst.dev = (out_dev)->dev; | 2034 | rth->dst.dev = (out_dev)->dev; |
2038 | dev_hold(rth->u.dst.dev); | 2035 | dev_hold(rth->dst.dev); |
2039 | rth->idev = in_dev_get(rth->u.dst.dev); | 2036 | rth->idev = in_dev_get(rth->dst.dev); |
2040 | rth->fl.oif = 0; | 2037 | rth->fl.oif = 0; |
2041 | rth->rt_spec_dst= spec_dst; | 2038 | rth->rt_spec_dst= spec_dst; |
2042 | 2039 | ||
2043 | rth->u.dst.obsolete = -1; | 2040 | rth->dst.obsolete = -1; |
2044 | rth->u.dst.input = ip_forward; | 2041 | rth->dst.input = ip_forward; |
2045 | rth->u.dst.output = ip_output; | 2042 | rth->dst.output = ip_output; |
2046 | rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev)); | 2043 | rth->rt_genid = rt_genid(dev_net(rth->dst.dev)); |
2047 | 2044 | ||
2048 | rt_set_nexthop(rth, res, itag); | 2045 | rt_set_nexthop(rth, res, itag); |
2049 | 2046 | ||
@@ -2052,8 +2049,6 @@ static int __mkroute_input(struct sk_buff *skb, | |||
2052 | *result = rth; | 2049 | *result = rth; |
2053 | err = 0; | 2050 | err = 0; |
2054 | cleanup: | 2051 | cleanup: |
2055 | /* release the working reference to the output device */ | ||
2056 | in_dev_put(out_dev); | ||
2057 | return err; | 2052 | return err; |
2058 | } | 2053 | } |
2059 | 2054 | ||
@@ -2079,7 +2074,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2079 | 2074 | ||
2080 | /* put it into the cache */ | 2075 | /* put it into the cache */ |
2081 | hash = rt_hash(daddr, saddr, fl->iif, | 2076 | hash = rt_hash(daddr, saddr, fl->iif, |
2082 | rt_genid(dev_net(rth->u.dst.dev))); | 2077 | rt_genid(dev_net(rth->dst.dev))); |
2083 | return rt_intern_hash(hash, rth, NULL, skb, fl->iif); | 2078 | return rt_intern_hash(hash, rth, NULL, skb, fl->iif); |
2084 | } | 2079 | } |
2085 | 2080 | ||
@@ -2097,7 +2092,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2097 | u8 tos, struct net_device *dev) | 2092 | u8 tos, struct net_device *dev) |
2098 | { | 2093 | { |
2099 | struct fib_result res; | 2094 | struct fib_result res; |
2100 | struct in_device *in_dev = in_dev_get(dev); | 2095 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
2101 | struct flowi fl = { .nl_u = { .ip4_u = | 2096 | struct flowi fl = { .nl_u = { .ip4_u = |
2102 | { .daddr = daddr, | 2097 | { .daddr = daddr, |
2103 | .saddr = saddr, | 2098 | .saddr = saddr, |
@@ -2157,13 +2152,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2157 | goto brd_input; | 2152 | goto brd_input; |
2158 | 2153 | ||
2159 | if (res.type == RTN_LOCAL) { | 2154 | if (res.type == RTN_LOCAL) { |
2160 | int result; | 2155 | err = fib_validate_source(saddr, daddr, tos, |
2161 | result = fib_validate_source(saddr, daddr, tos, | ||
2162 | net->loopback_dev->ifindex, | 2156 | net->loopback_dev->ifindex, |
2163 | dev, &spec_dst, &itag, skb->mark); | 2157 | dev, &spec_dst, &itag, skb->mark); |
2164 | if (result < 0) | 2158 | if (err < 0) |
2165 | goto martian_source; | 2159 | goto martian_source_keep_err; |
2166 | if (result) | 2160 | if (err) |
2167 | flags |= RTCF_DIRECTSRC; | 2161 | flags |= RTCF_DIRECTSRC; |
2168 | spec_dst = daddr; | 2162 | spec_dst = daddr; |
2169 | goto local_input; | 2163 | goto local_input; |
@@ -2176,7 +2170,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2176 | 2170 | ||
2177 | err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); | 2171 | err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos); |
2178 | done: | 2172 | done: |
2179 | in_dev_put(in_dev); | ||
2180 | if (free_res) | 2173 | if (free_res) |
2181 | fib_res_put(&res); | 2174 | fib_res_put(&res); |
2182 | out: return err; | 2175 | out: return err; |
@@ -2191,7 +2184,7 @@ brd_input: | |||
2191 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, | 2184 | err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, |
2192 | &itag, skb->mark); | 2185 | &itag, skb->mark); |
2193 | if (err < 0) | 2186 | if (err < 0) |
2194 | goto martian_source; | 2187 | goto martian_source_keep_err; |
2195 | if (err) | 2188 | if (err) |
2196 | flags |= RTCF_DIRECTSRC; | 2189 | flags |= RTCF_DIRECTSRC; |
2197 | } | 2190 | } |
@@ -2204,14 +2197,14 @@ local_input: | |||
2204 | if (!rth) | 2197 | if (!rth) |
2205 | goto e_nobufs; | 2198 | goto e_nobufs; |
2206 | 2199 | ||
2207 | rth->u.dst.output= ip_rt_bug; | 2200 | rth->dst.output= ip_rt_bug; |
2208 | rth->u.dst.obsolete = -1; | 2201 | rth->dst.obsolete = -1; |
2209 | rth->rt_genid = rt_genid(net); | 2202 | rth->rt_genid = rt_genid(net); |
2210 | 2203 | ||
2211 | atomic_set(&rth->u.dst.__refcnt, 1); | 2204 | atomic_set(&rth->dst.__refcnt, 1); |
2212 | rth->u.dst.flags= DST_HOST; | 2205 | rth->dst.flags= DST_HOST; |
2213 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 2206 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
2214 | rth->u.dst.flags |= DST_NOPOLICY; | 2207 | rth->dst.flags |= DST_NOPOLICY; |
2215 | rth->fl.fl4_dst = daddr; | 2208 | rth->fl.fl4_dst = daddr; |
2216 | rth->rt_dst = daddr; | 2209 | rth->rt_dst = daddr; |
2217 | rth->fl.fl4_tos = tos; | 2210 | rth->fl.fl4_tos = tos; |
@@ -2219,20 +2212,20 @@ local_input: | |||
2219 | rth->fl.fl4_src = saddr; | 2212 | rth->fl.fl4_src = saddr; |
2220 | rth->rt_src = saddr; | 2213 | rth->rt_src = saddr; |
2221 | #ifdef CONFIG_NET_CLS_ROUTE | 2214 | #ifdef CONFIG_NET_CLS_ROUTE |
2222 | rth->u.dst.tclassid = itag; | 2215 | rth->dst.tclassid = itag; |
2223 | #endif | 2216 | #endif |
2224 | rth->rt_iif = | 2217 | rth->rt_iif = |
2225 | rth->fl.iif = dev->ifindex; | 2218 | rth->fl.iif = dev->ifindex; |
2226 | rth->u.dst.dev = net->loopback_dev; | 2219 | rth->dst.dev = net->loopback_dev; |
2227 | dev_hold(rth->u.dst.dev); | 2220 | dev_hold(rth->dst.dev); |
2228 | rth->idev = in_dev_get(rth->u.dst.dev); | 2221 | rth->idev = in_dev_get(rth->dst.dev); |
2229 | rth->rt_gateway = daddr; | 2222 | rth->rt_gateway = daddr; |
2230 | rth->rt_spec_dst= spec_dst; | 2223 | rth->rt_spec_dst= spec_dst; |
2231 | rth->u.dst.input= ip_local_deliver; | 2224 | rth->dst.input= ip_local_deliver; |
2232 | rth->rt_flags = flags|RTCF_LOCAL; | 2225 | rth->rt_flags = flags|RTCF_LOCAL; |
2233 | if (res.type == RTN_UNREACHABLE) { | 2226 | if (res.type == RTN_UNREACHABLE) { |
2234 | rth->u.dst.input= ip_error; | 2227 | rth->dst.input= ip_error; |
2235 | rth->u.dst.error= -err; | 2228 | rth->dst.error= -err; |
2236 | rth->rt_flags &= ~RTCF_LOCAL; | 2229 | rth->rt_flags &= ~RTCF_LOCAL; |
2237 | } | 2230 | } |
2238 | rth->rt_type = res.type; | 2231 | rth->rt_type = res.type; |
@@ -2272,8 +2265,10 @@ e_nobufs: | |||
2272 | goto done; | 2265 | goto done; |
2273 | 2266 | ||
2274 | martian_source: | 2267 | martian_source: |
2268 | err = -EINVAL; | ||
2269 | martian_source_keep_err: | ||
2275 | ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); | 2270 | ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); |
2276 | goto e_inval; | 2271 | goto done; |
2277 | } | 2272 | } |
2278 | 2273 | ||
2279 | int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 2274 | int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
@@ -2283,32 +2278,34 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2283 | unsigned hash; | 2278 | unsigned hash; |
2284 | int iif = dev->ifindex; | 2279 | int iif = dev->ifindex; |
2285 | struct net *net; | 2280 | struct net *net; |
2281 | int res; | ||
2286 | 2282 | ||
2287 | net = dev_net(dev); | 2283 | net = dev_net(dev); |
2288 | 2284 | ||
2285 | rcu_read_lock(); | ||
2286 | |||
2289 | if (!rt_caching(net)) | 2287 | if (!rt_caching(net)) |
2290 | goto skip_cache; | 2288 | goto skip_cache; |
2291 | 2289 | ||
2292 | tos &= IPTOS_RT_MASK; | 2290 | tos &= IPTOS_RT_MASK; |
2293 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); | 2291 | hash = rt_hash(daddr, saddr, iif, rt_genid(net)); |
2294 | 2292 | ||
2295 | rcu_read_lock(); | ||
2296 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; | 2293 | for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; |
2297 | rth = rcu_dereference(rth->u.dst.rt_next)) { | 2294 | rth = rcu_dereference(rth->dst.rt_next)) { |
2298 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | | 2295 | if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | |
2299 | ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | | 2296 | ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | |
2300 | (rth->fl.iif ^ iif) | | 2297 | (rth->fl.iif ^ iif) | |
2301 | rth->fl.oif | | 2298 | rth->fl.oif | |
2302 | (rth->fl.fl4_tos ^ tos)) == 0 && | 2299 | (rth->fl.fl4_tos ^ tos)) == 0 && |
2303 | rth->fl.mark == skb->mark && | 2300 | rth->fl.mark == skb->mark && |
2304 | net_eq(dev_net(rth->u.dst.dev), net) && | 2301 | net_eq(dev_net(rth->dst.dev), net) && |
2305 | !rt_is_expired(rth)) { | 2302 | !rt_is_expired(rth)) { |
2306 | if (noref) { | 2303 | if (noref) { |
2307 | dst_use_noref(&rth->u.dst, jiffies); | 2304 | dst_use_noref(&rth->dst, jiffies); |
2308 | skb_dst_set_noref(skb, &rth->u.dst); | 2305 | skb_dst_set_noref(skb, &rth->dst); |
2309 | } else { | 2306 | } else { |
2310 | dst_use(&rth->u.dst, jiffies); | 2307 | dst_use(&rth->dst, jiffies); |
2311 | skb_dst_set(skb, &rth->u.dst); | 2308 | skb_dst_set(skb, &rth->dst); |
2312 | } | 2309 | } |
2313 | RT_CACHE_STAT_INC(in_hit); | 2310 | RT_CACHE_STAT_INC(in_hit); |
2314 | rcu_read_unlock(); | 2311 | rcu_read_unlock(); |
@@ -2316,7 +2313,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2316 | } | 2313 | } |
2317 | RT_CACHE_STAT_INC(in_hlist_search); | 2314 | RT_CACHE_STAT_INC(in_hlist_search); |
2318 | } | 2315 | } |
2319 | rcu_read_unlock(); | ||
2320 | 2316 | ||
2321 | skip_cache: | 2317 | skip_cache: |
2322 | /* Multicast recognition logic is moved from route cache to here. | 2318 | /* Multicast recognition logic is moved from route cache to here. |
@@ -2331,12 +2327,11 @@ skip_cache: | |||
2331 | route cache entry is created eventually. | 2327 | route cache entry is created eventually. |
2332 | */ | 2328 | */ |
2333 | if (ipv4_is_multicast(daddr)) { | 2329 | if (ipv4_is_multicast(daddr)) { |
2334 | struct in_device *in_dev; | 2330 | struct in_device *in_dev = __in_dev_get_rcu(dev); |
2335 | 2331 | ||
2336 | rcu_read_lock(); | 2332 | if (in_dev) { |
2337 | if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { | ||
2338 | int our = ip_check_mc(in_dev, daddr, saddr, | 2333 | int our = ip_check_mc(in_dev, daddr, saddr, |
2339 | ip_hdr(skb)->protocol); | 2334 | ip_hdr(skb)->protocol); |
2340 | if (our | 2335 | if (our |
2341 | #ifdef CONFIG_IP_MROUTE | 2336 | #ifdef CONFIG_IP_MROUTE |
2342 | || | 2337 | || |
@@ -2344,15 +2339,18 @@ skip_cache: | |||
2344 | IN_DEV_MFORWARD(in_dev)) | 2339 | IN_DEV_MFORWARD(in_dev)) |
2345 | #endif | 2340 | #endif |
2346 | ) { | 2341 | ) { |
2342 | int res = ip_route_input_mc(skb, daddr, saddr, | ||
2343 | tos, dev, our); | ||
2347 | rcu_read_unlock(); | 2344 | rcu_read_unlock(); |
2348 | return ip_route_input_mc(skb, daddr, saddr, | 2345 | return res; |
2349 | tos, dev, our); | ||
2350 | } | 2346 | } |
2351 | } | 2347 | } |
2352 | rcu_read_unlock(); | 2348 | rcu_read_unlock(); |
2353 | return -EINVAL; | 2349 | return -EINVAL; |
2354 | } | 2350 | } |
2355 | return ip_route_input_slow(skb, daddr, saddr, tos, dev); | 2351 | res = ip_route_input_slow(skb, daddr, saddr, tos, dev); |
2352 | rcu_read_unlock(); | ||
2353 | return res; | ||
2356 | } | 2354 | } |
2357 | EXPORT_SYMBOL(ip_route_input_common); | 2355 | EXPORT_SYMBOL(ip_route_input_common); |
2358 | 2356 | ||
@@ -2414,12 +2412,12 @@ static int __mkroute_output(struct rtable **result, | |||
2414 | goto cleanup; | 2412 | goto cleanup; |
2415 | } | 2413 | } |
2416 | 2414 | ||
2417 | atomic_set(&rth->u.dst.__refcnt, 1); | 2415 | atomic_set(&rth->dst.__refcnt, 1); |
2418 | rth->u.dst.flags= DST_HOST; | 2416 | rth->dst.flags= DST_HOST; |
2419 | if (IN_DEV_CONF_GET(in_dev, NOXFRM)) | 2417 | if (IN_DEV_CONF_GET(in_dev, NOXFRM)) |
2420 | rth->u.dst.flags |= DST_NOXFRM; | 2418 | rth->dst.flags |= DST_NOXFRM; |
2421 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) | 2419 | if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) |
2422 | rth->u.dst.flags |= DST_NOPOLICY; | 2420 | rth->dst.flags |= DST_NOPOLICY; |
2423 | 2421 | ||
2424 | rth->fl.fl4_dst = oldflp->fl4_dst; | 2422 | rth->fl.fl4_dst = oldflp->fl4_dst; |
2425 | rth->fl.fl4_tos = tos; | 2423 | rth->fl.fl4_tos = tos; |
@@ -2431,35 +2429,35 @@ static int __mkroute_output(struct rtable **result, | |||
2431 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; | 2429 | rth->rt_iif = oldflp->oif ? : dev_out->ifindex; |
2432 | /* get references to the devices that are to be hold by the routing | 2430 | /* get references to the devices that are to be hold by the routing |
2433 | cache entry */ | 2431 | cache entry */ |
2434 | rth->u.dst.dev = dev_out; | 2432 | rth->dst.dev = dev_out; |
2435 | dev_hold(dev_out); | 2433 | dev_hold(dev_out); |
2436 | rth->idev = in_dev_get(dev_out); | 2434 | rth->idev = in_dev_get(dev_out); |
2437 | rth->rt_gateway = fl->fl4_dst; | 2435 | rth->rt_gateway = fl->fl4_dst; |
2438 | rth->rt_spec_dst= fl->fl4_src; | 2436 | rth->rt_spec_dst= fl->fl4_src; |
2439 | 2437 | ||
2440 | rth->u.dst.output=ip_output; | 2438 | rth->dst.output=ip_output; |
2441 | rth->u.dst.obsolete = -1; | 2439 | rth->dst.obsolete = -1; |
2442 | rth->rt_genid = rt_genid(dev_net(dev_out)); | 2440 | rth->rt_genid = rt_genid(dev_net(dev_out)); |
2443 | 2441 | ||
2444 | RT_CACHE_STAT_INC(out_slow_tot); | 2442 | RT_CACHE_STAT_INC(out_slow_tot); |
2445 | 2443 | ||
2446 | if (flags & RTCF_LOCAL) { | 2444 | if (flags & RTCF_LOCAL) { |
2447 | rth->u.dst.input = ip_local_deliver; | 2445 | rth->dst.input = ip_local_deliver; |
2448 | rth->rt_spec_dst = fl->fl4_dst; | 2446 | rth->rt_spec_dst = fl->fl4_dst; |
2449 | } | 2447 | } |
2450 | if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { | 2448 | if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { |
2451 | rth->rt_spec_dst = fl->fl4_src; | 2449 | rth->rt_spec_dst = fl->fl4_src; |
2452 | if (flags & RTCF_LOCAL && | 2450 | if (flags & RTCF_LOCAL && |
2453 | !(dev_out->flags & IFF_LOOPBACK)) { | 2451 | !(dev_out->flags & IFF_LOOPBACK)) { |
2454 | rth->u.dst.output = ip_mc_output; | 2452 | rth->dst.output = ip_mc_output; |
2455 | RT_CACHE_STAT_INC(out_slow_mc); | 2453 | RT_CACHE_STAT_INC(out_slow_mc); |
2456 | } | 2454 | } |
2457 | #ifdef CONFIG_IP_MROUTE | 2455 | #ifdef CONFIG_IP_MROUTE |
2458 | if (res->type == RTN_MULTICAST) { | 2456 | if (res->type == RTN_MULTICAST) { |
2459 | if (IN_DEV_MFORWARD(in_dev) && | 2457 | if (IN_DEV_MFORWARD(in_dev) && |
2460 | !ipv4_is_local_multicast(oldflp->fl4_dst)) { | 2458 | !ipv4_is_local_multicast(oldflp->fl4_dst)) { |
2461 | rth->u.dst.input = ip_mr_input; | 2459 | rth->dst.input = ip_mr_input; |
2462 | rth->u.dst.output = ip_mc_output; | 2460 | rth->dst.output = ip_mc_output; |
2463 | } | 2461 | } |
2464 | } | 2462 | } |
2465 | #endif | 2463 | #endif |
@@ -2714,7 +2712,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2714 | 2712 | ||
2715 | rcu_read_lock_bh(); | 2713 | rcu_read_lock_bh(); |
2716 | for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; | 2714 | for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; |
2717 | rth = rcu_dereference_bh(rth->u.dst.rt_next)) { | 2715 | rth = rcu_dereference_bh(rth->dst.rt_next)) { |
2718 | if (rth->fl.fl4_dst == flp->fl4_dst && | 2716 | if (rth->fl.fl4_dst == flp->fl4_dst && |
2719 | rth->fl.fl4_src == flp->fl4_src && | 2717 | rth->fl.fl4_src == flp->fl4_src && |
2720 | rth->fl.iif == 0 && | 2718 | rth->fl.iif == 0 && |
@@ -2722,9 +2720,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp, | |||
2722 | rth->fl.mark == flp->mark && | 2720 | rth->fl.mark == flp->mark && |
2723 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & | 2721 | !((rth->fl.fl4_tos ^ flp->fl4_tos) & |
2724 | (IPTOS_RT_MASK | RTO_ONLINK)) && | 2722 | (IPTOS_RT_MASK | RTO_ONLINK)) && |
2725 | net_eq(dev_net(rth->u.dst.dev), net) && | 2723 | net_eq(dev_net(rth->dst.dev), net) && |
2726 | !rt_is_expired(rth)) { | 2724 | !rt_is_expired(rth)) { |
2727 | dst_use(&rth->u.dst, jiffies); | 2725 | dst_use(&rth->dst, jiffies); |
2728 | RT_CACHE_STAT_INC(out_hit); | 2726 | RT_CACHE_STAT_INC(out_hit); |
2729 | rcu_read_unlock_bh(); | 2727 | rcu_read_unlock_bh(); |
2730 | *rp = rth; | 2728 | *rp = rth; |
@@ -2761,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi | |||
2761 | dst_alloc(&ipv4_dst_blackhole_ops); | 2759 | dst_alloc(&ipv4_dst_blackhole_ops); |
2762 | 2760 | ||
2763 | if (rt) { | 2761 | if (rt) { |
2764 | struct dst_entry *new = &rt->u.dst; | 2762 | struct dst_entry *new = &rt->dst; |
2765 | 2763 | ||
2766 | atomic_set(&new->__refcnt, 1); | 2764 | atomic_set(&new->__refcnt, 1); |
2767 | new->__use = 1; | 2765 | new->__use = 1; |
2768 | new->input = dst_discard; | 2766 | new->input = dst_discard; |
2769 | new->output = dst_discard; | 2767 | new->output = dst_discard; |
2770 | memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); | 2768 | memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); |
2771 | 2769 | ||
2772 | new->dev = ort->u.dst.dev; | 2770 | new->dev = ort->dst.dev; |
2773 | if (new->dev) | 2771 | if (new->dev) |
2774 | dev_hold(new->dev); | 2772 | dev_hold(new->dev); |
2775 | 2773 | ||
@@ -2793,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi | |||
2793 | dst_free(new); | 2791 | dst_free(new); |
2794 | } | 2792 | } |
2795 | 2793 | ||
2796 | dst_release(&(*rp)->u.dst); | 2794 | dst_release(&(*rp)->dst); |
2797 | *rp = rt; | 2795 | *rp = rt; |
2798 | return (rt ? 0 : -ENOMEM); | 2796 | return (rt ? 0 : -ENOMEM); |
2799 | } | 2797 | } |
@@ -2863,11 +2861,11 @@ static int rt_fill_info(struct net *net, | |||
2863 | r->rtm_src_len = 32; | 2861 | r->rtm_src_len = 32; |
2864 | NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); | 2862 | NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); |
2865 | } | 2863 | } |
2866 | if (rt->u.dst.dev) | 2864 | if (rt->dst.dev) |
2867 | NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex); | 2865 | NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); |
2868 | #ifdef CONFIG_NET_CLS_ROUTE | 2866 | #ifdef CONFIG_NET_CLS_ROUTE |
2869 | if (rt->u.dst.tclassid) | 2867 | if (rt->dst.tclassid) |
2870 | NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); | 2868 | NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); |
2871 | #endif | 2869 | #endif |
2872 | if (rt->fl.iif) | 2870 | if (rt->fl.iif) |
2873 | NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); | 2871 | NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); |
@@ -2877,11 +2875,11 @@ static int rt_fill_info(struct net *net, | |||
2877 | if (rt->rt_dst != rt->rt_gateway) | 2875 | if (rt->rt_dst != rt->rt_gateway) |
2878 | NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); | 2876 | NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); |
2879 | 2877 | ||
2880 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) | 2878 | if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) |
2881 | goto nla_put_failure; | 2879 | goto nla_put_failure; |
2882 | 2880 | ||
2883 | error = rt->u.dst.error; | 2881 | error = rt->dst.error; |
2884 | expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; | 2882 | expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; |
2885 | if (rt->peer) { | 2883 | if (rt->peer) { |
2886 | id = atomic_read(&rt->peer->ip_id_count) & 0xffff; | 2884 | id = atomic_read(&rt->peer->ip_id_count) & 0xffff; |
2887 | if (rt->peer->tcp_ts_stamp) { | 2885 | if (rt->peer->tcp_ts_stamp) { |
@@ -2913,7 +2911,7 @@ static int rt_fill_info(struct net *net, | |||
2913 | NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); | 2911 | NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); |
2914 | } | 2912 | } |
2915 | 2913 | ||
2916 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage, | 2914 | if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, |
2917 | expires, error) < 0) | 2915 | expires, error) < 0) |
2918 | goto nla_put_failure; | 2916 | goto nla_put_failure; |
2919 | 2917 | ||
@@ -2978,8 +2976,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2978 | local_bh_enable(); | 2976 | local_bh_enable(); |
2979 | 2977 | ||
2980 | rt = skb_rtable(skb); | 2978 | rt = skb_rtable(skb); |
2981 | if (err == 0 && rt->u.dst.error) | 2979 | if (err == 0 && rt->dst.error) |
2982 | err = -rt->u.dst.error; | 2980 | err = -rt->dst.error; |
2983 | } else { | 2981 | } else { |
2984 | struct flowi fl = { | 2982 | struct flowi fl = { |
2985 | .nl_u = { | 2983 | .nl_u = { |
@@ -2997,7 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2997 | if (err) | 2995 | if (err) |
2998 | goto errout_free; | 2996 | goto errout_free; |
2999 | 2997 | ||
3000 | skb_dst_set(skb, &rt->u.dst); | 2998 | skb_dst_set(skb, &rt->dst); |
3001 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 2999 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
3002 | rt->rt_flags |= RTCF_NOTIFY; | 3000 | rt->rt_flags |= RTCF_NOTIFY; |
3003 | 3001 | ||
@@ -3033,12 +3031,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
3033 | continue; | 3031 | continue; |
3034 | rcu_read_lock_bh(); | 3032 | rcu_read_lock_bh(); |
3035 | for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt; | 3033 | for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt; |
3036 | rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) { | 3034 | rt = rcu_dereference_bh(rt->dst.rt_next), idx++) { |
3037 | if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) | 3035 | if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx) |
3038 | continue; | 3036 | continue; |
3039 | if (rt_is_expired(rt)) | 3037 | if (rt_is_expired(rt)) |
3040 | continue; | 3038 | continue; |
3041 | skb_dst_set_noref(skb, &rt->u.dst); | 3039 | skb_dst_set_noref(skb, &rt->dst); |
3042 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, | 3040 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, |
3043 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 3041 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
3044 | 1, NLM_F_MULTI) <= 0) { | 3042 | 1, NLM_F_MULTI) <= 0) { |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 5c24db4a3c91..02bef6aa8b30 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -138,23 +138,23 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, | |||
138 | } | 138 | } |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * This table has to be sorted and terminated with (__u16)-1. | 141 | * MSS Values are taken from the 2009 paper |
142 | * XXX generate a better table. | 142 | * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson: |
143 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | 143 | * - values 1440 to 1460 accounted for 80% of observed mss values |
144 | * - values outside the 536-1460 range are rare (<0.2%). | ||
145 | * | ||
146 | * Table must be sorted. | ||
144 | */ | 147 | */ |
145 | static __u16 const msstab[] = { | 148 | static __u16 const msstab[] = { |
146 | 64 - 1, | 149 | 64, |
147 | 256 - 1, | 150 | 512, |
148 | 512 - 1, | 151 | 536, |
149 | 536 - 1, | 152 | 1024, |
150 | 1024 - 1, | 153 | 1440, |
151 | 1440 - 1, | 154 | 1460, |
152 | 1460 - 1, | 155 | 4312, |
153 | 4312 - 1, | 156 | 8960, |
154 | (__u16)-1 | ||
155 | }; | 157 | }; |
156 | /* The number doesn't include the -1 terminator */ | ||
157 | #define NUM_MSS (ARRAY_SIZE(msstab) - 1) | ||
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Generate a syncookie. mssp points to the mss, which is returned | 160 | * Generate a syncookie. mssp points to the mss, which is returned |
@@ -169,10 +169,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
169 | 169 | ||
170 | tcp_synq_overflow(sk); | 170 | tcp_synq_overflow(sk); |
171 | 171 | ||
172 | /* XXX sort msstab[] by probability? Binary search? */ | 172 | for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) |
173 | for (mssind = 0; mss > msstab[mssind + 1]; mssind++) | 173 | if (mss >= msstab[mssind]) |
174 | ; | 174 | break; |
175 | *mssp = msstab[mssind] + 1; | 175 | *mssp = msstab[mssind]; |
176 | 176 | ||
177 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); | 177 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
178 | 178 | ||
@@ -202,7 +202,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | |||
202 | jiffies / (HZ * 60), | 202 | jiffies / (HZ * 60), |
203 | COUNTER_TRIES); | 203 | COUNTER_TRIES); |
204 | 204 | ||
205 | return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; | 205 | return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; |
206 | } | 206 | } |
207 | 207 | ||
208 | static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, | 208 | static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, |
@@ -266,7 +266,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
266 | struct rtable *rt; | 266 | struct rtable *rt; |
267 | __u8 rcv_wscale; | 267 | __u8 rcv_wscale; |
268 | 268 | ||
269 | if (!sysctl_tcp_syncookies || !th->ack) | 269 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
270 | goto out; | 270 | goto out; |
271 | 271 | ||
272 | if (tcp_synq_no_recent_overflow(sk) || | 272 | if (tcp_synq_no_recent_overflow(sk) || |
@@ -347,22 +347,22 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
347 | { .sport = th->dest, | 347 | { .sport = th->dest, |
348 | .dport = th->source } } }; | 348 | .dport = th->source } } }; |
349 | security_req_classify_flow(req, &fl); | 349 | security_req_classify_flow(req, &fl); |
350 | if (ip_route_output_key(&init_net, &rt, &fl)) { | 350 | if (ip_route_output_key(sock_net(sk), &rt, &fl)) { |
351 | reqsk_free(req); | 351 | reqsk_free(req); |
352 | goto out; | 352 | goto out; |
353 | } | 353 | } |
354 | } | 354 | } |
355 | 355 | ||
356 | /* Try to redo what tcp_v4_send_synack did. */ | 356 | /* Try to redo what tcp_v4_send_synack did. */ |
357 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); | 357 | req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); |
358 | 358 | ||
359 | tcp_select_initial_window(tcp_full_space(sk), req->mss, | 359 | tcp_select_initial_window(tcp_full_space(sk), req->mss, |
360 | &req->rcv_wnd, &req->window_clamp, | 360 | &req->rcv_wnd, &req->window_clamp, |
361 | ireq->wscale_ok, &rcv_wscale, | 361 | ireq->wscale_ok, &rcv_wscale, |
362 | dst_metric(&rt->u.dst, RTAX_INITRWND)); | 362 | dst_metric(&rt->dst, RTAX_INITRWND)); |
363 | 363 | ||
364 | ireq->rcv_wscale = rcv_wscale; | 364 | ireq->rcv_wscale = rcv_wscale; |
365 | 365 | ||
366 | ret = get_cookie_sock(sk, skb, req, &rt->u.dst); | 366 | ret = get_cookie_sock(sk, skb, req, &rt->dst); |
367 | out: return ret; | 367 | out: return ret; |
368 | } | 368 | } |
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index c209e054a634..377bc9349371 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c | |||
@@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
126 | * calculate 2^fract in a <<7 value. | 126 | * calculate 2^fract in a <<7 value. |
127 | */ | 127 | */ |
128 | is_slowstart = 1; | 128 | is_slowstart = 1; |
129 | increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) | 129 | increment = ((1 << min(ca->rho, 16U)) * |
130 | - 128; | 130 | hybla_fraction(rho_fractions)) - 128; |
131 | } else { | 131 | } else { |
132 | /* | 132 | /* |
133 | * congestion avoidance | 133 | * congestion avoidance |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3e6dafcb1071..548d575e6cc6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2639 | if (sk->sk_family == AF_INET) { | 2639 | if (sk->sk_family == AF_INET) { |
2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", | 2640 | printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", |
2641 | msg, | 2641 | msg, |
2642 | &inet->daddr, ntohs(inet->dport), | 2642 | &inet->inet_daddr, ntohs(inet->inet_dport), |
2643 | tp->snd_cwnd, tcp_left_out(tp), | 2643 | tp->snd_cwnd, tcp_left_out(tp), |
2644 | tp->snd_ssthresh, tp->prior_ssthresh, | 2644 | tp->snd_ssthresh, tp->prior_ssthresh, |
2645 | tp->packets_out); | 2645 | tp->packets_out); |
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) | |||
2649 | struct ipv6_pinfo *np = inet6_sk(sk); | 2649 | struct ipv6_pinfo *np = inet6_sk(sk); |
2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", | 2650 | printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", |
2651 | msg, | 2651 | msg, |
2652 | &np->daddr, ntohs(inet->dport), | 2652 | &np->daddr, ntohs(inet->inet_dport), |
2653 | tp->snd_cwnd, tcp_left_out(tp), | 2653 | tp->snd_cwnd, tcp_left_out(tp), |
2654 | tp->snd_ssthresh, tp->prior_ssthresh, | 2654 | tp->snd_ssthresh, tp->prior_ssthresh, |
2655 | tp->packets_out); | 2655 | tp->packets_out); |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 202cf09c4cd4..7f9515c0379f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -237,7 +237,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
237 | 237 | ||
238 | /* OK, now commit destination to socket. */ | 238 | /* OK, now commit destination to socket. */ |
239 | sk->sk_gso_type = SKB_GSO_TCPV4; | 239 | sk->sk_gso_type = SKB_GSO_TCPV4; |
240 | sk_setup_caps(sk, &rt->u.dst); | 240 | sk_setup_caps(sk, &rt->dst); |
241 | 241 | ||
242 | if (!tp->write_seq) | 242 | if (!tp->write_seq) |
243 | tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, | 243 | tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, |
@@ -793,19 +793,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) | |||
793 | kfree(inet_rsk(req)->opt); | 793 | kfree(inet_rsk(req)->opt); |
794 | } | 794 | } |
795 | 795 | ||
796 | #ifdef CONFIG_SYN_COOKIES | 796 | static void syn_flood_warning(const struct sk_buff *skb) |
797 | static void syn_flood_warning(struct sk_buff *skb) | ||
798 | { | 797 | { |
799 | static unsigned long warntime; | 798 | const char *msg; |
800 | 799 | ||
801 | if (time_after(jiffies, (warntime + HZ * 60))) { | 800 | #ifdef CONFIG_SYN_COOKIES |
802 | warntime = jiffies; | 801 | if (sysctl_tcp_syncookies) |
803 | printk(KERN_INFO | 802 | msg = "Sending cookies"; |
804 | "possible SYN flooding on port %d. Sending cookies.\n", | 803 | else |
805 | ntohs(tcp_hdr(skb)->dest)); | ||
806 | } | ||
807 | } | ||
808 | #endif | 804 | #endif |
805 | msg = "Dropping request"; | ||
806 | |||
807 | pr_info("TCP: Possible SYN flooding on port %d. %s.\n", | ||
808 | ntohs(tcp_hdr(skb)->dest), msg); | ||
809 | } | ||
809 | 810 | ||
810 | /* | 811 | /* |
811 | * Save and compile IPv4 options into the request_sock if needed. | 812 | * Save and compile IPv4 options into the request_sock if needed. |
@@ -1243,6 +1244,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1243 | * evidently real one. | 1244 | * evidently real one. |
1244 | */ | 1245 | */ |
1245 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { | 1246 | if (inet_csk_reqsk_queue_is_full(sk) && !isn) { |
1247 | if (net_ratelimit()) | ||
1248 | syn_flood_warning(skb); | ||
1246 | #ifdef CONFIG_SYN_COOKIES | 1249 | #ifdef CONFIG_SYN_COOKIES |
1247 | if (sysctl_tcp_syncookies) { | 1250 | if (sysctl_tcp_syncookies) { |
1248 | want_cookie = 1; | 1251 | want_cookie = 1; |
@@ -1328,7 +1331,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1328 | 1331 | ||
1329 | if (want_cookie) { | 1332 | if (want_cookie) { |
1330 | #ifdef CONFIG_SYN_COOKIES | 1333 | #ifdef CONFIG_SYN_COOKIES |
1331 | syn_flood_warning(skb); | ||
1332 | req->cookie_ts = tmp_opt.tstamp_ok; | 1334 | req->cookie_ts = tmp_opt.tstamp_ok; |
1333 | #endif | 1335 | #endif |
1334 | isn = cookie_v4_init_sequence(sk, skb, &req->mss); | 1336 | isn = cookie_v4_init_sequence(sk, skb, &req->mss); |
@@ -1504,7 +1506,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
1504 | } | 1506 | } |
1505 | 1507 | ||
1506 | #ifdef CONFIG_SYN_COOKIES | 1508 | #ifdef CONFIG_SYN_COOKIES |
1507 | if (!th->rst && !th->syn && th->ack) | 1509 | if (!th->syn) |
1508 | sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); | 1510 | sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); |
1509 | #endif | 1511 | #endif |
1510 | return sk; | 1512 | return sk; |
@@ -1555,6 +1557,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1555 | #endif | 1557 | #endif |
1556 | 1558 | ||
1557 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1559 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
1560 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1558 | TCP_CHECK_TIMER(sk); | 1561 | TCP_CHECK_TIMER(sk); |
1559 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { | 1562 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { |
1560 | rsk = sk; | 1563 | rsk = sk; |
@@ -1579,7 +1582,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1579 | } | 1582 | } |
1580 | return 0; | 1583 | return 0; |
1581 | } | 1584 | } |
1582 | } | 1585 | } else |
1586 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1587 | |||
1583 | 1588 | ||
1584 | TCP_CHECK_TIMER(sk); | 1589 | TCP_CHECK_TIMER(sk); |
1585 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { | 1590 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { |
@@ -1672,8 +1677,6 @@ process: | |||
1672 | 1677 | ||
1673 | skb->dev = NULL; | 1678 | skb->dev = NULL; |
1674 | 1679 | ||
1675 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
1676 | |||
1677 | bh_lock_sock_nested(sk); | 1680 | bh_lock_sock_nested(sk); |
1678 | ret = 0; | 1681 | ret = 0; |
1679 | if (!sock_owned_by_user(sk)) { | 1682 | if (!sock_owned_by_user(sk)) { |
@@ -1977,6 +1980,11 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw) | |||
1977 | hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; | 1980 | hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; |
1978 | } | 1981 | } |
1979 | 1982 | ||
1983 | /* | ||
1984 | * Get next listener socket follow cur. If cur is NULL, get first socket | ||
1985 | * starting from bucket given in st->bucket; when st->bucket is zero the | ||
1986 | * very first socket in the hash table is returned. | ||
1987 | */ | ||
1980 | static void *listening_get_next(struct seq_file *seq, void *cur) | 1988 | static void *listening_get_next(struct seq_file *seq, void *cur) |
1981 | { | 1989 | { |
1982 | struct inet_connection_sock *icsk; | 1990 | struct inet_connection_sock *icsk; |
@@ -1987,14 +1995,15 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
1987 | struct net *net = seq_file_net(seq); | 1995 | struct net *net = seq_file_net(seq); |
1988 | 1996 | ||
1989 | if (!sk) { | 1997 | if (!sk) { |
1990 | st->bucket = 0; | 1998 | ilb = &tcp_hashinfo.listening_hash[st->bucket]; |
1991 | ilb = &tcp_hashinfo.listening_hash[0]; | ||
1992 | spin_lock_bh(&ilb->lock); | 1999 | spin_lock_bh(&ilb->lock); |
1993 | sk = sk_nulls_head(&ilb->head); | 2000 | sk = sk_nulls_head(&ilb->head); |
2001 | st->offset = 0; | ||
1994 | goto get_sk; | 2002 | goto get_sk; |
1995 | } | 2003 | } |
1996 | ilb = &tcp_hashinfo.listening_hash[st->bucket]; | 2004 | ilb = &tcp_hashinfo.listening_hash[st->bucket]; |
1997 | ++st->num; | 2005 | ++st->num; |
2006 | ++st->offset; | ||
1998 | 2007 | ||
1999 | if (st->state == TCP_SEQ_STATE_OPENREQ) { | 2008 | if (st->state == TCP_SEQ_STATE_OPENREQ) { |
2000 | struct request_sock *req = cur; | 2009 | struct request_sock *req = cur; |
@@ -2009,6 +2018,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur) | |||
2009 | } | 2018 | } |
2010 | req = req->dl_next; | 2019 | req = req->dl_next; |
2011 | } | 2020 | } |
2021 | st->offset = 0; | ||
2012 | if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) | 2022 | if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) |
2013 | break; | 2023 | break; |
2014 | get_req: | 2024 | get_req: |
@@ -2044,6 +2054,7 @@ start_req: | |||
2044 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); | 2054 | read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); |
2045 | } | 2055 | } |
2046 | spin_unlock_bh(&ilb->lock); | 2056 | spin_unlock_bh(&ilb->lock); |
2057 | st->offset = 0; | ||
2047 | if (++st->bucket < INET_LHTABLE_SIZE) { | 2058 | if (++st->bucket < INET_LHTABLE_SIZE) { |
2048 | ilb = &tcp_hashinfo.listening_hash[st->bucket]; | 2059 | ilb = &tcp_hashinfo.listening_hash[st->bucket]; |
2049 | spin_lock_bh(&ilb->lock); | 2060 | spin_lock_bh(&ilb->lock); |
@@ -2057,7 +2068,12 @@ out: | |||
2057 | 2068 | ||
2058 | static void *listening_get_idx(struct seq_file *seq, loff_t *pos) | 2069 | static void *listening_get_idx(struct seq_file *seq, loff_t *pos) |
2059 | { | 2070 | { |
2060 | void *rc = listening_get_next(seq, NULL); | 2071 | struct tcp_iter_state *st = seq->private; |
2072 | void *rc; | ||
2073 | |||
2074 | st->bucket = 0; | ||
2075 | st->offset = 0; | ||
2076 | rc = listening_get_next(seq, NULL); | ||
2061 | 2077 | ||
2062 | while (rc && *pos) { | 2078 | while (rc && *pos) { |
2063 | rc = listening_get_next(seq, rc); | 2079 | rc = listening_get_next(seq, rc); |
@@ -2072,13 +2088,18 @@ static inline int empty_bucket(struct tcp_iter_state *st) | |||
2072 | hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); | 2088 | hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); |
2073 | } | 2089 | } |
2074 | 2090 | ||
2091 | /* | ||
2092 | * Get first established socket starting from bucket given in st->bucket. | ||
2093 | * If st->bucket is zero, the very first socket in the hash is returned. | ||
2094 | */ | ||
2075 | static void *established_get_first(struct seq_file *seq) | 2095 | static void *established_get_first(struct seq_file *seq) |
2076 | { | 2096 | { |
2077 | struct tcp_iter_state *st = seq->private; | 2097 | struct tcp_iter_state *st = seq->private; |
2078 | struct net *net = seq_file_net(seq); | 2098 | struct net *net = seq_file_net(seq); |
2079 | void *rc = NULL; | 2099 | void *rc = NULL; |
2080 | 2100 | ||
2081 | for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { | 2101 | st->offset = 0; |
2102 | for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { | ||
2082 | struct sock *sk; | 2103 | struct sock *sk; |
2083 | struct hlist_nulls_node *node; | 2104 | struct hlist_nulls_node *node; |
2084 | struct inet_timewait_sock *tw; | 2105 | struct inet_timewait_sock *tw; |
@@ -2123,6 +2144,7 @@ static void *established_get_next(struct seq_file *seq, void *cur) | |||
2123 | struct net *net = seq_file_net(seq); | 2144 | struct net *net = seq_file_net(seq); |
2124 | 2145 | ||
2125 | ++st->num; | 2146 | ++st->num; |
2147 | ++st->offset; | ||
2126 | 2148 | ||
2127 | if (st->state == TCP_SEQ_STATE_TIME_WAIT) { | 2149 | if (st->state == TCP_SEQ_STATE_TIME_WAIT) { |
2128 | tw = cur; | 2150 | tw = cur; |
@@ -2139,6 +2161,7 @@ get_tw: | |||
2139 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2161 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2140 | 2162 | ||
2141 | /* Look for next non empty bucket */ | 2163 | /* Look for next non empty bucket */ |
2164 | st->offset = 0; | ||
2142 | while (++st->bucket <= tcp_hashinfo.ehash_mask && | 2165 | while (++st->bucket <= tcp_hashinfo.ehash_mask && |
2143 | empty_bucket(st)) | 2166 | empty_bucket(st)) |
2144 | ; | 2167 | ; |
@@ -2166,7 +2189,11 @@ out: | |||
2166 | 2189 | ||
2167 | static void *established_get_idx(struct seq_file *seq, loff_t pos) | 2190 | static void *established_get_idx(struct seq_file *seq, loff_t pos) |
2168 | { | 2191 | { |
2169 | void *rc = established_get_first(seq); | 2192 | struct tcp_iter_state *st = seq->private; |
2193 | void *rc; | ||
2194 | |||
2195 | st->bucket = 0; | ||
2196 | rc = established_get_first(seq); | ||
2170 | 2197 | ||
2171 | while (rc && pos) { | 2198 | while (rc && pos) { |
2172 | rc = established_get_next(seq, rc); | 2199 | rc = established_get_next(seq, rc); |
@@ -2191,24 +2218,72 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos) | |||
2191 | return rc; | 2218 | return rc; |
2192 | } | 2219 | } |
2193 | 2220 | ||
2221 | static void *tcp_seek_last_pos(struct seq_file *seq) | ||
2222 | { | ||
2223 | struct tcp_iter_state *st = seq->private; | ||
2224 | int offset = st->offset; | ||
2225 | int orig_num = st->num; | ||
2226 | void *rc = NULL; | ||
2227 | |||
2228 | switch (st->state) { | ||
2229 | case TCP_SEQ_STATE_OPENREQ: | ||
2230 | case TCP_SEQ_STATE_LISTENING: | ||
2231 | if (st->bucket >= INET_LHTABLE_SIZE) | ||
2232 | break; | ||
2233 | st->state = TCP_SEQ_STATE_LISTENING; | ||
2234 | rc = listening_get_next(seq, NULL); | ||
2235 | while (offset-- && rc) | ||
2236 | rc = listening_get_next(seq, rc); | ||
2237 | if (rc) | ||
2238 | break; | ||
2239 | st->bucket = 0; | ||
2240 | /* Fallthrough */ | ||
2241 | case TCP_SEQ_STATE_ESTABLISHED: | ||
2242 | case TCP_SEQ_STATE_TIME_WAIT: | ||
2243 | st->state = TCP_SEQ_STATE_ESTABLISHED; | ||
2244 | if (st->bucket > tcp_hashinfo.ehash_mask) | ||
2245 | break; | ||
2246 | rc = established_get_first(seq); | ||
2247 | while (offset-- && rc) | ||
2248 | rc = established_get_next(seq, rc); | ||
2249 | } | ||
2250 | |||
2251 | st->num = orig_num; | ||
2252 | |||
2253 | return rc; | ||
2254 | } | ||
2255 | |||
2194 | static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) | 2256 | static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) |
2195 | { | 2257 | { |
2196 | struct tcp_iter_state *st = seq->private; | 2258 | struct tcp_iter_state *st = seq->private; |
2259 | void *rc; | ||
2260 | |||
2261 | if (*pos && *pos == st->last_pos) { | ||
2262 | rc = tcp_seek_last_pos(seq); | ||
2263 | if (rc) | ||
2264 | goto out; | ||
2265 | } | ||
2266 | |||
2197 | st->state = TCP_SEQ_STATE_LISTENING; | 2267 | st->state = TCP_SEQ_STATE_LISTENING; |
2198 | st->num = 0; | 2268 | st->num = 0; |
2199 | return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2269 | st->bucket = 0; |
2270 | st->offset = 0; | ||
2271 | rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; | ||
2272 | |||
2273 | out: | ||
2274 | st->last_pos = *pos; | ||
2275 | return rc; | ||
2200 | } | 2276 | } |
2201 | 2277 | ||
2202 | static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 2278 | static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
2203 | { | 2279 | { |
2280 | struct tcp_iter_state *st = seq->private; | ||
2204 | void *rc = NULL; | 2281 | void *rc = NULL; |
2205 | struct tcp_iter_state *st; | ||
2206 | 2282 | ||
2207 | if (v == SEQ_START_TOKEN) { | 2283 | if (v == SEQ_START_TOKEN) { |
2208 | rc = tcp_get_idx(seq, 0); | 2284 | rc = tcp_get_idx(seq, 0); |
2209 | goto out; | 2285 | goto out; |
2210 | } | 2286 | } |
2211 | st = seq->private; | ||
2212 | 2287 | ||
2213 | switch (st->state) { | 2288 | switch (st->state) { |
2214 | case TCP_SEQ_STATE_OPENREQ: | 2289 | case TCP_SEQ_STATE_OPENREQ: |
@@ -2216,6 +2291,8 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2216 | rc = listening_get_next(seq, v); | 2291 | rc = listening_get_next(seq, v); |
2217 | if (!rc) { | 2292 | if (!rc) { |
2218 | st->state = TCP_SEQ_STATE_ESTABLISHED; | 2293 | st->state = TCP_SEQ_STATE_ESTABLISHED; |
2294 | st->bucket = 0; | ||
2295 | st->offset = 0; | ||
2219 | rc = established_get_first(seq); | 2296 | rc = established_get_first(seq); |
2220 | } | 2297 | } |
2221 | break; | 2298 | break; |
@@ -2226,6 +2303,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2226 | } | 2303 | } |
2227 | out: | 2304 | out: |
2228 | ++*pos; | 2305 | ++*pos; |
2306 | st->last_pos = *pos; | ||
2229 | return rc; | 2307 | return rc; |
2230 | } | 2308 | } |
2231 | 2309 | ||
@@ -2264,6 +2342,7 @@ static int tcp_seq_open(struct inode *inode, struct file *file) | |||
2264 | 2342 | ||
2265 | s = ((struct seq_file *)file->private_data)->private; | 2343 | s = ((struct seq_file *)file->private_data)->private; |
2266 | s->family = afinfo->family; | 2344 | s->family = afinfo->family; |
2345 | s->last_pos = 0; | ||
2267 | return 0; | 2346 | return 0; |
2268 | } | 2347 | } |
2269 | 2348 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 50678f9a2763..32e0bef60d0a 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -633,11 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) | |||
633 | if (!inet->recverr) { | 633 | if (!inet->recverr) { |
634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | 634 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) |
635 | goto out; | 635 | goto out; |
636 | } else { | 636 | } else |
637 | bh_lock_sock(sk); | ||
638 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); | 637 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); |
639 | bh_unlock_sock(sk); | 638 | |
640 | } | ||
641 | sk->sk_err = err; | 639 | sk->sk_err = err; |
642 | sk->sk_error_report(sk); | 640 | sk->sk_error_report(sk); |
643 | out: | 641 | out: |
@@ -916,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
916 | !sock_flag(sk, SOCK_BROADCAST)) | 914 | !sock_flag(sk, SOCK_BROADCAST)) |
917 | goto out; | 915 | goto out; |
918 | if (connected) | 916 | if (connected) |
919 | sk_dst_set(sk, dst_clone(&rt->u.dst)); | 917 | sk_dst_set(sk, dst_clone(&rt->dst)); |
920 | } | 918 | } |
921 | 919 | ||
922 | if (msg->msg_flags&MSG_CONFIRM) | 920 | if (msg->msg_flags&MSG_CONFIRM) |
@@ -980,7 +978,7 @@ out: | |||
980 | return err; | 978 | return err; |
981 | 979 | ||
982 | do_confirm: | 980 | do_confirm: |
983 | dst_confirm(&rt->u.dst); | 981 | dst_confirm(&rt->dst); |
984 | if (!(msg->msg_flags&MSG_PROBE) || len) | 982 | if (!(msg->msg_flags&MSG_PROBE) || len) |
985 | goto back_from_confirm; | 983 | goto back_from_confirm; |
986 | err = 0; | 984 | err = 0; |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 1705476670ef..349327092c9e 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
@@ -37,7 +37,7 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, | |||
37 | fl.fl4_src = saddr->a4; | 37 | fl.fl4_src = saddr->a4; |
38 | 38 | ||
39 | err = __ip_route_output_key(net, &rt, &fl); | 39 | err = __ip_route_output_key(net, &rt, &fl); |
40 | dst = &rt->u.dst; | 40 | dst = &rt->dst; |
41 | if (err) | 41 | if (err) |
42 | dst = ERR_PTR(err); | 42 | dst = ERR_PTR(err); |
43 | return dst; | 43 | return dst; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e1a698df5706..b97bb1f30808 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -557,7 +557,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
557 | pr_warning("Freeing alive inet6 address %p\n", ifp); | 557 | pr_warning("Freeing alive inet6 address %p\n", ifp); |
558 | return; | 558 | return; |
559 | } | 559 | } |
560 | dst_release(&ifp->rt->u.dst); | 560 | dst_release(&ifp->rt->dst); |
561 | 561 | ||
562 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); | 562 | call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); |
563 | } | 563 | } |
@@ -823,7 +823,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
823 | rt->rt6i_flags |= RTF_EXPIRES; | 823 | rt->rt6i_flags |= RTF_EXPIRES; |
824 | } | 824 | } |
825 | } | 825 | } |
826 | dst_release(&rt->u.dst); | 826 | dst_release(&rt->dst); |
827 | } | 827 | } |
828 | 828 | ||
829 | out: | 829 | out: |
@@ -1863,7 +1863,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len) | |||
1863 | dev, expires, flags); | 1863 | dev, expires, flags); |
1864 | } | 1864 | } |
1865 | if (rt) | 1865 | if (rt) |
1866 | dst_release(&rt->u.dst); | 1866 | dst_release(&rt->dst); |
1867 | } | 1867 | } |
1868 | 1868 | ||
1869 | /* Try to figure out our local address for this prefix */ | 1869 | /* Try to figure out our local address for this prefix */ |
@@ -4093,11 +4093,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4093 | if (ifp->idev->cnf.forwarding) | 4093 | if (ifp->idev->cnf.forwarding) |
4094 | addrconf_leave_anycast(ifp); | 4094 | addrconf_leave_anycast(ifp); |
4095 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4095 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
4096 | dst_hold(&ifp->rt->u.dst); | 4096 | dst_hold(&ifp->rt->dst); |
4097 | 4097 | ||
4098 | if (ifp->state == INET6_IFADDR_STATE_DEAD && | 4098 | if (ifp->state == INET6_IFADDR_STATE_DEAD && |
4099 | ip6_del_rt(ifp->rt)) | 4099 | ip6_del_rt(ifp->rt)) |
4100 | dst_free(&ifp->rt->u.dst); | 4100 | dst_free(&ifp->rt->dst); |
4101 | break; | 4101 | break; |
4102 | } | 4102 | } |
4103 | } | 4103 | } |
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 8c4348cb1950..f0e774cea386 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c | |||
@@ -53,11 +53,7 @@ static struct ip6addrlbl_table | |||
53 | static inline | 53 | static inline |
54 | struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) | 54 | struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) |
55 | { | 55 | { |
56 | #ifdef CONFIG_NET_NS | 56 | return read_pnet(&lbl->lbl_net); |
57 | return lbl->lbl_net; | ||
58 | #else | ||
59 | return &init_net; | ||
60 | #endif | ||
61 | } | 57 | } |
62 | 58 | ||
63 | /* | 59 | /* |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index e733942dafe1..94b1b9c954bf 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -651,7 +651,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
651 | 651 | ||
652 | if (dst == NULL) { | 652 | if (dst == NULL) { |
653 | struct inet_sock *inet = inet_sk(sk); | 653 | struct inet_sock *inet = inet_sk(sk); |
654 | struct in6_addr *final_p = NULL, final; | 654 | struct in6_addr *final_p, final; |
655 | struct flowi fl; | 655 | struct flowi fl; |
656 | 656 | ||
657 | memset(&fl, 0, sizeof(fl)); | 657 | memset(&fl, 0, sizeof(fl)); |
@@ -665,12 +665,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
665 | fl.fl_ip_sport = inet->inet_sport; | 665 | fl.fl_ip_sport = inet->inet_sport; |
666 | security_sk_classify_flow(sk, &fl); | 666 | security_sk_classify_flow(sk, &fl); |
667 | 667 | ||
668 | if (np->opt && np->opt->srcrt) { | 668 | final_p = fl6_update_dst(&fl, np->opt, &final); |
669 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
670 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
671 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
672 | final_p = &final; | ||
673 | } | ||
674 | 669 | ||
675 | err = ip6_dst_lookup(sk, &dst, &fl); | 670 | err = ip6_dst_lookup(sk, &dst, &fl); |
676 | if (err) { | 671 | if (err) { |
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index b5b07054508a..0e5e943446f0 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -77,41 +77,40 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
77 | pac->acl_next = NULL; | 77 | pac->acl_next = NULL; |
78 | ipv6_addr_copy(&pac->acl_addr, addr); | 78 | ipv6_addr_copy(&pac->acl_addr, addr); |
79 | 79 | ||
80 | rcu_read_lock(); | ||
80 | if (ifindex == 0) { | 81 | if (ifindex == 0) { |
81 | struct rt6_info *rt; | 82 | struct rt6_info *rt; |
82 | 83 | ||
83 | rt = rt6_lookup(net, addr, NULL, 0, 0); | 84 | rt = rt6_lookup(net, addr, NULL, 0, 0); |
84 | if (rt) { | 85 | if (rt) { |
85 | dev = rt->rt6i_dev; | 86 | dev = rt->rt6i_dev; |
86 | dev_hold(dev); | 87 | dst_release(&rt->dst); |
87 | dst_release(&rt->u.dst); | ||
88 | } else if (ishost) { | 88 | } else if (ishost) { |
89 | err = -EADDRNOTAVAIL; | 89 | err = -EADDRNOTAVAIL; |
90 | goto out_free_pac; | 90 | goto error; |
91 | } else { | 91 | } else { |
92 | /* router, no matching interface: just pick one */ | 92 | /* router, no matching interface: just pick one */ |
93 | 93 | dev = dev_get_by_flags_rcu(net, IFF_UP, | |
94 | dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK); | 94 | IFF_UP | IFF_LOOPBACK); |
95 | } | 95 | } |
96 | } else | 96 | } else |
97 | dev = dev_get_by_index(net, ifindex); | 97 | dev = dev_get_by_index_rcu(net, ifindex); |
98 | 98 | ||
99 | if (dev == NULL) { | 99 | if (dev == NULL) { |
100 | err = -ENODEV; | 100 | err = -ENODEV; |
101 | goto out_free_pac; | 101 | goto error; |
102 | } | 102 | } |
103 | 103 | ||
104 | idev = in6_dev_get(dev); | 104 | idev = __in6_dev_get(dev); |
105 | if (!idev) { | 105 | if (!idev) { |
106 | if (ifindex) | 106 | if (ifindex) |
107 | err = -ENODEV; | 107 | err = -ENODEV; |
108 | else | 108 | else |
109 | err = -EADDRNOTAVAIL; | 109 | err = -EADDRNOTAVAIL; |
110 | goto out_dev_put; | 110 | goto error; |
111 | } | 111 | } |
112 | /* reset ishost, now that we have a specific device */ | 112 | /* reset ishost, now that we have a specific device */ |
113 | ishost = !idev->cnf.forwarding; | 113 | ishost = !idev->cnf.forwarding; |
114 | in6_dev_put(idev); | ||
115 | 114 | ||
116 | pac->acl_ifindex = dev->ifindex; | 115 | pac->acl_ifindex = dev->ifindex; |
117 | 116 | ||
@@ -124,26 +123,22 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
124 | if (ishost) | 123 | if (ishost) |
125 | err = -EADDRNOTAVAIL; | 124 | err = -EADDRNOTAVAIL; |
126 | if (err) | 125 | if (err) |
127 | goto out_dev_put; | 126 | goto error; |
128 | } | 127 | } |
129 | 128 | ||
130 | err = ipv6_dev_ac_inc(dev, addr); | 129 | err = ipv6_dev_ac_inc(dev, addr); |
131 | if (err) | 130 | if (!err) { |
132 | goto out_dev_put; | 131 | write_lock_bh(&ipv6_sk_ac_lock); |
133 | 132 | pac->acl_next = np->ipv6_ac_list; | |
134 | write_lock_bh(&ipv6_sk_ac_lock); | 133 | np->ipv6_ac_list = pac; |
135 | pac->acl_next = np->ipv6_ac_list; | 134 | write_unlock_bh(&ipv6_sk_ac_lock); |
136 | np->ipv6_ac_list = pac; | 135 | pac = NULL; |
137 | write_unlock_bh(&ipv6_sk_ac_lock); | 136 | } |
138 | |||
139 | dev_put(dev); | ||
140 | |||
141 | return 0; | ||
142 | 137 | ||
143 | out_dev_put: | 138 | error: |
144 | dev_put(dev); | 139 | rcu_read_unlock(); |
145 | out_free_pac: | 140 | if (pac) |
146 | sock_kfree_s(sk, pac, sizeof(*pac)); | 141 | sock_kfree_s(sk, pac, sizeof(*pac)); |
147 | return err; | 142 | return err; |
148 | } | 143 | } |
149 | 144 | ||
@@ -176,11 +171,12 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) | |||
176 | 171 | ||
177 | write_unlock_bh(&ipv6_sk_ac_lock); | 172 | write_unlock_bh(&ipv6_sk_ac_lock); |
178 | 173 | ||
179 | dev = dev_get_by_index(net, pac->acl_ifindex); | 174 | rcu_read_lock(); |
180 | if (dev) { | 175 | dev = dev_get_by_index_rcu(net, pac->acl_ifindex); |
176 | if (dev) | ||
181 | ipv6_dev_ac_dec(dev, &pac->acl_addr); | 177 | ipv6_dev_ac_dec(dev, &pac->acl_addr); |
182 | dev_put(dev); | 178 | rcu_read_unlock(); |
183 | } | 179 | |
184 | sock_kfree_s(sk, pac, sizeof(*pac)); | 180 | sock_kfree_s(sk, pac, sizeof(*pac)); |
185 | return 0; | 181 | return 0; |
186 | } | 182 | } |
@@ -199,13 +195,12 @@ void ipv6_sock_ac_close(struct sock *sk) | |||
199 | write_unlock_bh(&ipv6_sk_ac_lock); | 195 | write_unlock_bh(&ipv6_sk_ac_lock); |
200 | 196 | ||
201 | prev_index = 0; | 197 | prev_index = 0; |
198 | rcu_read_lock(); | ||
202 | while (pac) { | 199 | while (pac) { |
203 | struct ipv6_ac_socklist *next = pac->acl_next; | 200 | struct ipv6_ac_socklist *next = pac->acl_next; |
204 | 201 | ||
205 | if (pac->acl_ifindex != prev_index) { | 202 | if (pac->acl_ifindex != prev_index) { |
206 | if (dev) | 203 | dev = dev_get_by_index_rcu(net, pac->acl_ifindex); |
207 | dev_put(dev); | ||
208 | dev = dev_get_by_index(net, pac->acl_ifindex); | ||
209 | prev_index = pac->acl_ifindex; | 204 | prev_index = pac->acl_ifindex; |
210 | } | 205 | } |
211 | if (dev) | 206 | if (dev) |
@@ -213,8 +208,7 @@ void ipv6_sock_ac_close(struct sock *sk) | |||
213 | sock_kfree_s(sk, pac, sizeof(*pac)); | 208 | sock_kfree_s(sk, pac, sizeof(*pac)); |
214 | pac = next; | 209 | pac = next; |
215 | } | 210 | } |
216 | if (dev) | 211 | rcu_read_unlock(); |
217 | dev_put(dev); | ||
218 | } | 212 | } |
219 | 213 | ||
220 | #if 0 | 214 | #if 0 |
@@ -250,7 +244,7 @@ static void aca_put(struct ifacaddr6 *ac) | |||
250 | { | 244 | { |
251 | if (atomic_dec_and_test(&ac->aca_refcnt)) { | 245 | if (atomic_dec_and_test(&ac->aca_refcnt)) { |
252 | in6_dev_put(ac->aca_idev); | 246 | in6_dev_put(ac->aca_idev); |
253 | dst_release(&ac->aca_rt->u.dst); | 247 | dst_release(&ac->aca_rt->dst); |
254 | kfree(ac); | 248 | kfree(ac); |
255 | } | 249 | } |
256 | } | 250 | } |
@@ -356,40 +350,39 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) | |||
356 | write_unlock_bh(&idev->lock); | 350 | write_unlock_bh(&idev->lock); |
357 | addrconf_leave_solict(idev, &aca->aca_addr); | 351 | addrconf_leave_solict(idev, &aca->aca_addr); |
358 | 352 | ||
359 | dst_hold(&aca->aca_rt->u.dst); | 353 | dst_hold(&aca->aca_rt->dst); |
360 | ip6_del_rt(aca->aca_rt); | 354 | ip6_del_rt(aca->aca_rt); |
361 | 355 | ||
362 | aca_put(aca); | 356 | aca_put(aca); |
363 | return 0; | 357 | return 0; |
364 | } | 358 | } |
365 | 359 | ||
360 | /* called with rcu_read_lock() */ | ||
366 | static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) | 361 | static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) |
367 | { | 362 | { |
368 | int ret; | 363 | struct inet6_dev *idev = __in6_dev_get(dev); |
369 | struct inet6_dev *idev = in6_dev_get(dev); | 364 | |
370 | if (idev == NULL) | 365 | if (idev == NULL) |
371 | return -ENODEV; | 366 | return -ENODEV; |
372 | ret = __ipv6_dev_ac_dec(idev, addr); | 367 | return __ipv6_dev_ac_dec(idev, addr); |
373 | in6_dev_put(idev); | ||
374 | return ret; | ||
375 | } | 368 | } |
376 | 369 | ||
377 | /* | 370 | /* |
378 | * check if the interface has this anycast address | 371 | * check if the interface has this anycast address |
372 | * called with rcu_read_lock() | ||
379 | */ | 373 | */ |
380 | static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) | 374 | static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) |
381 | { | 375 | { |
382 | struct inet6_dev *idev; | 376 | struct inet6_dev *idev; |
383 | struct ifacaddr6 *aca; | 377 | struct ifacaddr6 *aca; |
384 | 378 | ||
385 | idev = in6_dev_get(dev); | 379 | idev = __in6_dev_get(dev); |
386 | if (idev) { | 380 | if (idev) { |
387 | read_lock_bh(&idev->lock); | 381 | read_lock_bh(&idev->lock); |
388 | for (aca = idev->ac_list; aca; aca = aca->aca_next) | 382 | for (aca = idev->ac_list; aca; aca = aca->aca_next) |
389 | if (ipv6_addr_equal(&aca->aca_addr, addr)) | 383 | if (ipv6_addr_equal(&aca->aca_addr, addr)) |
390 | break; | 384 | break; |
391 | read_unlock_bh(&idev->lock); | 385 | read_unlock_bh(&idev->lock); |
392 | in6_dev_put(idev); | ||
393 | return aca != NULL; | 386 | return aca != NULL; |
394 | } | 387 | } |
395 | return 0; | 388 | return 0; |
@@ -403,14 +396,15 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | |||
403 | { | 396 | { |
404 | int found = 0; | 397 | int found = 0; |
405 | 398 | ||
406 | if (dev) | ||
407 | return ipv6_chk_acast_dev(dev, addr); | ||
408 | rcu_read_lock(); | 399 | rcu_read_lock(); |
409 | for_each_netdev_rcu(net, dev) | 400 | if (dev) |
410 | if (ipv6_chk_acast_dev(dev, addr)) { | 401 | found = ipv6_chk_acast_dev(dev, addr); |
411 | found = 1; | 402 | else |
412 | break; | 403 | for_each_netdev_rcu(net, dev) |
413 | } | 404 | if (ipv6_chk_acast_dev(dev, addr)) { |
405 | found = 1; | ||
406 | break; | ||
407 | } | ||
414 | rcu_read_unlock(); | 408 | rcu_read_unlock(); |
415 | return found; | 409 | return found; |
416 | } | 410 | } |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 712684687c9a..7d929a22cbc2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -38,10 +38,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
38 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; | 38 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
39 | struct inet_sock *inet = inet_sk(sk); | 39 | struct inet_sock *inet = inet_sk(sk); |
40 | struct ipv6_pinfo *np = inet6_sk(sk); | 40 | struct ipv6_pinfo *np = inet6_sk(sk); |
41 | struct in6_addr *daddr, *final_p = NULL, final; | 41 | struct in6_addr *daddr, *final_p, final; |
42 | struct dst_entry *dst; | 42 | struct dst_entry *dst; |
43 | struct flowi fl; | 43 | struct flowi fl; |
44 | struct ip6_flowlabel *flowlabel = NULL; | 44 | struct ip6_flowlabel *flowlabel = NULL; |
45 | struct ipv6_txoptions *opt; | ||
45 | int addr_type; | 46 | int addr_type; |
46 | int err; | 47 | int err; |
47 | 48 | ||
@@ -155,19 +156,8 @@ ipv4_connected: | |||
155 | 156 | ||
156 | security_sk_classify_flow(sk, &fl); | 157 | security_sk_classify_flow(sk, &fl); |
157 | 158 | ||
158 | if (flowlabel) { | 159 | opt = flowlabel ? flowlabel->opt : np->opt; |
159 | if (flowlabel->opt && flowlabel->opt->srcrt) { | 160 | final_p = fl6_update_dst(&fl, opt, &final); |
160 | struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt; | ||
161 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
162 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
163 | final_p = &final; | ||
164 | } | ||
165 | } else if (np->opt && np->opt->srcrt) { | ||
166 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
167 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
168 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
169 | final_p = &final; | ||
170 | } | ||
171 | 161 | ||
172 | err = ip6_dst_lookup(sk, &dst, &fl); | 162 | err = ip6_dst_lookup(sk, &dst, &fl); |
173 | if (err) | 163 | if (err) |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8a659f92d17a..262f105d23b9 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -312,6 +312,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb) | |||
312 | Routing header. | 312 | Routing header. |
313 | ********************************/ | 313 | ********************************/ |
314 | 314 | ||
315 | /* called with rcu_read_lock() */ | ||
315 | static int ipv6_rthdr_rcv(struct sk_buff *skb) | 316 | static int ipv6_rthdr_rcv(struct sk_buff *skb) |
316 | { | 317 | { |
317 | struct inet6_skb_parm *opt = IP6CB(skb); | 318 | struct inet6_skb_parm *opt = IP6CB(skb); |
@@ -324,12 +325,9 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb) | |||
324 | struct net *net = dev_net(skb->dev); | 325 | struct net *net = dev_net(skb->dev); |
325 | int accept_source_route = net->ipv6.devconf_all->accept_source_route; | 326 | int accept_source_route = net->ipv6.devconf_all->accept_source_route; |
326 | 327 | ||
327 | idev = in6_dev_get(skb->dev); | 328 | idev = __in6_dev_get(skb->dev); |
328 | if (idev) { | 329 | if (idev && accept_source_route > idev->cnf.accept_source_route) |
329 | if (accept_source_route > idev->cnf.accept_source_route) | 330 | accept_source_route = idev->cnf.accept_source_route; |
330 | accept_source_route = idev->cnf.accept_source_route; | ||
331 | in6_dev_put(idev); | ||
332 | } | ||
333 | 331 | ||
334 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || | 332 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || |
335 | !pskb_may_pull(skb, (skb_transport_offset(skb) + | 333 | !pskb_may_pull(skb, (skb_transport_offset(skb) + |
@@ -874,3 +872,27 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, | |||
874 | return opt; | 872 | return opt; |
875 | } | 873 | } |
876 | 874 | ||
875 | /** | ||
876 | * fl6_update_dst - update flowi destination address with info given | ||
877 | * by srcrt option, if any. | ||
878 | * | ||
879 | * @fl: flowi for which fl6_dst is to be updated | ||
880 | * @opt: struct ipv6_txoptions in which to look for srcrt opt | ||
881 | * @orig: copy of original fl6_dst address if modified | ||
882 | * | ||
883 | * Returns NULL if no txoptions or no srcrt, otherwise returns orig | ||
884 | * and initial value of fl->fl6_dst set in orig | ||
885 | */ | ||
886 | struct in6_addr *fl6_update_dst(struct flowi *fl, | ||
887 | const struct ipv6_txoptions *opt, | ||
888 | struct in6_addr *orig) | ||
889 | { | ||
890 | if (!opt || !opt->srcrt) | ||
891 | return NULL; | ||
892 | |||
893 | ipv6_addr_copy(orig, &fl->fl6_dst); | ||
894 | ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr); | ||
895 | return orig; | ||
896 | } | ||
897 | |||
898 | EXPORT_SYMBOL_GPL(fl6_update_dst); | ||
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 8e44f8f9c188..b1108ede18e1 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c | |||
@@ -43,8 +43,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl, | |||
43 | if (arg.result) | 43 | if (arg.result) |
44 | return arg.result; | 44 | return arg.result; |
45 | 45 | ||
46 | dst_hold(&net->ipv6.ip6_null_entry->u.dst); | 46 | dst_hold(&net->ipv6.ip6_null_entry->dst); |
47 | return &net->ipv6.ip6_null_entry->u.dst; | 47 | return &net->ipv6.ip6_null_entry->dst; |
48 | } | 48 | } |
49 | 49 | ||
50 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | 50 | static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, |
@@ -86,7 +86,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
86 | struct in6_addr saddr; | 86 | struct in6_addr saddr; |
87 | 87 | ||
88 | if (ipv6_dev_get_saddr(net, | 88 | if (ipv6_dev_get_saddr(net, |
89 | ip6_dst_idev(&rt->u.dst)->dev, | 89 | ip6_dst_idev(&rt->dst)->dev, |
90 | &flp->fl6_dst, | 90 | &flp->fl6_dst, |
91 | rt6_flags2srcprefs(flags), | 91 | rt6_flags2srcprefs(flags), |
92 | &saddr)) | 92 | &saddr)) |
@@ -99,12 +99,12 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, | |||
99 | goto out; | 99 | goto out; |
100 | } | 100 | } |
101 | again: | 101 | again: |
102 | dst_release(&rt->u.dst); | 102 | dst_release(&rt->dst); |
103 | rt = NULL; | 103 | rt = NULL; |
104 | goto out; | 104 | goto out; |
105 | 105 | ||
106 | discard_pkt: | 106 | discard_pkt: |
107 | dst_hold(&rt->u.dst); | 107 | dst_hold(&rt->dst); |
108 | out: | 108 | out: |
109 | arg->result = rt; | 109 | arg->result = rt; |
110 | return rt == NULL ? -EAGAIN : 0; | 110 | return rt == NULL ? -EAGAIN : 0; |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ce7992982557..03e62f94ff8e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -483,7 +483,7 @@ route_done: | |||
483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, | 483 | np->tclass, NULL, &fl, (struct rt6_info*)dst, |
484 | MSG_DONTWAIT, np->dontfrag); | 484 | MSG_DONTWAIT, np->dontfrag); |
485 | if (err) { | 485 | if (err) { |
486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 486 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
487 | ip6_flush_pending_frames(sk); | 487 | ip6_flush_pending_frames(sk); |
488 | goto out_put; | 488 | goto out_put; |
489 | } | 489 | } |
@@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) | |||
565 | np->dontfrag); | 565 | np->dontfrag); |
566 | 566 | ||
567 | if (err) { | 567 | if (err) { |
568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); | 568 | ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); |
569 | ip6_flush_pending_frames(sk); | 569 | ip6_flush_pending_frames(sk); |
570 | goto out_put; | 570 | goto out_put; |
571 | } | 571 | } |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0c5e3c3b7fd5..8a1628023bd1 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb) | |||
185 | struct ipv6_pinfo *np = inet6_sk(sk); | 185 | struct ipv6_pinfo *np = inet6_sk(sk); |
186 | struct flowi fl; | 186 | struct flowi fl; |
187 | struct dst_entry *dst; | 187 | struct dst_entry *dst; |
188 | struct in6_addr *final_p = NULL, final; | 188 | struct in6_addr *final_p, final; |
189 | 189 | ||
190 | memset(&fl, 0, sizeof(fl)); | 190 | memset(&fl, 0, sizeof(fl)); |
191 | fl.proto = sk->sk_protocol; | 191 | fl.proto = sk->sk_protocol; |
@@ -199,12 +199,7 @@ int inet6_csk_xmit(struct sk_buff *skb) | |||
199 | fl.fl_ip_dport = inet->inet_dport; | 199 | fl.fl_ip_dport = inet->inet_dport; |
200 | security_sk_classify_flow(sk, &fl); | 200 | security_sk_classify_flow(sk, &fl); |
201 | 201 | ||
202 | if (np->opt && np->opt->srcrt) { | 202 | final_p = fl6_update_dst(&fl, np->opt, &final); |
203 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
204 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
205 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
206 | final_p = &final; | ||
207 | } | ||
208 | 203 | ||
209 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); | 204 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); |
210 | 205 | ||
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 92a122b7795d..b6a585909d35 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -165,7 +165,7 @@ static __inline__ void node_free(struct fib6_node * fn) | |||
165 | static __inline__ void rt6_release(struct rt6_info *rt) | 165 | static __inline__ void rt6_release(struct rt6_info *rt) |
166 | { | 166 | { |
167 | if (atomic_dec_and_test(&rt->rt6i_ref)) | 167 | if (atomic_dec_and_test(&rt->rt6i_ref)) |
168 | dst_free(&rt->u.dst); | 168 | dst_free(&rt->dst); |
169 | } | 169 | } |
170 | 170 | ||
171 | static void fib6_link_table(struct net *net, struct fib6_table *tb) | 171 | static void fib6_link_table(struct net *net, struct fib6_table *tb) |
@@ -278,7 +278,7 @@ static int fib6_dump_node(struct fib6_walker_t *w) | |||
278 | int res; | 278 | int res; |
279 | struct rt6_info *rt; | 279 | struct rt6_info *rt; |
280 | 280 | ||
281 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { | 281 | for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { |
282 | res = rt6_dump_route(rt, w->args); | 282 | res = rt6_dump_route(rt, w->args); |
283 | if (res < 0) { | 283 | if (res < 0) { |
284 | /* Frame is full, suspend walking */ | 284 | /* Frame is full, suspend walking */ |
@@ -619,7 +619,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
619 | 619 | ||
620 | ins = &fn->leaf; | 620 | ins = &fn->leaf; |
621 | 621 | ||
622 | for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { | 622 | for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) { |
623 | /* | 623 | /* |
624 | * Search for duplicates | 624 | * Search for duplicates |
625 | */ | 625 | */ |
@@ -647,7 +647,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
647 | if (iter->rt6i_metric > rt->rt6i_metric) | 647 | if (iter->rt6i_metric > rt->rt6i_metric) |
648 | break; | 648 | break; |
649 | 649 | ||
650 | ins = &iter->u.dst.rt6_next; | 650 | ins = &iter->dst.rt6_next; |
651 | } | 651 | } |
652 | 652 | ||
653 | /* Reset round-robin state, if necessary */ | 653 | /* Reset round-robin state, if necessary */ |
@@ -658,7 +658,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
658 | * insert node | 658 | * insert node |
659 | */ | 659 | */ |
660 | 660 | ||
661 | rt->u.dst.rt6_next = iter; | 661 | rt->dst.rt6_next = iter; |
662 | *ins = rt; | 662 | *ins = rt; |
663 | rt->rt6i_node = fn; | 663 | rt->rt6i_node = fn; |
664 | atomic_inc(&rt->rt6i_ref); | 664 | atomic_inc(&rt->rt6i_ref); |
@@ -799,7 +799,7 @@ out: | |||
799 | atomic_inc(&pn->leaf->rt6i_ref); | 799 | atomic_inc(&pn->leaf->rt6i_ref); |
800 | } | 800 | } |
801 | #endif | 801 | #endif |
802 | dst_free(&rt->u.dst); | 802 | dst_free(&rt->dst); |
803 | } | 803 | } |
804 | return err; | 804 | return err; |
805 | 805 | ||
@@ -810,7 +810,7 @@ out: | |||
810 | st_failure: | 810 | st_failure: |
811 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) | 811 | if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) |
812 | fib6_repair_tree(info->nl_net, fn); | 812 | fib6_repair_tree(info->nl_net, fn); |
813 | dst_free(&rt->u.dst); | 813 | dst_free(&rt->dst); |
814 | return err; | 814 | return err; |
815 | #endif | 815 | #endif |
816 | } | 816 | } |
@@ -1108,7 +1108,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1108 | RT6_TRACE("fib6_del_route\n"); | 1108 | RT6_TRACE("fib6_del_route\n"); |
1109 | 1109 | ||
1110 | /* Unlink it */ | 1110 | /* Unlink it */ |
1111 | *rtp = rt->u.dst.rt6_next; | 1111 | *rtp = rt->dst.rt6_next; |
1112 | rt->rt6i_node = NULL; | 1112 | rt->rt6i_node = NULL; |
1113 | net->ipv6.rt6_stats->fib_rt_entries--; | 1113 | net->ipv6.rt6_stats->fib_rt_entries--; |
1114 | net->ipv6.rt6_stats->fib_discarded_routes++; | 1114 | net->ipv6.rt6_stats->fib_discarded_routes++; |
@@ -1122,14 +1122,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, | |||
1122 | FOR_WALKERS(w) { | 1122 | FOR_WALKERS(w) { |
1123 | if (w->state == FWS_C && w->leaf == rt) { | 1123 | if (w->state == FWS_C && w->leaf == rt) { |
1124 | RT6_TRACE("walker %p adjusted by delroute\n", w); | 1124 | RT6_TRACE("walker %p adjusted by delroute\n", w); |
1125 | w->leaf = rt->u.dst.rt6_next; | 1125 | w->leaf = rt->dst.rt6_next; |
1126 | if (w->leaf == NULL) | 1126 | if (w->leaf == NULL) |
1127 | w->state = FWS_U; | 1127 | w->state = FWS_U; |
1128 | } | 1128 | } |
1129 | } | 1129 | } |
1130 | read_unlock(&fib6_walker_lock); | 1130 | read_unlock(&fib6_walker_lock); |
1131 | 1131 | ||
1132 | rt->u.dst.rt6_next = NULL; | 1132 | rt->dst.rt6_next = NULL; |
1133 | 1133 | ||
1134 | /* If it was last route, expunge its radix tree node */ | 1134 | /* If it was last route, expunge its radix tree node */ |
1135 | if (fn->leaf == NULL) { | 1135 | if (fn->leaf == NULL) { |
@@ -1168,7 +1168,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) | |||
1168 | struct rt6_info **rtp; | 1168 | struct rt6_info **rtp; |
1169 | 1169 | ||
1170 | #if RT6_DEBUG >= 2 | 1170 | #if RT6_DEBUG >= 2 |
1171 | if (rt->u.dst.obsolete>0) { | 1171 | if (rt->dst.obsolete>0) { |
1172 | WARN_ON(fn != NULL); | 1172 | WARN_ON(fn != NULL); |
1173 | return -ENOENT; | 1173 | return -ENOENT; |
1174 | } | 1174 | } |
@@ -1195,7 +1195,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info) | |||
1195 | * Walk the leaf entries looking for ourself | 1195 | * Walk the leaf entries looking for ourself |
1196 | */ | 1196 | */ |
1197 | 1197 | ||
1198 | for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) { | 1198 | for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) { |
1199 | if (*rtp == rt) { | 1199 | if (*rtp == rt) { |
1200 | fib6_del_route(fn, rtp, info); | 1200 | fib6_del_route(fn, rtp, info); |
1201 | return 0; | 1201 | return 0; |
@@ -1334,7 +1334,7 @@ static int fib6_clean_node(struct fib6_walker_t *w) | |||
1334 | .nl_net = c->net, | 1334 | .nl_net = c->net, |
1335 | }; | 1335 | }; |
1336 | 1336 | ||
1337 | for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { | 1337 | for (rt = w->leaf; rt; rt = rt->dst.rt6_next) { |
1338 | res = c->func(rt, c->arg); | 1338 | res = c->func(rt, c->arg); |
1339 | if (res < 0) { | 1339 | if (res < 0) { |
1340 | w->leaf = rt; | 1340 | w->leaf = rt; |
@@ -1448,8 +1448,8 @@ static int fib6_age(struct rt6_info *rt, void *arg) | |||
1448 | } | 1448 | } |
1449 | gc_args.more++; | 1449 | gc_args.more++; |
1450 | } else if (rt->rt6i_flags & RTF_CACHE) { | 1450 | } else if (rt->rt6i_flags & RTF_CACHE) { |
1451 | if (atomic_read(&rt->u.dst.__refcnt) == 0 && | 1451 | if (atomic_read(&rt->dst.__refcnt) == 0 && |
1452 | time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) { | 1452 | time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) { |
1453 | RT6_TRACE("aging clone %p\n", rt); | 1453 | RT6_TRACE("aging clone %p\n", rt); |
1454 | return -1; | 1454 | return -1; |
1455 | } else if ((rt->rt6i_flags & RTF_GATEWAY) && | 1455 | } else if ((rt->rt6i_flags & RTF_GATEWAY) && |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 89425af0684c..d40b330c0ee6 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -698,7 +698,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
698 | ipv6_hdr(skb)->payload_len = htons(first_len - | 698 | ipv6_hdr(skb)->payload_len = htons(first_len - |
699 | sizeof(struct ipv6hdr)); | 699 | sizeof(struct ipv6hdr)); |
700 | 700 | ||
701 | dst_hold(&rt->u.dst); | 701 | dst_hold(&rt->dst); |
702 | 702 | ||
703 | for (;;) { | 703 | for (;;) { |
704 | /* Prepare header of the next frame, | 704 | /* Prepare header of the next frame, |
@@ -726,7 +726,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
726 | 726 | ||
727 | err = output(skb); | 727 | err = output(skb); |
728 | if(!err) | 728 | if(!err) |
729 | IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), | 729 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
730 | IPSTATS_MIB_FRAGCREATES); | 730 | IPSTATS_MIB_FRAGCREATES); |
731 | 731 | ||
732 | if (err || !frag) | 732 | if (err || !frag) |
@@ -740,9 +740,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
740 | kfree(tmp_hdr); | 740 | kfree(tmp_hdr); |
741 | 741 | ||
742 | if (err == 0) { | 742 | if (err == 0) { |
743 | IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), | 743 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
744 | IPSTATS_MIB_FRAGOKS); | 744 | IPSTATS_MIB_FRAGOKS); |
745 | dst_release(&rt->u.dst); | 745 | dst_release(&rt->dst); |
746 | return 0; | 746 | return 0; |
747 | } | 747 | } |
748 | 748 | ||
@@ -752,9 +752,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
752 | frag = skb; | 752 | frag = skb; |
753 | } | 753 | } |
754 | 754 | ||
755 | IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), | 755 | IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), |
756 | IPSTATS_MIB_FRAGFAILS); | 756 | IPSTATS_MIB_FRAGFAILS); |
757 | dst_release(&rt->u.dst); | 757 | dst_release(&rt->dst); |
758 | return err; | 758 | return err; |
759 | } | 759 | } |
760 | 760 | ||
@@ -785,7 +785,7 @@ slow_path: | |||
785 | * Allocate buffer. | 785 | * Allocate buffer. |
786 | */ | 786 | */ |
787 | 787 | ||
788 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { | 788 | if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) { |
789 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); | 789 | NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); |
790 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), | 790 | IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), |
791 | IPSTATS_MIB_FRAGFAILS); | 791 | IPSTATS_MIB_FRAGFAILS); |
@@ -798,7 +798,7 @@ slow_path: | |||
798 | */ | 798 | */ |
799 | 799 | ||
800 | ip6_copy_metadata(frag, skb); | 800 | ip6_copy_metadata(frag, skb); |
801 | skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); | 801 | skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev)); |
802 | skb_put(frag, len + hlen + sizeof(struct frag_hdr)); | 802 | skb_put(frag, len + hlen + sizeof(struct frag_hdr)); |
803 | skb_reset_network_header(frag); | 803 | skb_reset_network_header(frag); |
804 | fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); | 804 | fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); |
@@ -1156,24 +1156,24 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1156 | 1156 | ||
1157 | /* need source address above miyazawa*/ | 1157 | /* need source address above miyazawa*/ |
1158 | } | 1158 | } |
1159 | dst_hold(&rt->u.dst); | 1159 | dst_hold(&rt->dst); |
1160 | inet->cork.dst = &rt->u.dst; | 1160 | inet->cork.dst = &rt->dst; |
1161 | inet->cork.fl = *fl; | 1161 | inet->cork.fl = *fl; |
1162 | np->cork.hop_limit = hlimit; | 1162 | np->cork.hop_limit = hlimit; |
1163 | np->cork.tclass = tclass; | 1163 | np->cork.tclass = tclass; |
1164 | mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? | 1164 | mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? |
1165 | rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path); | 1165 | rt->dst.dev->mtu : dst_mtu(rt->dst.path); |
1166 | if (np->frag_size < mtu) { | 1166 | if (np->frag_size < mtu) { |
1167 | if (np->frag_size) | 1167 | if (np->frag_size) |
1168 | mtu = np->frag_size; | 1168 | mtu = np->frag_size; |
1169 | } | 1169 | } |
1170 | inet->cork.fragsize = mtu; | 1170 | inet->cork.fragsize = mtu; |
1171 | if (dst_allfrag(rt->u.dst.path)) | 1171 | if (dst_allfrag(rt->dst.path)) |
1172 | inet->cork.flags |= IPCORK_ALLFRAG; | 1172 | inet->cork.flags |= IPCORK_ALLFRAG; |
1173 | inet->cork.length = 0; | 1173 | inet->cork.length = 0; |
1174 | sk->sk_sndmsg_page = NULL; | 1174 | sk->sk_sndmsg_page = NULL; |
1175 | sk->sk_sndmsg_off = 0; | 1175 | sk->sk_sndmsg_off = 0; |
1176 | exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) - | 1176 | exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) - |
1177 | rt->rt6i_nfheader_len; | 1177 | rt->rt6i_nfheader_len; |
1178 | length += exthdrlen; | 1178 | length += exthdrlen; |
1179 | transhdrlen += exthdrlen; | 1179 | transhdrlen += exthdrlen; |
@@ -1186,7 +1186,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1186 | mtu = inet->cork.fragsize; | 1186 | mtu = inet->cork.fragsize; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); | 1189 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); |
1190 | 1190 | ||
1191 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + | 1191 | fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + |
1192 | (opt ? opt->opt_nflen : 0); | 1192 | (opt ? opt->opt_nflen : 0); |
@@ -1224,7 +1224,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | if (proto == IPPROTO_UDP && | 1226 | if (proto == IPPROTO_UDP && |
1227 | (rt->u.dst.dev->features & NETIF_F_UFO)) { | 1227 | (rt->dst.dev->features & NETIF_F_UFO)) { |
1228 | 1228 | ||
1229 | err = ip6_ufo_append_data(sk, getfrag, from, length, | 1229 | err = ip6_ufo_append_data(sk, getfrag, from, length, |
1230 | hh_len, fragheaderlen, | 1230 | hh_len, fragheaderlen, |
@@ -1270,7 +1270,7 @@ alloc_new_skb: | |||
1270 | 1270 | ||
1271 | fraglen = datalen + fragheaderlen; | 1271 | fraglen = datalen + fragheaderlen; |
1272 | if ((flags & MSG_MORE) && | 1272 | if ((flags & MSG_MORE) && |
1273 | !(rt->u.dst.dev->features&NETIF_F_SG)) | 1273 | !(rt->dst.dev->features&NETIF_F_SG)) |
1274 | alloclen = mtu; | 1274 | alloclen = mtu; |
1275 | else | 1275 | else |
1276 | alloclen = datalen + fragheaderlen; | 1276 | alloclen = datalen + fragheaderlen; |
@@ -1281,7 +1281,7 @@ alloc_new_skb: | |||
1281 | * because we have no idea if we're the last one. | 1281 | * because we have no idea if we're the last one. |
1282 | */ | 1282 | */ |
1283 | if (datalen == length + fraggap) | 1283 | if (datalen == length + fraggap) |
1284 | alloclen += rt->u.dst.trailer_len; | 1284 | alloclen += rt->dst.trailer_len; |
1285 | 1285 | ||
1286 | /* | 1286 | /* |
1287 | * We just reserve space for fragment header. | 1287 | * We just reserve space for fragment header. |
@@ -1358,7 +1358,7 @@ alloc_new_skb: | |||
1358 | if (copy > length) | 1358 | if (copy > length) |
1359 | copy = length; | 1359 | copy = length; |
1360 | 1360 | ||
1361 | if (!(rt->u.dst.dev->features&NETIF_F_SG)) { | 1361 | if (!(rt->dst.dev->features&NETIF_F_SG)) { |
1362 | unsigned int off; | 1362 | unsigned int off; |
1363 | 1363 | ||
1364 | off = skb->len; | 1364 | off = skb->len; |
@@ -1503,7 +1503,7 @@ int ip6_push_pending_frames(struct sock *sk) | |||
1503 | skb->priority = sk->sk_priority; | 1503 | skb->priority = sk->sk_priority; |
1504 | skb->mark = sk->sk_mark; | 1504 | skb->mark = sk->sk_mark; |
1505 | 1505 | ||
1506 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 1506 | skb_dst_set(skb, dst_clone(&rt->dst)); |
1507 | IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); | 1507 | IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); |
1508 | if (proto == IPPROTO_ICMPV6) { | 1508 | if (proto == IPPROTO_ICMPV6) { |
1509 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); | 1509 | struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8f39893d8081..0fd027f3f47e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -552,7 +552,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
552 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) | 552 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) |
553 | goto out; | 553 | goto out; |
554 | 554 | ||
555 | skb2->dev = rt->u.dst.dev; | 555 | skb2->dev = rt->dst.dev; |
556 | 556 | ||
557 | /* route "incoming" packet */ | 557 | /* route "incoming" packet */ |
558 | if (rt->rt_flags & RTCF_LOCAL) { | 558 | if (rt->rt_flags & RTCF_LOCAL) { |
@@ -562,7 +562,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
562 | fl.fl4_src = eiph->saddr; | 562 | fl.fl4_src = eiph->saddr; |
563 | fl.fl4_tos = eiph->tos; | 563 | fl.fl4_tos = eiph->tos; |
564 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || | 564 | if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || |
565 | rt->u.dst.dev->type != ARPHRD_TUNNEL) { | 565 | rt->dst.dev->type != ARPHRD_TUNNEL) { |
566 | ip_rt_put(rt); | 566 | ip_rt_put(rt); |
567 | goto out; | 567 | goto out; |
568 | } | 568 | } |
@@ -626,7 +626,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
626 | icmpv6_send(skb2, rel_type, rel_code, rel_info); | 626 | icmpv6_send(skb2, rel_type, rel_code, rel_info); |
627 | 627 | ||
628 | if (rt) | 628 | if (rt) |
629 | dst_release(&rt->u.dst); | 629 | dst_release(&rt->dst); |
630 | 630 | ||
631 | kfree_skb(skb2); | 631 | kfree_skb(skb2); |
632 | } | 632 | } |
@@ -1135,7 +1135,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) | |||
1135 | if (dev->mtu < IPV6_MIN_MTU) | 1135 | if (dev->mtu < IPV6_MIN_MTU) |
1136 | dev->mtu = IPV6_MIN_MTU; | 1136 | dev->mtu = IPV6_MIN_MTU; |
1137 | } | 1137 | } |
1138 | dst_release(&rt->u.dst); | 1138 | dst_release(&rt->dst); |
1139 | } | 1139 | } |
1140 | } | 1140 | } |
1141 | 1141 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 073071f2b75b..66078dad7fe8 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt); | |||
120 | static void ipmr_expire_process(unsigned long arg); | 120 | static void ipmr_expire_process(unsigned long arg); |
121 | 121 | ||
122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | 122 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES |
123 | #define ip6mr_for_each_table(mrt, met) \ | 123 | #define ip6mr_for_each_table(mrt, net) \ |
124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) | 124 | list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) |
125 | 125 | ||
126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) | 126 | static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) |
@@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net) | |||
254 | { | 254 | { |
255 | struct mr6_table *mrt, *next; | 255 | struct mr6_table *mrt, *next; |
256 | 256 | ||
257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) | 257 | list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { |
258 | list_del(&mrt->list); | ||
258 | ip6mr_free_table(mrt); | 259 | ip6mr_free_table(mrt); |
260 | } | ||
259 | fib_rules_unregister(net->ipv6.mr6_rules_ops); | 261 | fib_rules_unregister(net->ipv6.mr6_rules_ops); |
260 | } | 262 | } |
261 | #else | 263 | #else |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 59f1881968c7..d1444b95ad7e 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -152,18 +152,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
152 | mc_lst->next = NULL; | 152 | mc_lst->next = NULL; |
153 | ipv6_addr_copy(&mc_lst->addr, addr); | 153 | ipv6_addr_copy(&mc_lst->addr, addr); |
154 | 154 | ||
155 | rcu_read_lock(); | ||
155 | if (ifindex == 0) { | 156 | if (ifindex == 0) { |
156 | struct rt6_info *rt; | 157 | struct rt6_info *rt; |
157 | rt = rt6_lookup(net, addr, NULL, 0, 0); | 158 | rt = rt6_lookup(net, addr, NULL, 0, 0); |
158 | if (rt) { | 159 | if (rt) { |
159 | dev = rt->rt6i_dev; | 160 | dev = rt->rt6i_dev; |
160 | dev_hold(dev); | 161 | dst_release(&rt->dst); |
161 | dst_release(&rt->u.dst); | ||
162 | } | 162 | } |
163 | } else | 163 | } else |
164 | dev = dev_get_by_index(net, ifindex); | 164 | dev = dev_get_by_index_rcu(net, ifindex); |
165 | 165 | ||
166 | if (dev == NULL) { | 166 | if (dev == NULL) { |
167 | rcu_read_unlock(); | ||
167 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 168 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
168 | return -ENODEV; | 169 | return -ENODEV; |
169 | } | 170 | } |
@@ -180,8 +181,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
180 | err = ipv6_dev_mc_inc(dev, addr); | 181 | err = ipv6_dev_mc_inc(dev, addr); |
181 | 182 | ||
182 | if (err) { | 183 | if (err) { |
184 | rcu_read_unlock(); | ||
183 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 185 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
184 | dev_put(dev); | ||
185 | return err; | 186 | return err; |
186 | } | 187 | } |
187 | 188 | ||
@@ -190,7 +191,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
190 | np->ipv6_mc_list = mc_lst; | 191 | np->ipv6_mc_list = mc_lst; |
191 | write_unlock_bh(&ipv6_sk_mc_lock); | 192 | write_unlock_bh(&ipv6_sk_mc_lock); |
192 | 193 | ||
193 | dev_put(dev); | 194 | rcu_read_unlock(); |
194 | 195 | ||
195 | return 0; | 196 | return 0; |
196 | } | 197 | } |
@@ -213,18 +214,17 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
213 | *lnk = mc_lst->next; | 214 | *lnk = mc_lst->next; |
214 | write_unlock_bh(&ipv6_sk_mc_lock); | 215 | write_unlock_bh(&ipv6_sk_mc_lock); |
215 | 216 | ||
216 | dev = dev_get_by_index(net, mc_lst->ifindex); | 217 | rcu_read_lock(); |
218 | dev = dev_get_by_index_rcu(net, mc_lst->ifindex); | ||
217 | if (dev != NULL) { | 219 | if (dev != NULL) { |
218 | struct inet6_dev *idev = in6_dev_get(dev); | 220 | struct inet6_dev *idev = __in6_dev_get(dev); |
219 | 221 | ||
220 | (void) ip6_mc_leave_src(sk, mc_lst, idev); | 222 | (void) ip6_mc_leave_src(sk, mc_lst, idev); |
221 | if (idev) { | 223 | if (idev) |
222 | __ipv6_dev_mc_dec(idev, &mc_lst->addr); | 224 | __ipv6_dev_mc_dec(idev, &mc_lst->addr); |
223 | in6_dev_put(idev); | ||
224 | } | ||
225 | dev_put(dev); | ||
226 | } else | 225 | } else |
227 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); | 226 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); |
227 | rcu_read_unlock(); | ||
228 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 228 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
229 | return 0; | 229 | return 0; |
230 | } | 230 | } |
@@ -234,43 +234,36 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
234 | return -EADDRNOTAVAIL; | 234 | return -EADDRNOTAVAIL; |
235 | } | 235 | } |
236 | 236 | ||
237 | static struct inet6_dev *ip6_mc_find_dev(struct net *net, | 237 | /* called with rcu_read_lock() */ |
238 | struct in6_addr *group, | 238 | static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, |
239 | int ifindex) | 239 | struct in6_addr *group, |
240 | int ifindex) | ||
240 | { | 241 | { |
241 | struct net_device *dev = NULL; | 242 | struct net_device *dev = NULL; |
242 | struct inet6_dev *idev = NULL; | 243 | struct inet6_dev *idev = NULL; |
243 | 244 | ||
244 | if (ifindex == 0) { | 245 | if (ifindex == 0) { |
245 | struct rt6_info *rt; | 246 | struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0); |
246 | 247 | ||
247 | rt = rt6_lookup(net, group, NULL, 0, 0); | ||
248 | if (rt) { | 248 | if (rt) { |
249 | dev = rt->rt6i_dev; | 249 | dev = rt->rt6i_dev; |
250 | dev_hold(dev); | 250 | dev_hold(dev); |
251 | dst_release(&rt->u.dst); | 251 | dst_release(&rt->dst); |
252 | } | 252 | } |
253 | } else | 253 | } else |
254 | dev = dev_get_by_index(net, ifindex); | 254 | dev = dev_get_by_index_rcu(net, ifindex); |
255 | 255 | ||
256 | if (!dev) | 256 | if (!dev) |
257 | goto nodev; | 257 | return NULL; |
258 | idev = in6_dev_get(dev); | 258 | idev = __in6_dev_get(dev); |
259 | if (!idev) | 259 | if (!idev) |
260 | goto release; | 260 | return NULL;; |
261 | read_lock_bh(&idev->lock); | 261 | read_lock_bh(&idev->lock); |
262 | if (idev->dead) | 262 | if (idev->dead) { |
263 | goto unlock_release; | 263 | read_unlock_bh(&idev->lock); |
264 | 264 | return NULL; | |
265 | } | ||
265 | return idev; | 266 | return idev; |
266 | |||
267 | unlock_release: | ||
268 | read_unlock_bh(&idev->lock); | ||
269 | in6_dev_put(idev); | ||
270 | release: | ||
271 | dev_put(dev); | ||
272 | nodev: | ||
273 | return NULL; | ||
274 | } | 267 | } |
275 | 268 | ||
276 | void ipv6_sock_mc_close(struct sock *sk) | 269 | void ipv6_sock_mc_close(struct sock *sk) |
@@ -286,19 +279,17 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
286 | np->ipv6_mc_list = mc_lst->next; | 279 | np->ipv6_mc_list = mc_lst->next; |
287 | write_unlock_bh(&ipv6_sk_mc_lock); | 280 | write_unlock_bh(&ipv6_sk_mc_lock); |
288 | 281 | ||
289 | dev = dev_get_by_index(net, mc_lst->ifindex); | 282 | rcu_read_lock(); |
283 | dev = dev_get_by_index_rcu(net, mc_lst->ifindex); | ||
290 | if (dev) { | 284 | if (dev) { |
291 | struct inet6_dev *idev = in6_dev_get(dev); | 285 | struct inet6_dev *idev = __in6_dev_get(dev); |
292 | 286 | ||
293 | (void) ip6_mc_leave_src(sk, mc_lst, idev); | 287 | (void) ip6_mc_leave_src(sk, mc_lst, idev); |
294 | if (idev) { | 288 | if (idev) |
295 | __ipv6_dev_mc_dec(idev, &mc_lst->addr); | 289 | __ipv6_dev_mc_dec(idev, &mc_lst->addr); |
296 | in6_dev_put(idev); | ||
297 | } | ||
298 | dev_put(dev); | ||
299 | } else | 290 | } else |
300 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); | 291 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); |
301 | 292 | rcu_read_unlock(); | |
302 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 293 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
303 | 294 | ||
304 | write_lock_bh(&ipv6_sk_mc_lock); | 295 | write_lock_bh(&ipv6_sk_mc_lock); |
@@ -327,14 +318,17 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
327 | if (!ipv6_addr_is_multicast(group)) | 318 | if (!ipv6_addr_is_multicast(group)) |
328 | return -EINVAL; | 319 | return -EINVAL; |
329 | 320 | ||
330 | idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface); | 321 | rcu_read_lock(); |
331 | if (!idev) | 322 | idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface); |
323 | if (!idev) { | ||
324 | rcu_read_unlock(); | ||
332 | return -ENODEV; | 325 | return -ENODEV; |
326 | } | ||
333 | dev = idev->dev; | 327 | dev = idev->dev; |
334 | 328 | ||
335 | err = -EADDRNOTAVAIL; | 329 | err = -EADDRNOTAVAIL; |
336 | 330 | ||
337 | read_lock_bh(&ipv6_sk_mc_lock); | 331 | read_lock(&ipv6_sk_mc_lock); |
338 | for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { | 332 | for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { |
339 | if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) | 333 | if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) |
340 | continue; | 334 | continue; |
@@ -358,7 +352,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
358 | pmc->sfmode = omode; | 352 | pmc->sfmode = omode; |
359 | } | 353 | } |
360 | 354 | ||
361 | write_lock_bh(&pmc->sflock); | 355 | write_lock(&pmc->sflock); |
362 | pmclocked = 1; | 356 | pmclocked = 1; |
363 | 357 | ||
364 | psl = pmc->sflist; | 358 | psl = pmc->sflist; |
@@ -433,11 +427,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk, | |||
433 | ip6_mc_add_src(idev, group, omode, 1, source, 1); | 427 | ip6_mc_add_src(idev, group, omode, 1, source, 1); |
434 | done: | 428 | done: |
435 | if (pmclocked) | 429 | if (pmclocked) |
436 | write_unlock_bh(&pmc->sflock); | 430 | write_unlock(&pmc->sflock); |
437 | read_unlock_bh(&ipv6_sk_mc_lock); | 431 | read_unlock(&ipv6_sk_mc_lock); |
438 | read_unlock_bh(&idev->lock); | 432 | read_unlock_bh(&idev->lock); |
439 | in6_dev_put(idev); | 433 | rcu_read_unlock(); |
440 | dev_put(dev); | ||
441 | if (leavegroup) | 434 | if (leavegroup) |
442 | return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); | 435 | return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); |
443 | return err; | 436 | return err; |
@@ -463,14 +456,17 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) | |||
463 | gsf->gf_fmode != MCAST_EXCLUDE) | 456 | gsf->gf_fmode != MCAST_EXCLUDE) |
464 | return -EINVAL; | 457 | return -EINVAL; |
465 | 458 | ||
466 | idev = ip6_mc_find_dev(net, group, gsf->gf_interface); | 459 | rcu_read_lock(); |
460 | idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); | ||
467 | 461 | ||
468 | if (!idev) | 462 | if (!idev) { |
463 | rcu_read_unlock(); | ||
469 | return -ENODEV; | 464 | return -ENODEV; |
465 | } | ||
470 | dev = idev->dev; | 466 | dev = idev->dev; |
471 | 467 | ||
472 | err = 0; | 468 | err = 0; |
473 | read_lock_bh(&ipv6_sk_mc_lock); | 469 | read_lock(&ipv6_sk_mc_lock); |
474 | 470 | ||
475 | if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { | 471 | if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { |
476 | leavegroup = 1; | 472 | leavegroup = 1; |
@@ -512,7 +508,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) | |||
512 | (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); | 508 | (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); |
513 | } | 509 | } |
514 | 510 | ||
515 | write_lock_bh(&pmc->sflock); | 511 | write_lock(&pmc->sflock); |
516 | psl = pmc->sflist; | 512 | psl = pmc->sflist; |
517 | if (psl) { | 513 | if (psl) { |
518 | (void) ip6_mc_del_src(idev, group, pmc->sfmode, | 514 | (void) ip6_mc_del_src(idev, group, pmc->sfmode, |
@@ -522,13 +518,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) | |||
522 | (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); | 518 | (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); |
523 | pmc->sflist = newpsl; | 519 | pmc->sflist = newpsl; |
524 | pmc->sfmode = gsf->gf_fmode; | 520 | pmc->sfmode = gsf->gf_fmode; |
525 | write_unlock_bh(&pmc->sflock); | 521 | write_unlock(&pmc->sflock); |
526 | err = 0; | 522 | err = 0; |
527 | done: | 523 | done: |
528 | read_unlock_bh(&ipv6_sk_mc_lock); | 524 | read_unlock(&ipv6_sk_mc_lock); |
529 | read_unlock_bh(&idev->lock); | 525 | read_unlock_bh(&idev->lock); |
530 | in6_dev_put(idev); | 526 | rcu_read_unlock(); |
531 | dev_put(dev); | ||
532 | if (leavegroup) | 527 | if (leavegroup) |
533 | err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); | 528 | err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); |
534 | return err; | 529 | return err; |
@@ -551,11 +546,13 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, | |||
551 | if (!ipv6_addr_is_multicast(group)) | 546 | if (!ipv6_addr_is_multicast(group)) |
552 | return -EINVAL; | 547 | return -EINVAL; |
553 | 548 | ||
554 | idev = ip6_mc_find_dev(net, group, gsf->gf_interface); | 549 | rcu_read_lock(); |
550 | idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface); | ||
555 | 551 | ||
556 | if (!idev) | 552 | if (!idev) { |
553 | rcu_read_unlock(); | ||
557 | return -ENODEV; | 554 | return -ENODEV; |
558 | 555 | } | |
559 | dev = idev->dev; | 556 | dev = idev->dev; |
560 | 557 | ||
561 | err = -EADDRNOTAVAIL; | 558 | err = -EADDRNOTAVAIL; |
@@ -577,8 +574,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, | |||
577 | psl = pmc->sflist; | 574 | psl = pmc->sflist; |
578 | count = psl ? psl->sl_count : 0; | 575 | count = psl ? psl->sl_count : 0; |
579 | read_unlock_bh(&idev->lock); | 576 | read_unlock_bh(&idev->lock); |
580 | in6_dev_put(idev); | 577 | rcu_read_unlock(); |
581 | dev_put(dev); | ||
582 | 578 | ||
583 | copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; | 579 | copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; |
584 | gsf->gf_numsrc = count; | 580 | gsf->gf_numsrc = count; |
@@ -604,8 +600,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, | |||
604 | return 0; | 600 | return 0; |
605 | done: | 601 | done: |
606 | read_unlock_bh(&idev->lock); | 602 | read_unlock_bh(&idev->lock); |
607 | in6_dev_put(idev); | 603 | rcu_read_unlock(); |
608 | dev_put(dev); | ||
609 | return err; | 604 | return err; |
610 | } | 605 | } |
611 | 606 | ||
@@ -822,6 +817,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | |||
822 | struct ifmcaddr6 *mc; | 817 | struct ifmcaddr6 *mc; |
823 | struct inet6_dev *idev; | 818 | struct inet6_dev *idev; |
824 | 819 | ||
820 | /* we need to take a reference on idev */ | ||
825 | idev = in6_dev_get(dev); | 821 | idev = in6_dev_get(dev); |
826 | 822 | ||
827 | if (idev == NULL) | 823 | if (idev == NULL) |
@@ -860,7 +856,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | |||
860 | setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); | 856 | setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); |
861 | 857 | ||
862 | ipv6_addr_copy(&mc->mca_addr, addr); | 858 | ipv6_addr_copy(&mc->mca_addr, addr); |
863 | mc->idev = idev; | 859 | mc->idev = idev; /* (reference taken) */ |
864 | mc->mca_users = 1; | 860 | mc->mca_users = 1; |
865 | /* mca_stamp should be updated upon changes */ | 861 | /* mca_stamp should be updated upon changes */ |
866 | mc->mca_cstamp = mc->mca_tstamp = jiffies; | 862 | mc->mca_cstamp = mc->mca_tstamp = jiffies; |
@@ -915,16 +911,18 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) | |||
915 | 911 | ||
916 | int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) | 912 | int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) |
917 | { | 913 | { |
918 | struct inet6_dev *idev = in6_dev_get(dev); | 914 | struct inet6_dev *idev; |
919 | int err; | 915 | int err; |
920 | 916 | ||
921 | if (!idev) | 917 | rcu_read_lock(); |
922 | return -ENODEV; | ||
923 | |||
924 | err = __ipv6_dev_mc_dec(idev, addr); | ||
925 | 918 | ||
926 | in6_dev_put(idev); | 919 | idev = __in6_dev_get(dev); |
920 | if (!idev) | ||
921 | err = -ENODEV; | ||
922 | else | ||
923 | err = __ipv6_dev_mc_dec(idev, addr); | ||
927 | 924 | ||
925 | rcu_read_unlock(); | ||
928 | return err; | 926 | return err; |
929 | } | 927 | } |
930 | 928 | ||
@@ -965,7 +963,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, | |||
965 | struct ifmcaddr6 *mc; | 963 | struct ifmcaddr6 *mc; |
966 | int rv = 0; | 964 | int rv = 0; |
967 | 965 | ||
968 | idev = in6_dev_get(dev); | 966 | rcu_read_lock(); |
967 | idev = __in6_dev_get(dev); | ||
969 | if (idev) { | 968 | if (idev) { |
970 | read_lock_bh(&idev->lock); | 969 | read_lock_bh(&idev->lock); |
971 | for (mc = idev->mc_list; mc; mc=mc->next) { | 970 | for (mc = idev->mc_list; mc; mc=mc->next) { |
@@ -992,8 +991,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, | |||
992 | rv = 1; /* don't filter unspecified source */ | 991 | rv = 1; /* don't filter unspecified source */ |
993 | } | 992 | } |
994 | read_unlock_bh(&idev->lock); | 993 | read_unlock_bh(&idev->lock); |
995 | in6_dev_put(idev); | ||
996 | } | 994 | } |
995 | rcu_read_unlock(); | ||
997 | return rv; | 996 | return rv; |
998 | } | 997 | } |
999 | 998 | ||
@@ -1104,6 +1103,7 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, | |||
1104 | return 1; | 1103 | return 1; |
1105 | } | 1104 | } |
1106 | 1105 | ||
1106 | /* called with rcu_read_lock() */ | ||
1107 | int igmp6_event_query(struct sk_buff *skb) | 1107 | int igmp6_event_query(struct sk_buff *skb) |
1108 | { | 1108 | { |
1109 | struct mld2_query *mlh2 = NULL; | 1109 | struct mld2_query *mlh2 = NULL; |
@@ -1127,7 +1127,7 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1127 | if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) | 1127 | if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) |
1128 | return -EINVAL; | 1128 | return -EINVAL; |
1129 | 1129 | ||
1130 | idev = in6_dev_get(skb->dev); | 1130 | idev = __in6_dev_get(skb->dev); |
1131 | 1131 | ||
1132 | if (idev == NULL) | 1132 | if (idev == NULL) |
1133 | return 0; | 1133 | return 0; |
@@ -1137,10 +1137,8 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1137 | group_type = ipv6_addr_type(group); | 1137 | group_type = ipv6_addr_type(group); |
1138 | 1138 | ||
1139 | if (group_type != IPV6_ADDR_ANY && | 1139 | if (group_type != IPV6_ADDR_ANY && |
1140 | !(group_type&IPV6_ADDR_MULTICAST)) { | 1140 | !(group_type&IPV6_ADDR_MULTICAST)) |
1141 | in6_dev_put(idev); | ||
1142 | return -EINVAL; | 1141 | return -EINVAL; |
1143 | } | ||
1144 | 1142 | ||
1145 | if (len == 24) { | 1143 | if (len == 24) { |
1146 | int switchback; | 1144 | int switchback; |
@@ -1161,10 +1159,9 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1161 | } else if (len >= 28) { | 1159 | } else if (len >= 28) { |
1162 | int srcs_offset = sizeof(struct mld2_query) - | 1160 | int srcs_offset = sizeof(struct mld2_query) - |
1163 | sizeof(struct icmp6hdr); | 1161 | sizeof(struct icmp6hdr); |
1164 | if (!pskb_may_pull(skb, srcs_offset)) { | 1162 | if (!pskb_may_pull(skb, srcs_offset)) |
1165 | in6_dev_put(idev); | ||
1166 | return -EINVAL; | 1163 | return -EINVAL; |
1167 | } | 1164 | |
1168 | mlh2 = (struct mld2_query *)skb_transport_header(skb); | 1165 | mlh2 = (struct mld2_query *)skb_transport_header(skb); |
1169 | max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; | 1166 | max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; |
1170 | if (!max_delay) | 1167 | if (!max_delay) |
@@ -1173,28 +1170,23 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1173 | if (mlh2->mld2q_qrv) | 1170 | if (mlh2->mld2q_qrv) |
1174 | idev->mc_qrv = mlh2->mld2q_qrv; | 1171 | idev->mc_qrv = mlh2->mld2q_qrv; |
1175 | if (group_type == IPV6_ADDR_ANY) { /* general query */ | 1172 | if (group_type == IPV6_ADDR_ANY) { /* general query */ |
1176 | if (mlh2->mld2q_nsrcs) { | 1173 | if (mlh2->mld2q_nsrcs) |
1177 | in6_dev_put(idev); | ||
1178 | return -EINVAL; /* no sources allowed */ | 1174 | return -EINVAL; /* no sources allowed */ |
1179 | } | 1175 | |
1180 | mld_gq_start_timer(idev); | 1176 | mld_gq_start_timer(idev); |
1181 | in6_dev_put(idev); | ||
1182 | return 0; | 1177 | return 0; |
1183 | } | 1178 | } |
1184 | /* mark sources to include, if group & source-specific */ | 1179 | /* mark sources to include, if group & source-specific */ |
1185 | if (mlh2->mld2q_nsrcs != 0) { | 1180 | if (mlh2->mld2q_nsrcs != 0) { |
1186 | if (!pskb_may_pull(skb, srcs_offset + | 1181 | if (!pskb_may_pull(skb, srcs_offset + |
1187 | ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) { | 1182 | ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) |
1188 | in6_dev_put(idev); | ||
1189 | return -EINVAL; | 1183 | return -EINVAL; |
1190 | } | 1184 | |
1191 | mlh2 = (struct mld2_query *)skb_transport_header(skb); | 1185 | mlh2 = (struct mld2_query *)skb_transport_header(skb); |
1192 | mark = 1; | 1186 | mark = 1; |
1193 | } | 1187 | } |
1194 | } else { | 1188 | } else |
1195 | in6_dev_put(idev); | ||
1196 | return -EINVAL; | 1189 | return -EINVAL; |
1197 | } | ||
1198 | 1190 | ||
1199 | read_lock_bh(&idev->lock); | 1191 | read_lock_bh(&idev->lock); |
1200 | if (group_type == IPV6_ADDR_ANY) { | 1192 | if (group_type == IPV6_ADDR_ANY) { |
@@ -1227,12 +1219,11 @@ int igmp6_event_query(struct sk_buff *skb) | |||
1227 | } | 1219 | } |
1228 | } | 1220 | } |
1229 | read_unlock_bh(&idev->lock); | 1221 | read_unlock_bh(&idev->lock); |
1230 | in6_dev_put(idev); | ||
1231 | 1222 | ||
1232 | return 0; | 1223 | return 0; |
1233 | } | 1224 | } |
1234 | 1225 | ||
1235 | 1226 | /* called with rcu_read_lock() */ | |
1236 | int igmp6_event_report(struct sk_buff *skb) | 1227 | int igmp6_event_report(struct sk_buff *skb) |
1237 | { | 1228 | { |
1238 | struct ifmcaddr6 *ma; | 1229 | struct ifmcaddr6 *ma; |
@@ -1260,7 +1251,7 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1260 | !(addr_type&IPV6_ADDR_LINKLOCAL)) | 1251 | !(addr_type&IPV6_ADDR_LINKLOCAL)) |
1261 | return -EINVAL; | 1252 | return -EINVAL; |
1262 | 1253 | ||
1263 | idev = in6_dev_get(skb->dev); | 1254 | idev = __in6_dev_get(skb->dev); |
1264 | if (idev == NULL) | 1255 | if (idev == NULL) |
1265 | return -ENODEV; | 1256 | return -ENODEV; |
1266 | 1257 | ||
@@ -1280,7 +1271,6 @@ int igmp6_event_report(struct sk_buff *skb) | |||
1280 | } | 1271 | } |
1281 | } | 1272 | } |
1282 | read_unlock_bh(&idev->lock); | 1273 | read_unlock_bh(&idev->lock); |
1283 | in6_dev_put(idev); | ||
1284 | return 0; | 1274 | return 0; |
1285 | } | 1275 | } |
1286 | 1276 | ||
@@ -1356,7 +1346,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) | |||
1356 | IPV6_TLV_PADN, 0 }; | 1346 | IPV6_TLV_PADN, 0 }; |
1357 | 1347 | ||
1358 | /* we assume size > sizeof(ra) here */ | 1348 | /* we assume size > sizeof(ra) here */ |
1359 | skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); | 1349 | size += LL_ALLOCATED_SPACE(dev); |
1350 | /* limit our allocations to order-0 page */ | ||
1351 | size = min_t(int, size, SKB_MAX_ORDER(0, 0)); | ||
1352 | skb = sock_alloc_send_skb(sk, size, 1, &err); | ||
1360 | 1353 | ||
1361 | if (!skb) | 1354 | if (!skb) |
1362 | return NULL; | 1355 | return NULL; |
@@ -1393,12 +1386,14 @@ static void mld_sendpack(struct sk_buff *skb) | |||
1393 | struct mld2_report *pmr = | 1386 | struct mld2_report *pmr = |
1394 | (struct mld2_report *)skb_transport_header(skb); | 1387 | (struct mld2_report *)skb_transport_header(skb); |
1395 | int payload_len, mldlen; | 1388 | int payload_len, mldlen; |
1396 | struct inet6_dev *idev = in6_dev_get(skb->dev); | 1389 | struct inet6_dev *idev; |
1397 | struct net *net = dev_net(skb->dev); | 1390 | struct net *net = dev_net(skb->dev); |
1398 | int err; | 1391 | int err; |
1399 | struct flowi fl; | 1392 | struct flowi fl; |
1400 | struct dst_entry *dst; | 1393 | struct dst_entry *dst; |
1401 | 1394 | ||
1395 | rcu_read_lock(); | ||
1396 | idev = __in6_dev_get(skb->dev); | ||
1402 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); | 1397 | IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); |
1403 | 1398 | ||
1404 | payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); | 1399 | payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); |
@@ -1438,8 +1433,7 @@ out: | |||
1438 | } else | 1433 | } else |
1439 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); | 1434 | IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); |
1440 | 1435 | ||
1441 | if (likely(idev != NULL)) | 1436 | rcu_read_unlock(); |
1442 | in6_dev_put(idev); | ||
1443 | return; | 1437 | return; |
1444 | 1438 | ||
1445 | err_out: | 1439 | err_out: |
@@ -1776,7 +1770,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) | |||
1776 | IPPROTO_ICMPV6, | 1770 | IPPROTO_ICMPV6, |
1777 | csum_partial(hdr, len, 0)); | 1771 | csum_partial(hdr, len, 0)); |
1778 | 1772 | ||
1779 | idev = in6_dev_get(skb->dev); | 1773 | rcu_read_lock(); |
1774 | idev = __in6_dev_get(skb->dev); | ||
1780 | 1775 | ||
1781 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); | 1776 | dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); |
1782 | if (!dst) { | 1777 | if (!dst) { |
@@ -1803,8 +1798,7 @@ out: | |||
1803 | } else | 1798 | } else |
1804 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); | 1799 | IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); |
1805 | 1800 | ||
1806 | if (likely(idev != NULL)) | 1801 | rcu_read_unlock(); |
1807 | in6_dev_put(idev); | ||
1808 | return; | 1802 | return; |
1809 | 1803 | ||
1810 | err_out: | 1804 | err_out: |
@@ -1995,8 +1989,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) | |||
1995 | &psf->sf_addr)) | 1989 | &psf->sf_addr)) |
1996 | break; | 1990 | break; |
1997 | if (!dpsf) { | 1991 | if (!dpsf) { |
1998 | dpsf = (struct ip6_sf_list *) | 1992 | dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC); |
1999 | kmalloc(sizeof(*dpsf), GFP_ATOMIC); | ||
2000 | if (!dpsf) | 1993 | if (!dpsf) |
2001 | continue; | 1994 | continue; |
2002 | *dpsf = *psf; | 1995 | *dpsf = *psf; |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0abdc242ddb7..1fc46fc60efd 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -1229,7 +1229,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1229 | ND_PRINTK0(KERN_ERR | 1229 | ND_PRINTK0(KERN_ERR |
1230 | "ICMPv6 RA: %s() got default router without neighbour.\n", | 1230 | "ICMPv6 RA: %s() got default router without neighbour.\n", |
1231 | __func__); | 1231 | __func__); |
1232 | dst_release(&rt->u.dst); | 1232 | dst_release(&rt->dst); |
1233 | in6_dev_put(in6_dev); | 1233 | in6_dev_put(in6_dev); |
1234 | return; | 1234 | return; |
1235 | } | 1235 | } |
@@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) | |||
1244 | if (ra_msg->icmph.icmp6_hop_limit) { | 1244 | if (ra_msg->icmph.icmp6_hop_limit) { |
1245 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; | 1245 | in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; |
1246 | if (rt) | 1246 | if (rt) |
1247 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit; | 1247 | rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit; |
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | skip_defrtr: | 1250 | skip_defrtr: |
@@ -1363,7 +1363,7 @@ skip_linkparms: | |||
1363 | in6_dev->cnf.mtu6 = mtu; | 1363 | in6_dev->cnf.mtu6 = mtu; |
1364 | 1364 | ||
1365 | if (rt) | 1365 | if (rt) |
1366 | rt->u.dst.metrics[RTAX_MTU-1] = mtu; | 1366 | rt->dst.metrics[RTAX_MTU-1] = mtu; |
1367 | 1367 | ||
1368 | rt6_mtu_change(skb->dev, mtu); | 1368 | rt6_mtu_change(skb->dev, mtu); |
1369 | } | 1369 | } |
@@ -1384,7 +1384,7 @@ skip_linkparms: | |||
1384 | } | 1384 | } |
1385 | out: | 1385 | out: |
1386 | if (rt) | 1386 | if (rt) |
1387 | dst_release(&rt->u.dst); | 1387 | dst_release(&rt->dst); |
1388 | else if (neigh) | 1388 | else if (neigh) |
1389 | neigh_release(neigh); | 1389 | neigh_release(neigh); |
1390 | in6_dev_put(in6_dev); | 1390 | in6_dev_put(in6_dev); |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 82945ef6c9fc..dc41d6d3c6c6 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb, | |||
363 | cpu = smp_processor_id(); | 363 | cpu = smp_processor_id(); |
364 | table_base = private->entries[cpu]; | 364 | table_base = private->entries[cpu]; |
365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; | 365 | jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; |
366 | stackptr = &private->stackptr[cpu]; | 366 | stackptr = per_cpu_ptr(private->stackptr, cpu); |
367 | origptr = *stackptr; | 367 | origptr = *stackptr; |
368 | 368 | ||
369 | e = get_entry(table_base, private->hook_entry[hook]); | 369 | e = get_entry(table_base, private->hook_entry[hook]); |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4a4dcbe4f8b2..e677937a07fc 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -602,31 +602,33 @@ out: | |||
602 | } | 602 | } |
603 | 603 | ||
604 | static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | 604 | static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, |
605 | struct flowi *fl, struct rt6_info *rt, | 605 | struct flowi *fl, struct dst_entry **dstp, |
606 | unsigned int flags) | 606 | unsigned int flags) |
607 | { | 607 | { |
608 | struct ipv6_pinfo *np = inet6_sk(sk); | 608 | struct ipv6_pinfo *np = inet6_sk(sk); |
609 | struct ipv6hdr *iph; | 609 | struct ipv6hdr *iph; |
610 | struct sk_buff *skb; | 610 | struct sk_buff *skb; |
611 | int err; | 611 | int err; |
612 | struct rt6_info *rt = (struct rt6_info *)*dstp; | ||
612 | 613 | ||
613 | if (length > rt->u.dst.dev->mtu) { | 614 | if (length > rt->dst.dev->mtu) { |
614 | ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu); | 615 | ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu); |
615 | return -EMSGSIZE; | 616 | return -EMSGSIZE; |
616 | } | 617 | } |
617 | if (flags&MSG_PROBE) | 618 | if (flags&MSG_PROBE) |
618 | goto out; | 619 | goto out; |
619 | 620 | ||
620 | skb = sock_alloc_send_skb(sk, | 621 | skb = sock_alloc_send_skb(sk, |
621 | length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, | 622 | length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, |
622 | flags & MSG_DONTWAIT, &err); | 623 | flags & MSG_DONTWAIT, &err); |
623 | if (skb == NULL) | 624 | if (skb == NULL) |
624 | goto error; | 625 | goto error; |
625 | skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); | 626 | skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); |
626 | 627 | ||
627 | skb->priority = sk->sk_priority; | 628 | skb->priority = sk->sk_priority; |
628 | skb->mark = sk->sk_mark; | 629 | skb->mark = sk->sk_mark; |
629 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 630 | skb_dst_set(skb, &rt->dst); |
631 | *dstp = NULL; | ||
630 | 632 | ||
631 | skb_put(skb, length); | 633 | skb_put(skb, length); |
632 | skb_reset_network_header(skb); | 634 | skb_reset_network_header(skb); |
@@ -641,7 +643,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, | |||
641 | 643 | ||
642 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); | 644 | IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); |
643 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, | 645 | err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, |
644 | rt->u.dst.dev, dst_output); | 646 | rt->dst.dev, dst_output); |
645 | if (err > 0) | 647 | if (err > 0) |
646 | err = net_xmit_errno(err); | 648 | err = net_xmit_errno(err); |
647 | if (err) | 649 | if (err) |
@@ -725,7 +727,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
725 | { | 727 | { |
726 | struct ipv6_txoptions opt_space; | 728 | struct ipv6_txoptions opt_space; |
727 | struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; | 729 | struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; |
728 | struct in6_addr *daddr, *final_p = NULL, final; | 730 | struct in6_addr *daddr, *final_p, final; |
729 | struct inet_sock *inet = inet_sk(sk); | 731 | struct inet_sock *inet = inet_sk(sk); |
730 | struct ipv6_pinfo *np = inet6_sk(sk); | 732 | struct ipv6_pinfo *np = inet6_sk(sk); |
731 | struct raw6_sock *rp = raw6_sk(sk); | 733 | struct raw6_sock *rp = raw6_sk(sk); |
@@ -847,13 +849,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
847 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) | 849 | if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) |
848 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 850 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
849 | 851 | ||
850 | /* merge ip6_build_xmit from ip6_output */ | 852 | final_p = fl6_update_dst(&fl, opt, &final); |
851 | if (opt && opt->srcrt) { | ||
852 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
853 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
854 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
855 | final_p = &final; | ||
856 | } | ||
857 | 853 | ||
858 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) | 854 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) |
859 | fl.oif = np->mcast_oif; | 855 | fl.oif = np->mcast_oif; |
@@ -892,9 +888,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
892 | goto do_confirm; | 888 | goto do_confirm; |
893 | 889 | ||
894 | back_from_confirm: | 890 | back_from_confirm: |
895 | if (inet->hdrincl) { | 891 | if (inet->hdrincl) |
896 | err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags); | 892 | err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags); |
897 | } else { | 893 | else { |
898 | lock_sock(sk); | 894 | lock_sock(sk); |
899 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, | 895 | err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, |
900 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, | 896 | len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 252d76199c41..8f2d0400cf8a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -126,16 +126,14 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
126 | }; | 126 | }; |
127 | 127 | ||
128 | static struct rt6_info ip6_null_entry_template = { | 128 | static struct rt6_info ip6_null_entry_template = { |
129 | .u = { | 129 | .dst = { |
130 | .dst = { | 130 | .__refcnt = ATOMIC_INIT(1), |
131 | .__refcnt = ATOMIC_INIT(1), | 131 | .__use = 1, |
132 | .__use = 1, | 132 | .obsolete = -1, |
133 | .obsolete = -1, | 133 | .error = -ENETUNREACH, |
134 | .error = -ENETUNREACH, | 134 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
135 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 135 | .input = ip6_pkt_discard, |
136 | .input = ip6_pkt_discard, | 136 | .output = ip6_pkt_discard_out, |
137 | .output = ip6_pkt_discard_out, | ||
138 | } | ||
139 | }, | 137 | }, |
140 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 138 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
141 | .rt6i_protocol = RTPROT_KERNEL, | 139 | .rt6i_protocol = RTPROT_KERNEL, |
@@ -149,16 +147,14 @@ static int ip6_pkt_prohibit(struct sk_buff *skb); | |||
149 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); | 147 | static int ip6_pkt_prohibit_out(struct sk_buff *skb); |
150 | 148 | ||
151 | static struct rt6_info ip6_prohibit_entry_template = { | 149 | static struct rt6_info ip6_prohibit_entry_template = { |
152 | .u = { | 150 | .dst = { |
153 | .dst = { | 151 | .__refcnt = ATOMIC_INIT(1), |
154 | .__refcnt = ATOMIC_INIT(1), | 152 | .__use = 1, |
155 | .__use = 1, | 153 | .obsolete = -1, |
156 | .obsolete = -1, | 154 | .error = -EACCES, |
157 | .error = -EACCES, | 155 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
158 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 156 | .input = ip6_pkt_prohibit, |
159 | .input = ip6_pkt_prohibit, | 157 | .output = ip6_pkt_prohibit_out, |
160 | .output = ip6_pkt_prohibit_out, | ||
161 | } | ||
162 | }, | 158 | }, |
163 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 159 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
164 | .rt6i_protocol = RTPROT_KERNEL, | 160 | .rt6i_protocol = RTPROT_KERNEL, |
@@ -167,16 +163,14 @@ static struct rt6_info ip6_prohibit_entry_template = { | |||
167 | }; | 163 | }; |
168 | 164 | ||
169 | static struct rt6_info ip6_blk_hole_entry_template = { | 165 | static struct rt6_info ip6_blk_hole_entry_template = { |
170 | .u = { | 166 | .dst = { |
171 | .dst = { | 167 | .__refcnt = ATOMIC_INIT(1), |
172 | .__refcnt = ATOMIC_INIT(1), | 168 | .__use = 1, |
173 | .__use = 1, | 169 | .obsolete = -1, |
174 | .obsolete = -1, | 170 | .error = -EINVAL, |
175 | .error = -EINVAL, | 171 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, |
176 | .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, | 172 | .input = dst_discard, |
177 | .input = dst_discard, | 173 | .output = dst_discard, |
178 | .output = dst_discard, | ||
179 | } | ||
180 | }, | 174 | }, |
181 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), | 175 | .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), |
182 | .rt6i_protocol = RTPROT_KERNEL, | 176 | .rt6i_protocol = RTPROT_KERNEL, |
@@ -249,7 +243,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net, | |||
249 | if (!oif && ipv6_addr_any(saddr)) | 243 | if (!oif && ipv6_addr_any(saddr)) |
250 | goto out; | 244 | goto out; |
251 | 245 | ||
252 | for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { | 246 | for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) { |
253 | struct net_device *dev = sprt->rt6i_dev; | 247 | struct net_device *dev = sprt->rt6i_dev; |
254 | 248 | ||
255 | if (oif) { | 249 | if (oif) { |
@@ -407,10 +401,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn, | |||
407 | 401 | ||
408 | match = NULL; | 402 | match = NULL; |
409 | for (rt = rr_head; rt && rt->rt6i_metric == metric; | 403 | for (rt = rr_head; rt && rt->rt6i_metric == metric; |
410 | rt = rt->u.dst.rt6_next) | 404 | rt = rt->dst.rt6_next) |
411 | match = find_match(rt, oif, strict, &mpri, match); | 405 | match = find_match(rt, oif, strict, &mpri, match); |
412 | for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric; | 406 | for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric; |
413 | rt = rt->u.dst.rt6_next) | 407 | rt = rt->dst.rt6_next) |
414 | match = find_match(rt, oif, strict, &mpri, match); | 408 | match = find_match(rt, oif, strict, &mpri, match); |
415 | 409 | ||
416 | return match; | 410 | return match; |
@@ -432,7 +426,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) | |||
432 | 426 | ||
433 | if (!match && | 427 | if (!match && |
434 | (strict & RT6_LOOKUP_F_REACHABLE)) { | 428 | (strict & RT6_LOOKUP_F_REACHABLE)) { |
435 | struct rt6_info *next = rt0->u.dst.rt6_next; | 429 | struct rt6_info *next = rt0->dst.rt6_next; |
436 | 430 | ||
437 | /* no entries matched; do round-robin */ | 431 | /* no entries matched; do round-robin */ |
438 | if (!next || next->rt6i_metric != rt0->rt6i_metric) | 432 | if (!next || next->rt6i_metric != rt0->rt6i_metric) |
@@ -517,7 +511,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, | |||
517 | rt->rt6i_expires = jiffies + HZ * lifetime; | 511 | rt->rt6i_expires = jiffies + HZ * lifetime; |
518 | rt->rt6i_flags |= RTF_EXPIRES; | 512 | rt->rt6i_flags |= RTF_EXPIRES; |
519 | } | 513 | } |
520 | dst_release(&rt->u.dst); | 514 | dst_release(&rt->dst); |
521 | } | 515 | } |
522 | return 0; | 516 | return 0; |
523 | } | 517 | } |
@@ -555,7 +549,7 @@ restart: | |||
555 | rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); | 549 | rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); |
556 | BACKTRACK(net, &fl->fl6_src); | 550 | BACKTRACK(net, &fl->fl6_src); |
557 | out: | 551 | out: |
558 | dst_use(&rt->u.dst, jiffies); | 552 | dst_use(&rt->dst, jiffies); |
559 | read_unlock_bh(&table->tb6_lock); | 553 | read_unlock_bh(&table->tb6_lock); |
560 | return rt; | 554 | return rt; |
561 | 555 | ||
@@ -643,7 +637,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
643 | ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); | 637 | ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); |
644 | rt->rt6i_dst.plen = 128; | 638 | rt->rt6i_dst.plen = 128; |
645 | rt->rt6i_flags |= RTF_CACHE; | 639 | rt->rt6i_flags |= RTF_CACHE; |
646 | rt->u.dst.flags |= DST_HOST; | 640 | rt->dst.flags |= DST_HOST; |
647 | 641 | ||
648 | #ifdef CONFIG_IPV6_SUBTREES | 642 | #ifdef CONFIG_IPV6_SUBTREES |
649 | if (rt->rt6i_src.plen && saddr) { | 643 | if (rt->rt6i_src.plen && saddr) { |
@@ -677,7 +671,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad | |||
677 | if (net_ratelimit()) | 671 | if (net_ratelimit()) |
678 | printk(KERN_WARNING | 672 | printk(KERN_WARNING |
679 | "Neighbour table overflow.\n"); | 673 | "Neighbour table overflow.\n"); |
680 | dst_free(&rt->u.dst); | 674 | dst_free(&rt->dst); |
681 | return NULL; | 675 | return NULL; |
682 | } | 676 | } |
683 | rt->rt6i_nexthop = neigh; | 677 | rt->rt6i_nexthop = neigh; |
@@ -694,7 +688,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d | |||
694 | ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); | 688 | ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); |
695 | rt->rt6i_dst.plen = 128; | 689 | rt->rt6i_dst.plen = 128; |
696 | rt->rt6i_flags |= RTF_CACHE; | 690 | rt->rt6i_flags |= RTF_CACHE; |
697 | rt->u.dst.flags |= DST_HOST; | 691 | rt->dst.flags |= DST_HOST; |
698 | rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); | 692 | rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); |
699 | } | 693 | } |
700 | return rt; | 694 | return rt; |
@@ -726,7 +720,7 @@ restart: | |||
726 | rt->rt6i_flags & RTF_CACHE) | 720 | rt->rt6i_flags & RTF_CACHE) |
727 | goto out; | 721 | goto out; |
728 | 722 | ||
729 | dst_hold(&rt->u.dst); | 723 | dst_hold(&rt->dst); |
730 | read_unlock_bh(&table->tb6_lock); | 724 | read_unlock_bh(&table->tb6_lock); |
731 | 725 | ||
732 | if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) | 726 | if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) |
@@ -739,10 +733,10 @@ restart: | |||
739 | #endif | 733 | #endif |
740 | } | 734 | } |
741 | 735 | ||
742 | dst_release(&rt->u.dst); | 736 | dst_release(&rt->dst); |
743 | rt = nrt ? : net->ipv6.ip6_null_entry; | 737 | rt = nrt ? : net->ipv6.ip6_null_entry; |
744 | 738 | ||
745 | dst_hold(&rt->u.dst); | 739 | dst_hold(&rt->dst); |
746 | if (nrt) { | 740 | if (nrt) { |
747 | err = ip6_ins_rt(nrt); | 741 | err = ip6_ins_rt(nrt); |
748 | if (!err) | 742 | if (!err) |
@@ -756,7 +750,7 @@ restart: | |||
756 | * Race condition! In the gap, when table->tb6_lock was | 750 | * Race condition! In the gap, when table->tb6_lock was |
757 | * released someone could insert this route. Relookup. | 751 | * released someone could insert this route. Relookup. |
758 | */ | 752 | */ |
759 | dst_release(&rt->u.dst); | 753 | dst_release(&rt->dst); |
760 | goto relookup; | 754 | goto relookup; |
761 | 755 | ||
762 | out: | 756 | out: |
@@ -764,11 +758,11 @@ out: | |||
764 | reachable = 0; | 758 | reachable = 0; |
765 | goto restart_2; | 759 | goto restart_2; |
766 | } | 760 | } |
767 | dst_hold(&rt->u.dst); | 761 | dst_hold(&rt->dst); |
768 | read_unlock_bh(&table->tb6_lock); | 762 | read_unlock_bh(&table->tb6_lock); |
769 | out2: | 763 | out2: |
770 | rt->u.dst.lastuse = jiffies; | 764 | rt->dst.lastuse = jiffies; |
771 | rt->u.dst.__use++; | 765 | rt->dst.__use++; |
772 | 766 | ||
773 | return rt; | 767 | return rt; |
774 | } | 768 | } |
@@ -835,15 +829,15 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl | |||
835 | struct dst_entry *new = NULL; | 829 | struct dst_entry *new = NULL; |
836 | 830 | ||
837 | if (rt) { | 831 | if (rt) { |
838 | new = &rt->u.dst; | 832 | new = &rt->dst; |
839 | 833 | ||
840 | atomic_set(&new->__refcnt, 1); | 834 | atomic_set(&new->__refcnt, 1); |
841 | new->__use = 1; | 835 | new->__use = 1; |
842 | new->input = dst_discard; | 836 | new->input = dst_discard; |
843 | new->output = dst_discard; | 837 | new->output = dst_discard; |
844 | 838 | ||
845 | memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); | 839 | memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); |
846 | new->dev = ort->u.dst.dev; | 840 | new->dev = ort->dst.dev; |
847 | if (new->dev) | 841 | if (new->dev) |
848 | dev_hold(new->dev); | 842 | dev_hold(new->dev); |
849 | rt->rt6i_idev = ort->rt6i_idev; | 843 | rt->rt6i_idev = ort->rt6i_idev; |
@@ -912,7 +906,7 @@ static void ip6_link_failure(struct sk_buff *skb) | |||
912 | rt = (struct rt6_info *) skb_dst(skb); | 906 | rt = (struct rt6_info *) skb_dst(skb); |
913 | if (rt) { | 907 | if (rt) { |
914 | if (rt->rt6i_flags&RTF_CACHE) { | 908 | if (rt->rt6i_flags&RTF_CACHE) { |
915 | dst_set_expires(&rt->u.dst, 0); | 909 | dst_set_expires(&rt->dst, 0); |
916 | rt->rt6i_flags |= RTF_EXPIRES; | 910 | rt->rt6i_flags |= RTF_EXPIRES; |
917 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) | 911 | } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) |
918 | rt->rt6i_node->fn_sernum = -1; | 912 | rt->rt6i_node->fn_sernum = -1; |
@@ -986,14 +980,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
986 | rt->rt6i_dev = dev; | 980 | rt->rt6i_dev = dev; |
987 | rt->rt6i_idev = idev; | 981 | rt->rt6i_idev = idev; |
988 | rt->rt6i_nexthop = neigh; | 982 | rt->rt6i_nexthop = neigh; |
989 | atomic_set(&rt->u.dst.__refcnt, 1); | 983 | atomic_set(&rt->dst.__refcnt, 1); |
990 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; | 984 | rt->dst.metrics[RTAX_HOPLIMIT-1] = 255; |
991 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); | 985 | rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); |
992 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); | 986 | rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); |
993 | rt->u.dst.output = ip6_output; | 987 | rt->dst.output = ip6_output; |
994 | 988 | ||
995 | #if 0 /* there's no chance to use these for ndisc */ | 989 | #if 0 /* there's no chance to use these for ndisc */ |
996 | rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST | 990 | rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST |
997 | ? DST_HOST | 991 | ? DST_HOST |
998 | : 0; | 992 | : 0; |
999 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); | 993 | ipv6_addr_copy(&rt->rt6i_dst.addr, addr); |
@@ -1001,14 +995,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
1001 | #endif | 995 | #endif |
1002 | 996 | ||
1003 | spin_lock_bh(&icmp6_dst_lock); | 997 | spin_lock_bh(&icmp6_dst_lock); |
1004 | rt->u.dst.next = icmp6_dst_gc_list; | 998 | rt->dst.next = icmp6_dst_gc_list; |
1005 | icmp6_dst_gc_list = &rt->u.dst; | 999 | icmp6_dst_gc_list = &rt->dst; |
1006 | spin_unlock_bh(&icmp6_dst_lock); | 1000 | spin_unlock_bh(&icmp6_dst_lock); |
1007 | 1001 | ||
1008 | fib6_force_start_gc(net); | 1002 | fib6_force_start_gc(net); |
1009 | 1003 | ||
1010 | out: | 1004 | out: |
1011 | return &rt->u.dst; | 1005 | return &rt->dst; |
1012 | } | 1006 | } |
1013 | 1007 | ||
1014 | int icmp6_dst_gc(void) | 1008 | int icmp6_dst_gc(void) |
@@ -1090,11 +1084,11 @@ static int ipv6_get_mtu(struct net_device *dev) | |||
1090 | int mtu = IPV6_MIN_MTU; | 1084 | int mtu = IPV6_MIN_MTU; |
1091 | struct inet6_dev *idev; | 1085 | struct inet6_dev *idev; |
1092 | 1086 | ||
1093 | idev = in6_dev_get(dev); | 1087 | rcu_read_lock(); |
1094 | if (idev) { | 1088 | idev = __in6_dev_get(dev); |
1089 | if (idev) | ||
1095 | mtu = idev->cnf.mtu6; | 1090 | mtu = idev->cnf.mtu6; |
1096 | in6_dev_put(idev); | 1091 | rcu_read_unlock(); |
1097 | } | ||
1098 | return mtu; | 1092 | return mtu; |
1099 | } | 1093 | } |
1100 | 1094 | ||
@@ -1103,12 +1097,15 @@ int ip6_dst_hoplimit(struct dst_entry *dst) | |||
1103 | int hoplimit = dst_metric(dst, RTAX_HOPLIMIT); | 1097 | int hoplimit = dst_metric(dst, RTAX_HOPLIMIT); |
1104 | if (hoplimit < 0) { | 1098 | if (hoplimit < 0) { |
1105 | struct net_device *dev = dst->dev; | 1099 | struct net_device *dev = dst->dev; |
1106 | struct inet6_dev *idev = in6_dev_get(dev); | 1100 | struct inet6_dev *idev; |
1107 | if (idev) { | 1101 | |
1102 | rcu_read_lock(); | ||
1103 | idev = __in6_dev_get(dev); | ||
1104 | if (idev) | ||
1108 | hoplimit = idev->cnf.hop_limit; | 1105 | hoplimit = idev->cnf.hop_limit; |
1109 | in6_dev_put(idev); | 1106 | else |
1110 | } else | ||
1111 | hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; | 1107 | hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; |
1108 | rcu_read_unlock(); | ||
1112 | } | 1109 | } |
1113 | return hoplimit; | 1110 | return hoplimit; |
1114 | } | 1111 | } |
@@ -1159,7 +1156,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1159 | goto out; | 1156 | goto out; |
1160 | } | 1157 | } |
1161 | 1158 | ||
1162 | rt->u.dst.obsolete = -1; | 1159 | rt->dst.obsolete = -1; |
1163 | rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ? | 1160 | rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ? |
1164 | jiffies + clock_t_to_jiffies(cfg->fc_expires) : | 1161 | jiffies + clock_t_to_jiffies(cfg->fc_expires) : |
1165 | 0; | 1162 | 0; |
@@ -1171,16 +1168,16 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1171 | addr_type = ipv6_addr_type(&cfg->fc_dst); | 1168 | addr_type = ipv6_addr_type(&cfg->fc_dst); |
1172 | 1169 | ||
1173 | if (addr_type & IPV6_ADDR_MULTICAST) | 1170 | if (addr_type & IPV6_ADDR_MULTICAST) |
1174 | rt->u.dst.input = ip6_mc_input; | 1171 | rt->dst.input = ip6_mc_input; |
1175 | else | 1172 | else |
1176 | rt->u.dst.input = ip6_forward; | 1173 | rt->dst.input = ip6_forward; |
1177 | 1174 | ||
1178 | rt->u.dst.output = ip6_output; | 1175 | rt->dst.output = ip6_output; |
1179 | 1176 | ||
1180 | ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); | 1177 | ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); |
1181 | rt->rt6i_dst.plen = cfg->fc_dst_len; | 1178 | rt->rt6i_dst.plen = cfg->fc_dst_len; |
1182 | if (rt->rt6i_dst.plen == 128) | 1179 | if (rt->rt6i_dst.plen == 128) |
1183 | rt->u.dst.flags = DST_HOST; | 1180 | rt->dst.flags = DST_HOST; |
1184 | 1181 | ||
1185 | #ifdef CONFIG_IPV6_SUBTREES | 1182 | #ifdef CONFIG_IPV6_SUBTREES |
1186 | ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); | 1183 | ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); |
@@ -1208,9 +1205,9 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1208 | goto out; | 1205 | goto out; |
1209 | } | 1206 | } |
1210 | } | 1207 | } |
1211 | rt->u.dst.output = ip6_pkt_discard_out; | 1208 | rt->dst.output = ip6_pkt_discard_out; |
1212 | rt->u.dst.input = ip6_pkt_discard; | 1209 | rt->dst.input = ip6_pkt_discard; |
1213 | rt->u.dst.error = -ENETUNREACH; | 1210 | rt->dst.error = -ENETUNREACH; |
1214 | rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; | 1211 | rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; |
1215 | goto install_route; | 1212 | goto install_route; |
1216 | } | 1213 | } |
@@ -1244,7 +1241,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1244 | goto out; | 1241 | goto out; |
1245 | if (dev) { | 1242 | if (dev) { |
1246 | if (dev != grt->rt6i_dev) { | 1243 | if (dev != grt->rt6i_dev) { |
1247 | dst_release(&grt->u.dst); | 1244 | dst_release(&grt->dst); |
1248 | goto out; | 1245 | goto out; |
1249 | } | 1246 | } |
1250 | } else { | 1247 | } else { |
@@ -1255,7 +1252,7 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1255 | } | 1252 | } |
1256 | if (!(grt->rt6i_flags&RTF_GATEWAY)) | 1253 | if (!(grt->rt6i_flags&RTF_GATEWAY)) |
1257 | err = 0; | 1254 | err = 0; |
1258 | dst_release(&grt->u.dst); | 1255 | dst_release(&grt->dst); |
1259 | 1256 | ||
1260 | if (err) | 1257 | if (err) |
1261 | goto out; | 1258 | goto out; |
@@ -1294,18 +1291,18 @@ install_route: | |||
1294 | goto out; | 1291 | goto out; |
1295 | } | 1292 | } |
1296 | 1293 | ||
1297 | rt->u.dst.metrics[type - 1] = nla_get_u32(nla); | 1294 | rt->dst.metrics[type - 1] = nla_get_u32(nla); |
1298 | } | 1295 | } |
1299 | } | 1296 | } |
1300 | } | 1297 | } |
1301 | 1298 | ||
1302 | if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) | 1299 | if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0) |
1303 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; | 1300 | rt->dst.metrics[RTAX_HOPLIMIT-1] = -1; |
1304 | if (!dst_mtu(&rt->u.dst)) | 1301 | if (!dst_mtu(&rt->dst)) |
1305 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); | 1302 | rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); |
1306 | if (!dst_metric(&rt->u.dst, RTAX_ADVMSS)) | 1303 | if (!dst_metric(&rt->dst, RTAX_ADVMSS)) |
1307 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); | 1304 | rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); |
1308 | rt->u.dst.dev = dev; | 1305 | rt->dst.dev = dev; |
1309 | rt->rt6i_idev = idev; | 1306 | rt->rt6i_idev = idev; |
1310 | rt->rt6i_table = table; | 1307 | rt->rt6i_table = table; |
1311 | 1308 | ||
@@ -1319,7 +1316,7 @@ out: | |||
1319 | if (idev) | 1316 | if (idev) |
1320 | in6_dev_put(idev); | 1317 | in6_dev_put(idev); |
1321 | if (rt) | 1318 | if (rt) |
1322 | dst_free(&rt->u.dst); | 1319 | dst_free(&rt->dst); |
1323 | return err; | 1320 | return err; |
1324 | } | 1321 | } |
1325 | 1322 | ||
@@ -1336,7 +1333,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info) | |||
1336 | write_lock_bh(&table->tb6_lock); | 1333 | write_lock_bh(&table->tb6_lock); |
1337 | 1334 | ||
1338 | err = fib6_del(rt, info); | 1335 | err = fib6_del(rt, info); |
1339 | dst_release(&rt->u.dst); | 1336 | dst_release(&rt->dst); |
1340 | 1337 | ||
1341 | write_unlock_bh(&table->tb6_lock); | 1338 | write_unlock_bh(&table->tb6_lock); |
1342 | 1339 | ||
@@ -1369,7 +1366,7 @@ static int ip6_route_del(struct fib6_config *cfg) | |||
1369 | &cfg->fc_src, cfg->fc_src_len); | 1366 | &cfg->fc_src, cfg->fc_src_len); |
1370 | 1367 | ||
1371 | if (fn) { | 1368 | if (fn) { |
1372 | for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { | 1369 | for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { |
1373 | if (cfg->fc_ifindex && | 1370 | if (cfg->fc_ifindex && |
1374 | (rt->rt6i_dev == NULL || | 1371 | (rt->rt6i_dev == NULL || |
1375 | rt->rt6i_dev->ifindex != cfg->fc_ifindex)) | 1372 | rt->rt6i_dev->ifindex != cfg->fc_ifindex)) |
@@ -1379,7 +1376,7 @@ static int ip6_route_del(struct fib6_config *cfg) | |||
1379 | continue; | 1376 | continue; |
1380 | if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) | 1377 | if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) |
1381 | continue; | 1378 | continue; |
1382 | dst_hold(&rt->u.dst); | 1379 | dst_hold(&rt->dst); |
1383 | read_unlock_bh(&table->tb6_lock); | 1380 | read_unlock_bh(&table->tb6_lock); |
1384 | 1381 | ||
1385 | return __ip6_del_rt(rt, &cfg->fc_nlinfo); | 1382 | return __ip6_del_rt(rt, &cfg->fc_nlinfo); |
@@ -1421,7 +1418,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, | |||
1421 | read_lock_bh(&table->tb6_lock); | 1418 | read_lock_bh(&table->tb6_lock); |
1422 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); | 1419 | fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); |
1423 | restart: | 1420 | restart: |
1424 | for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { | 1421 | for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { |
1425 | /* | 1422 | /* |
1426 | * Current route is on-link; redirect is always invalid. | 1423 | * Current route is on-link; redirect is always invalid. |
1427 | * | 1424 | * |
@@ -1445,7 +1442,7 @@ restart: | |||
1445 | rt = net->ipv6.ip6_null_entry; | 1442 | rt = net->ipv6.ip6_null_entry; |
1446 | BACKTRACK(net, &fl->fl6_src); | 1443 | BACKTRACK(net, &fl->fl6_src); |
1447 | out: | 1444 | out: |
1448 | dst_hold(&rt->u.dst); | 1445 | dst_hold(&rt->dst); |
1449 | 1446 | ||
1450 | read_unlock_bh(&table->tb6_lock); | 1447 | read_unlock_bh(&table->tb6_lock); |
1451 | 1448 | ||
@@ -1513,10 +1510,10 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1513 | * Look, redirects are sent only in response to data packets, | 1510 | * Look, redirects are sent only in response to data packets, |
1514 | * so that this nexthop apparently is reachable. --ANK | 1511 | * so that this nexthop apparently is reachable. --ANK |
1515 | */ | 1512 | */ |
1516 | dst_confirm(&rt->u.dst); | 1513 | dst_confirm(&rt->dst); |
1517 | 1514 | ||
1518 | /* Duplicate redirect: silently ignore. */ | 1515 | /* Duplicate redirect: silently ignore. */ |
1519 | if (neigh == rt->u.dst.neighbour) | 1516 | if (neigh == rt->dst.neighbour) |
1520 | goto out; | 1517 | goto out; |
1521 | 1518 | ||
1522 | nrt = ip6_rt_copy(rt); | 1519 | nrt = ip6_rt_copy(rt); |
@@ -1529,20 +1526,20 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1529 | 1526 | ||
1530 | ipv6_addr_copy(&nrt->rt6i_dst.addr, dest); | 1527 | ipv6_addr_copy(&nrt->rt6i_dst.addr, dest); |
1531 | nrt->rt6i_dst.plen = 128; | 1528 | nrt->rt6i_dst.plen = 128; |
1532 | nrt->u.dst.flags |= DST_HOST; | 1529 | nrt->dst.flags |= DST_HOST; |
1533 | 1530 | ||
1534 | ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); | 1531 | ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); |
1535 | nrt->rt6i_nexthop = neigh_clone(neigh); | 1532 | nrt->rt6i_nexthop = neigh_clone(neigh); |
1536 | /* Reset pmtu, it may be better */ | 1533 | /* Reset pmtu, it may be better */ |
1537 | nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); | 1534 | nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); |
1538 | nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), | 1535 | nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), |
1539 | dst_mtu(&nrt->u.dst)); | 1536 | dst_mtu(&nrt->dst)); |
1540 | 1537 | ||
1541 | if (ip6_ins_rt(nrt)) | 1538 | if (ip6_ins_rt(nrt)) |
1542 | goto out; | 1539 | goto out; |
1543 | 1540 | ||
1544 | netevent.old = &rt->u.dst; | 1541 | netevent.old = &rt->dst; |
1545 | netevent.new = &nrt->u.dst; | 1542 | netevent.new = &nrt->dst; |
1546 | call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); | 1543 | call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); |
1547 | 1544 | ||
1548 | if (rt->rt6i_flags&RTF_CACHE) { | 1545 | if (rt->rt6i_flags&RTF_CACHE) { |
@@ -1551,7 +1548,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, | |||
1551 | } | 1548 | } |
1552 | 1549 | ||
1553 | out: | 1550 | out: |
1554 | dst_release(&rt->u.dst); | 1551 | dst_release(&rt->dst); |
1555 | } | 1552 | } |
1556 | 1553 | ||
1557 | /* | 1554 | /* |
@@ -1570,7 +1567,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1570 | if (rt == NULL) | 1567 | if (rt == NULL) |
1571 | return; | 1568 | return; |
1572 | 1569 | ||
1573 | if (pmtu >= dst_mtu(&rt->u.dst)) | 1570 | if (pmtu >= dst_mtu(&rt->dst)) |
1574 | goto out; | 1571 | goto out; |
1575 | 1572 | ||
1576 | if (pmtu < IPV6_MIN_MTU) { | 1573 | if (pmtu < IPV6_MIN_MTU) { |
@@ -1588,7 +1585,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1588 | They are sent only in response to data packets, | 1585 | They are sent only in response to data packets, |
1589 | so that this nexthop apparently is reachable. --ANK | 1586 | so that this nexthop apparently is reachable. --ANK |
1590 | */ | 1587 | */ |
1591 | dst_confirm(&rt->u.dst); | 1588 | dst_confirm(&rt->dst); |
1592 | 1589 | ||
1593 | /* Host route. If it is static, it would be better | 1590 | /* Host route. If it is static, it would be better |
1594 | not to override it, but add new one, so that | 1591 | not to override it, but add new one, so that |
@@ -1596,10 +1593,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1596 | would return automatically. | 1593 | would return automatically. |
1597 | */ | 1594 | */ |
1598 | if (rt->rt6i_flags & RTF_CACHE) { | 1595 | if (rt->rt6i_flags & RTF_CACHE) { |
1599 | rt->u.dst.metrics[RTAX_MTU-1] = pmtu; | 1596 | rt->dst.metrics[RTAX_MTU-1] = pmtu; |
1600 | if (allfrag) | 1597 | if (allfrag) |
1601 | rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; | 1598 | rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; |
1602 | dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); | 1599 | dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); |
1603 | rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; | 1600 | rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; |
1604 | goto out; | 1601 | goto out; |
1605 | } | 1602 | } |
@@ -1615,9 +1612,9 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1615 | nrt = rt6_alloc_clone(rt, daddr); | 1612 | nrt = rt6_alloc_clone(rt, daddr); |
1616 | 1613 | ||
1617 | if (nrt) { | 1614 | if (nrt) { |
1618 | nrt->u.dst.metrics[RTAX_MTU-1] = pmtu; | 1615 | nrt->dst.metrics[RTAX_MTU-1] = pmtu; |
1619 | if (allfrag) | 1616 | if (allfrag) |
1620 | nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; | 1617 | nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; |
1621 | 1618 | ||
1622 | /* According to RFC 1981, detecting PMTU increase shouldn't be | 1619 | /* According to RFC 1981, detecting PMTU increase shouldn't be |
1623 | * happened within 5 mins, the recommended timer is 10 mins. | 1620 | * happened within 5 mins, the recommended timer is 10 mins. |
@@ -1625,13 +1622,13 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, | |||
1625 | * which is 10 mins. After 10 mins the decreased pmtu is expired | 1622 | * which is 10 mins. After 10 mins the decreased pmtu is expired |
1626 | * and detecting PMTU increase will be automatically happened. | 1623 | * and detecting PMTU increase will be automatically happened. |
1627 | */ | 1624 | */ |
1628 | dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); | 1625 | dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); |
1629 | nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; | 1626 | nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; |
1630 | 1627 | ||
1631 | ip6_ins_rt(nrt); | 1628 | ip6_ins_rt(nrt); |
1632 | } | 1629 | } |
1633 | out: | 1630 | out: |
1634 | dst_release(&rt->u.dst); | 1631 | dst_release(&rt->dst); |
1635 | } | 1632 | } |
1636 | 1633 | ||
1637 | /* | 1634 | /* |
@@ -1644,18 +1641,18 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) | |||
1644 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); | 1641 | struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); |
1645 | 1642 | ||
1646 | if (rt) { | 1643 | if (rt) { |
1647 | rt->u.dst.input = ort->u.dst.input; | 1644 | rt->dst.input = ort->dst.input; |
1648 | rt->u.dst.output = ort->u.dst.output; | 1645 | rt->dst.output = ort->dst.output; |
1649 | 1646 | ||
1650 | memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); | 1647 | memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32)); |
1651 | rt->u.dst.error = ort->u.dst.error; | 1648 | rt->dst.error = ort->dst.error; |
1652 | rt->u.dst.dev = ort->u.dst.dev; | 1649 | rt->dst.dev = ort->dst.dev; |
1653 | if (rt->u.dst.dev) | 1650 | if (rt->dst.dev) |
1654 | dev_hold(rt->u.dst.dev); | 1651 | dev_hold(rt->dst.dev); |
1655 | rt->rt6i_idev = ort->rt6i_idev; | 1652 | rt->rt6i_idev = ort->rt6i_idev; |
1656 | if (rt->rt6i_idev) | 1653 | if (rt->rt6i_idev) |
1657 | in6_dev_hold(rt->rt6i_idev); | 1654 | in6_dev_hold(rt->rt6i_idev); |
1658 | rt->u.dst.lastuse = jiffies; | 1655 | rt->dst.lastuse = jiffies; |
1659 | rt->rt6i_expires = 0; | 1656 | rt->rt6i_expires = 0; |
1660 | 1657 | ||
1661 | ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); | 1658 | ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); |
@@ -1689,14 +1686,14 @@ static struct rt6_info *rt6_get_route_info(struct net *net, | |||
1689 | if (!fn) | 1686 | if (!fn) |
1690 | goto out; | 1687 | goto out; |
1691 | 1688 | ||
1692 | for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { | 1689 | for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) { |
1693 | if (rt->rt6i_dev->ifindex != ifindex) | 1690 | if (rt->rt6i_dev->ifindex != ifindex) |
1694 | continue; | 1691 | continue; |
1695 | if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) | 1692 | if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) |
1696 | continue; | 1693 | continue; |
1697 | if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) | 1694 | if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) |
1698 | continue; | 1695 | continue; |
1699 | dst_hold(&rt->u.dst); | 1696 | dst_hold(&rt->dst); |
1700 | break; | 1697 | break; |
1701 | } | 1698 | } |
1702 | out: | 1699 | out: |
@@ -1744,14 +1741,14 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d | |||
1744 | return NULL; | 1741 | return NULL; |
1745 | 1742 | ||
1746 | write_lock_bh(&table->tb6_lock); | 1743 | write_lock_bh(&table->tb6_lock); |
1747 | for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) { | 1744 | for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { |
1748 | if (dev == rt->rt6i_dev && | 1745 | if (dev == rt->rt6i_dev && |
1749 | ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && | 1746 | ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && |
1750 | ipv6_addr_equal(&rt->rt6i_gateway, addr)) | 1747 | ipv6_addr_equal(&rt->rt6i_gateway, addr)) |
1751 | break; | 1748 | break; |
1752 | } | 1749 | } |
1753 | if (rt) | 1750 | if (rt) |
1754 | dst_hold(&rt->u.dst); | 1751 | dst_hold(&rt->dst); |
1755 | write_unlock_bh(&table->tb6_lock); | 1752 | write_unlock_bh(&table->tb6_lock); |
1756 | return rt; | 1753 | return rt; |
1757 | } | 1754 | } |
@@ -1790,9 +1787,9 @@ void rt6_purge_dflt_routers(struct net *net) | |||
1790 | 1787 | ||
1791 | restart: | 1788 | restart: |
1792 | read_lock_bh(&table->tb6_lock); | 1789 | read_lock_bh(&table->tb6_lock); |
1793 | for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) { | 1790 | for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { |
1794 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { | 1791 | if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { |
1795 | dst_hold(&rt->u.dst); | 1792 | dst_hold(&rt->dst); |
1796 | read_unlock_bh(&table->tb6_lock); | 1793 | read_unlock_bh(&table->tb6_lock); |
1797 | ip6_del_rt(rt); | 1794 | ip6_del_rt(rt); |
1798 | goto restart; | 1795 | goto restart; |
@@ -1930,15 +1927,15 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1930 | dev_hold(net->loopback_dev); | 1927 | dev_hold(net->loopback_dev); |
1931 | in6_dev_hold(idev); | 1928 | in6_dev_hold(idev); |
1932 | 1929 | ||
1933 | rt->u.dst.flags = DST_HOST; | 1930 | rt->dst.flags = DST_HOST; |
1934 | rt->u.dst.input = ip6_input; | 1931 | rt->dst.input = ip6_input; |
1935 | rt->u.dst.output = ip6_output; | 1932 | rt->dst.output = ip6_output; |
1936 | rt->rt6i_dev = net->loopback_dev; | 1933 | rt->rt6i_dev = net->loopback_dev; |
1937 | rt->rt6i_idev = idev; | 1934 | rt->rt6i_idev = idev; |
1938 | rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); | 1935 | rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); |
1939 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); | 1936 | rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst)); |
1940 | rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; | 1937 | rt->dst.metrics[RTAX_HOPLIMIT-1] = -1; |
1941 | rt->u.dst.obsolete = -1; | 1938 | rt->dst.obsolete = -1; |
1942 | 1939 | ||
1943 | rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; | 1940 | rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; |
1944 | if (anycast) | 1941 | if (anycast) |
@@ -1947,7 +1944,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1947 | rt->rt6i_flags |= RTF_LOCAL; | 1944 | rt->rt6i_flags |= RTF_LOCAL; |
1948 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); | 1945 | neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); |
1949 | if (IS_ERR(neigh)) { | 1946 | if (IS_ERR(neigh)) { |
1950 | dst_free(&rt->u.dst); | 1947 | dst_free(&rt->dst); |
1951 | 1948 | ||
1952 | /* We are casting this because that is the return | 1949 | /* We are casting this because that is the return |
1953 | * value type. But an errno encoded pointer is the | 1950 | * value type. But an errno encoded pointer is the |
@@ -1962,7 +1959,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
1962 | rt->rt6i_dst.plen = 128; | 1959 | rt->rt6i_dst.plen = 128; |
1963 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); | 1960 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
1964 | 1961 | ||
1965 | atomic_set(&rt->u.dst.__refcnt, 1); | 1962 | atomic_set(&rt->dst.__refcnt, 1); |
1966 | 1963 | ||
1967 | return rt; | 1964 | return rt; |
1968 | } | 1965 | } |
@@ -2033,12 +2030,12 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
2033 | PMTU discouvery. | 2030 | PMTU discouvery. |
2034 | */ | 2031 | */ |
2035 | if (rt->rt6i_dev == arg->dev && | 2032 | if (rt->rt6i_dev == arg->dev && |
2036 | !dst_metric_locked(&rt->u.dst, RTAX_MTU) && | 2033 | !dst_metric_locked(&rt->dst, RTAX_MTU) && |
2037 | (dst_mtu(&rt->u.dst) >= arg->mtu || | 2034 | (dst_mtu(&rt->dst) >= arg->mtu || |
2038 | (dst_mtu(&rt->u.dst) < arg->mtu && | 2035 | (dst_mtu(&rt->dst) < arg->mtu && |
2039 | dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { | 2036 | dst_mtu(&rt->dst) == idev->cnf.mtu6))) { |
2040 | rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; | 2037 | rt->dst.metrics[RTAX_MTU-1] = arg->mtu; |
2041 | rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); | 2038 | rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); |
2042 | } | 2039 | } |
2043 | return 0; | 2040 | return 0; |
2044 | } | 2041 | } |
@@ -2252,20 +2249,20 @@ static int rt6_fill_node(struct net *net, | |||
2252 | #endif | 2249 | #endif |
2253 | NLA_PUT_U32(skb, RTA_IIF, iif); | 2250 | NLA_PUT_U32(skb, RTA_IIF, iif); |
2254 | } else if (dst) { | 2251 | } else if (dst) { |
2255 | struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); | 2252 | struct inet6_dev *idev = ip6_dst_idev(&rt->dst); |
2256 | struct in6_addr saddr_buf; | 2253 | struct in6_addr saddr_buf; |
2257 | if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, | 2254 | if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, |
2258 | dst, 0, &saddr_buf) == 0) | 2255 | dst, 0, &saddr_buf) == 0) |
2259 | NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); | 2256 | NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); |
2260 | } | 2257 | } |
2261 | 2258 | ||
2262 | if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) | 2259 | if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0) |
2263 | goto nla_put_failure; | 2260 | goto nla_put_failure; |
2264 | 2261 | ||
2265 | if (rt->u.dst.neighbour) | 2262 | if (rt->dst.neighbour) |
2266 | NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key); | 2263 | NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key); |
2267 | 2264 | ||
2268 | if (rt->u.dst.dev) | 2265 | if (rt->dst.dev) |
2269 | NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); | 2266 | NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); |
2270 | 2267 | ||
2271 | NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); | 2268 | NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); |
@@ -2277,8 +2274,8 @@ static int rt6_fill_node(struct net *net, | |||
2277 | else | 2274 | else |
2278 | expires = INT_MAX; | 2275 | expires = INT_MAX; |
2279 | 2276 | ||
2280 | if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, | 2277 | if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, |
2281 | expires, rt->u.dst.error) < 0) | 2278 | expires, rt->dst.error) < 0) |
2282 | goto nla_put_failure; | 2279 | goto nla_put_failure; |
2283 | 2280 | ||
2284 | return nlmsg_end(skb, nlh); | 2281 | return nlmsg_end(skb, nlh); |
@@ -2364,7 +2361,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2364 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); | 2361 | skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); |
2365 | 2362 | ||
2366 | rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); | 2363 | rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); |
2367 | skb_dst_set(skb, &rt->u.dst); | 2364 | skb_dst_set(skb, &rt->dst); |
2368 | 2365 | ||
2369 | err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, | 2366 | err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, |
2370 | RTM_NEWROUTE, NETLINK_CB(in_skb).pid, | 2367 | RTM_NEWROUTE, NETLINK_CB(in_skb).pid, |
@@ -2416,12 +2413,12 @@ static int ip6_route_dev_notify(struct notifier_block *this, | |||
2416 | struct net *net = dev_net(dev); | 2413 | struct net *net = dev_net(dev); |
2417 | 2414 | ||
2418 | if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { | 2415 | if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { |
2419 | net->ipv6.ip6_null_entry->u.dst.dev = dev; | 2416 | net->ipv6.ip6_null_entry->dst.dev = dev; |
2420 | net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); | 2417 | net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); |
2421 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 2418 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
2422 | net->ipv6.ip6_prohibit_entry->u.dst.dev = dev; | 2419 | net->ipv6.ip6_prohibit_entry->dst.dev = dev; |
2423 | net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); | 2420 | net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); |
2424 | net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev; | 2421 | net->ipv6.ip6_blk_hole_entry->dst.dev = dev; |
2425 | net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); | 2422 | net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); |
2426 | #endif | 2423 | #endif |
2427 | } | 2424 | } |
@@ -2464,8 +2461,8 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) | |||
2464 | seq_puts(m, "00000000000000000000000000000000"); | 2461 | seq_puts(m, "00000000000000000000000000000000"); |
2465 | } | 2462 | } |
2466 | seq_printf(m, " %08x %08x %08x %08x %8s\n", | 2463 | seq_printf(m, " %08x %08x %08x %08x %8s\n", |
2467 | rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), | 2464 | rt->rt6i_metric, atomic_read(&rt->dst.__refcnt), |
2468 | rt->u.dst.__use, rt->rt6i_flags, | 2465 | rt->dst.__use, rt->rt6i_flags, |
2469 | rt->rt6i_dev ? rt->rt6i_dev->name : ""); | 2466 | rt->rt6i_dev ? rt->rt6i_dev->name : ""); |
2470 | return 0; | 2467 | return 0; |
2471 | } | 2468 | } |
@@ -2646,9 +2643,9 @@ static int __net_init ip6_route_net_init(struct net *net) | |||
2646 | GFP_KERNEL); | 2643 | GFP_KERNEL); |
2647 | if (!net->ipv6.ip6_null_entry) | 2644 | if (!net->ipv6.ip6_null_entry) |
2648 | goto out_ip6_dst_ops; | 2645 | goto out_ip6_dst_ops; |
2649 | net->ipv6.ip6_null_entry->u.dst.path = | 2646 | net->ipv6.ip6_null_entry->dst.path = |
2650 | (struct dst_entry *)net->ipv6.ip6_null_entry; | 2647 | (struct dst_entry *)net->ipv6.ip6_null_entry; |
2651 | net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; | 2648 | net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops; |
2652 | 2649 | ||
2653 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 2650 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
2654 | net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, | 2651 | net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, |
@@ -2656,18 +2653,18 @@ static int __net_init ip6_route_net_init(struct net *net) | |||
2656 | GFP_KERNEL); | 2653 | GFP_KERNEL); |
2657 | if (!net->ipv6.ip6_prohibit_entry) | 2654 | if (!net->ipv6.ip6_prohibit_entry) |
2658 | goto out_ip6_null_entry; | 2655 | goto out_ip6_null_entry; |
2659 | net->ipv6.ip6_prohibit_entry->u.dst.path = | 2656 | net->ipv6.ip6_prohibit_entry->dst.path = |
2660 | (struct dst_entry *)net->ipv6.ip6_prohibit_entry; | 2657 | (struct dst_entry *)net->ipv6.ip6_prohibit_entry; |
2661 | net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; | 2658 | net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops; |
2662 | 2659 | ||
2663 | net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, | 2660 | net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, |
2664 | sizeof(*net->ipv6.ip6_blk_hole_entry), | 2661 | sizeof(*net->ipv6.ip6_blk_hole_entry), |
2665 | GFP_KERNEL); | 2662 | GFP_KERNEL); |
2666 | if (!net->ipv6.ip6_blk_hole_entry) | 2663 | if (!net->ipv6.ip6_blk_hole_entry) |
2667 | goto out_ip6_prohibit_entry; | 2664 | goto out_ip6_prohibit_entry; |
2668 | net->ipv6.ip6_blk_hole_entry->u.dst.path = | 2665 | net->ipv6.ip6_blk_hole_entry->dst.path = |
2669 | (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; | 2666 | (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; |
2670 | net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; | 2667 | net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops; |
2671 | #endif | 2668 | #endif |
2672 | 2669 | ||
2673 | net->ipv6.sysctl.flush_delay = 0; | 2670 | net->ipv6.sysctl.flush_delay = 0; |
@@ -2742,12 +2739,12 @@ int __init ip6_route_init(void) | |||
2742 | /* Registering of the loopback is done before this portion of code, | 2739 | /* Registering of the loopback is done before this portion of code, |
2743 | * the loopback reference in rt6_info will not be taken, do it | 2740 | * the loopback reference in rt6_info will not be taken, do it |
2744 | * manually for init_net */ | 2741 | * manually for init_net */ |
2745 | init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev; | 2742 | init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev; |
2746 | init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | 2743 | init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
2747 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES | 2744 | #ifdef CONFIG_IPV6_MULTIPLE_TABLES |
2748 | init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev; | 2745 | init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev; |
2749 | init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | 2746 | init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
2750 | init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev; | 2747 | init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev; |
2751 | init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); | 2748 | init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); |
2752 | #endif | 2749 | #endif |
2753 | ret = fib6_init(); | 2750 | ret = fib6_init(); |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e51e650ea80b..4699cd3c3118 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -249,8 +249,6 @@ failed: | |||
249 | return NULL; | 249 | return NULL; |
250 | } | 250 | } |
251 | 251 | ||
252 | static DEFINE_SPINLOCK(ipip6_prl_lock); | ||
253 | |||
254 | #define for_each_prl_rcu(start) \ | 252 | #define for_each_prl_rcu(start) \ |
255 | for (prl = rcu_dereference(start); \ | 253 | for (prl = rcu_dereference(start); \ |
256 | prl; \ | 254 | prl; \ |
@@ -340,7 +338,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
340 | if (a->addr == htonl(INADDR_ANY)) | 338 | if (a->addr == htonl(INADDR_ANY)) |
341 | return -EINVAL; | 339 | return -EINVAL; |
342 | 340 | ||
343 | spin_lock(&ipip6_prl_lock); | 341 | ASSERT_RTNL(); |
344 | 342 | ||
345 | for (p = t->prl; p; p = p->next) { | 343 | for (p = t->prl; p; p = p->next) { |
346 | if (p->addr == a->addr) { | 344 | if (p->addr == a->addr) { |
@@ -370,7 +368,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg) | |||
370 | t->prl_count++; | 368 | t->prl_count++; |
371 | rcu_assign_pointer(t->prl, p); | 369 | rcu_assign_pointer(t->prl, p); |
372 | out: | 370 | out: |
373 | spin_unlock(&ipip6_prl_lock); | ||
374 | return err; | 371 | return err; |
375 | } | 372 | } |
376 | 373 | ||
@@ -397,7 +394,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | |||
397 | struct ip_tunnel_prl_entry *x, **p; | 394 | struct ip_tunnel_prl_entry *x, **p; |
398 | int err = 0; | 395 | int err = 0; |
399 | 396 | ||
400 | spin_lock(&ipip6_prl_lock); | 397 | ASSERT_RTNL(); |
401 | 398 | ||
402 | if (a && a->addr != htonl(INADDR_ANY)) { | 399 | if (a && a->addr != htonl(INADDR_ANY)) { |
403 | for (p = &t->prl; *p; p = &(*p)->next) { | 400 | for (p = &t->prl; *p; p = &(*p)->next) { |
@@ -419,7 +416,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) | |||
419 | } | 416 | } |
420 | } | 417 | } |
421 | out: | 418 | out: |
422 | spin_unlock(&ipip6_prl_lock); | ||
423 | return err; | 419 | return err; |
424 | } | 420 | } |
425 | 421 | ||
@@ -716,7 +712,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
716 | stats->tx_carrier_errors++; | 712 | stats->tx_carrier_errors++; |
717 | goto tx_error_icmp; | 713 | goto tx_error_icmp; |
718 | } | 714 | } |
719 | tdev = rt->u.dst.dev; | 715 | tdev = rt->dst.dev; |
720 | 716 | ||
721 | if (tdev == dev) { | 717 | if (tdev == dev) { |
722 | ip_rt_put(rt); | 718 | ip_rt_put(rt); |
@@ -725,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
725 | } | 721 | } |
726 | 722 | ||
727 | if (df) { | 723 | if (df) { |
728 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 724 | mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); |
729 | 725 | ||
730 | if (mtu < 68) { | 726 | if (mtu < 68) { |
731 | stats->collisions++; | 727 | stats->collisions++; |
@@ -784,7 +780,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
784 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 780 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
785 | IPCB(skb)->flags = 0; | 781 | IPCB(skb)->flags = 0; |
786 | skb_dst_drop(skb); | 782 | skb_dst_drop(skb); |
787 | skb_dst_set(skb, &rt->u.dst); | 783 | skb_dst_set(skb, &rt->dst); |
788 | 784 | ||
789 | /* | 785 | /* |
790 | * Push down and install the IPIP header. | 786 | * Push down and install the IPIP header. |
@@ -833,7 +829,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) | |||
833 | .proto = IPPROTO_IPV6 }; | 829 | .proto = IPPROTO_IPV6 }; |
834 | struct rtable *rt; | 830 | struct rtable *rt; |
835 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { | 831 | if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { |
836 | tdev = rt->u.dst.dev; | 832 | tdev = rt->dst.dev; |
837 | ip_rt_put(rt); | 833 | ip_rt_put(rt); |
838 | } | 834 | } |
839 | dev->flags |= IFF_POINTOPOINT; | 835 | dev->flags |= IFF_POINTOPOINT; |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 34d1f0690d7e..70d330f8c990 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -27,28 +27,17 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; | |||
27 | #define COOKIEBITS 24 /* Upper bits store count */ | 27 | #define COOKIEBITS 24 /* Upper bits store count */ |
28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) | 28 | #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) |
29 | 29 | ||
30 | /* | 30 | /* Table must be sorted. */ |
31 | * This table has to be sorted and terminated with (__u16)-1. | ||
32 | * XXX generate a better table. | ||
33 | * Unresolved Issues: HIPPI with a 64k MSS is not well supported. | ||
34 | * | ||
35 | * Taken directly from ipv4 implementation. | ||
36 | * Should this list be modified for ipv6 use or is it close enough? | ||
37 | * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart | ||
38 | */ | ||
39 | static __u16 const msstab[] = { | 31 | static __u16 const msstab[] = { |
40 | 64 - 1, | 32 | 64, |
41 | 256 - 1, | 33 | 512, |
42 | 512 - 1, | 34 | 536, |
43 | 536 - 1, | 35 | 1280 - 60, |
44 | 1024 - 1, | 36 | 1480 - 60, |
45 | 1440 - 1, | 37 | 1500 - 60, |
46 | 1460 - 1, | 38 | 4460 - 60, |
47 | 4312 - 1, | 39 | 9000 - 60, |
48 | (__u16)-1 | ||
49 | }; | 40 | }; |
50 | /* The number doesn't include the -1 terminator */ | ||
51 | #define NUM_MSS (ARRAY_SIZE(msstab) - 1) | ||
52 | 41 | ||
53 | /* | 42 | /* |
54 | * This (misnamed) value is the age of syncookie which is permitted. | 43 | * This (misnamed) value is the age of syncookie which is permitted. |
@@ -134,9 +123,11 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) | |||
134 | 123 | ||
135 | tcp_synq_overflow(sk); | 124 | tcp_synq_overflow(sk); |
136 | 125 | ||
137 | for (mssind = 0; mss > msstab[mssind + 1]; mssind++) | 126 | for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) |
138 | ; | 127 | if (mss >= msstab[mssind]) |
139 | *mssp = msstab[mssind] + 1; | 128 | break; |
129 | |||
130 | *mssp = msstab[mssind]; | ||
140 | 131 | ||
141 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); | 132 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); |
142 | 133 | ||
@@ -154,7 +145,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) | |||
154 | th->source, th->dest, seq, | 145 | th->source, th->dest, seq, |
155 | jiffies / (HZ * 60), COUNTER_TRIES); | 146 | jiffies / (HZ * 60), COUNTER_TRIES); |
156 | 147 | ||
157 | return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; | 148 | return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; |
158 | } | 149 | } |
159 | 150 | ||
160 | struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | 151 | struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) |
@@ -174,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
174 | struct dst_entry *dst; | 165 | struct dst_entry *dst; |
175 | __u8 rcv_wscale; | 166 | __u8 rcv_wscale; |
176 | 167 | ||
177 | if (!sysctl_tcp_syncookies || !th->ack) | 168 | if (!sysctl_tcp_syncookies || !th->ack || th->rst) |
178 | goto out; | 169 | goto out; |
179 | 170 | ||
180 | if (tcp_synq_no_recent_overflow(sk) || | 171 | if (tcp_synq_no_recent_overflow(sk) || |
@@ -240,17 +231,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
240 | * me if there is a preferred way. | 231 | * me if there is a preferred way. |
241 | */ | 232 | */ |
242 | { | 233 | { |
243 | struct in6_addr *final_p = NULL, final; | 234 | struct in6_addr *final_p, final; |
244 | struct flowi fl; | 235 | struct flowi fl; |
245 | memset(&fl, 0, sizeof(fl)); | 236 | memset(&fl, 0, sizeof(fl)); |
246 | fl.proto = IPPROTO_TCP; | 237 | fl.proto = IPPROTO_TCP; |
247 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); | 238 | ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); |
248 | if (np->opt && np->opt->srcrt) { | 239 | final_p = fl6_update_dst(&fl, np->opt, &final); |
249 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
250 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
251 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
252 | final_p = &final; | ||
253 | } | ||
254 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); | 240 | ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); |
255 | fl.oif = sk->sk_bound_dev_if; | 241 | fl.oif = sk->sk_bound_dev_if; |
256 | fl.mark = sk->sk_mark; | 242 | fl.mark = sk->sk_mark; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2b7c3a100e2c..5887141ad641 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -129,7 +129,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
129 | struct inet_connection_sock *icsk = inet_csk(sk); | 129 | struct inet_connection_sock *icsk = inet_csk(sk); |
130 | struct ipv6_pinfo *np = inet6_sk(sk); | 130 | struct ipv6_pinfo *np = inet6_sk(sk); |
131 | struct tcp_sock *tp = tcp_sk(sk); | 131 | struct tcp_sock *tp = tcp_sk(sk); |
132 | struct in6_addr *saddr = NULL, *final_p = NULL, final; | 132 | struct in6_addr *saddr = NULL, *final_p, final; |
133 | struct flowi fl; | 133 | struct flowi fl; |
134 | struct dst_entry *dst; | 134 | struct dst_entry *dst; |
135 | int addr_type; | 135 | int addr_type; |
@@ -250,12 +250,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
250 | fl.fl_ip_dport = usin->sin6_port; | 250 | fl.fl_ip_dport = usin->sin6_port; |
251 | fl.fl_ip_sport = inet->inet_sport; | 251 | fl.fl_ip_sport = inet->inet_sport; |
252 | 252 | ||
253 | if (np->opt && np->opt->srcrt) { | 253 | final_p = fl6_update_dst(&fl, np->opt, &final); |
254 | struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt; | ||
255 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
256 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
257 | final_p = &final; | ||
258 | } | ||
259 | 254 | ||
260 | security_sk_classify_flow(sk, &fl); | 255 | security_sk_classify_flow(sk, &fl); |
261 | 256 | ||
@@ -477,7 +472,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
477 | struct ipv6_pinfo *np = inet6_sk(sk); | 472 | struct ipv6_pinfo *np = inet6_sk(sk); |
478 | struct sk_buff * skb; | 473 | struct sk_buff * skb; |
479 | struct ipv6_txoptions *opt = NULL; | 474 | struct ipv6_txoptions *opt = NULL; |
480 | struct in6_addr * final_p = NULL, final; | 475 | struct in6_addr * final_p, final; |
481 | struct flowi fl; | 476 | struct flowi fl; |
482 | struct dst_entry *dst; | 477 | struct dst_entry *dst; |
483 | int err = -1; | 478 | int err = -1; |
@@ -494,12 +489,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, | |||
494 | security_req_classify_flow(req, &fl); | 489 | security_req_classify_flow(req, &fl); |
495 | 490 | ||
496 | opt = np->opt; | 491 | opt = np->opt; |
497 | if (opt && opt->srcrt) { | 492 | final_p = fl6_update_dst(&fl, opt, &final); |
498 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
499 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
500 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
501 | final_p = &final; | ||
502 | } | ||
503 | 493 | ||
504 | err = ip6_dst_lookup(sk, &dst, &fl); | 494 | err = ip6_dst_lookup(sk, &dst, &fl); |
505 | if (err) | 495 | if (err) |
@@ -1167,7 +1157,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
1167 | } | 1157 | } |
1168 | 1158 | ||
1169 | #ifdef CONFIG_SYN_COOKIES | 1159 | #ifdef CONFIG_SYN_COOKIES |
1170 | if (!th->rst && !th->syn && th->ack) | 1160 | if (!th->syn) |
1171 | sk = cookie_v6_check(sk, skb); | 1161 | sk = cookie_v6_check(sk, skb); |
1172 | #endif | 1162 | #endif |
1173 | return sk; | 1163 | return sk; |
@@ -1392,18 +1382,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1392 | goto out_overflow; | 1382 | goto out_overflow; |
1393 | 1383 | ||
1394 | if (dst == NULL) { | 1384 | if (dst == NULL) { |
1395 | struct in6_addr *final_p = NULL, final; | 1385 | struct in6_addr *final_p, final; |
1396 | struct flowi fl; | 1386 | struct flowi fl; |
1397 | 1387 | ||
1398 | memset(&fl, 0, sizeof(fl)); | 1388 | memset(&fl, 0, sizeof(fl)); |
1399 | fl.proto = IPPROTO_TCP; | 1389 | fl.proto = IPPROTO_TCP; |
1400 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); | 1390 | ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); |
1401 | if (opt && opt->srcrt) { | 1391 | final_p = fl6_update_dst(&fl, opt, &final); |
1402 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
1403 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1404 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1405 | final_p = &final; | ||
1406 | } | ||
1407 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); | 1392 | ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); |
1408 | fl.oif = sk->sk_bound_dev_if; | 1393 | fl.oif = sk->sk_bound_dev_if; |
1409 | fl.mark = sk->sk_mark; | 1394 | fl.mark = sk->sk_mark; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3048f906c042..1dd1affdead2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -466,11 +466,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
466 | if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) | 466 | if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) |
467 | goto out; | 467 | goto out; |
468 | 468 | ||
469 | if (np->recverr) { | 469 | if (np->recverr) |
470 | bh_lock_sock(sk); | ||
471 | ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); | 470 | ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); |
472 | bh_unlock_sock(sk); | 471 | |
473 | } | ||
474 | sk->sk_err = err; | 472 | sk->sk_err = err; |
475 | sk->sk_error_report(sk); | 473 | sk->sk_error_report(sk); |
476 | out: | 474 | out: |
@@ -929,7 +927,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
929 | struct inet_sock *inet = inet_sk(sk); | 927 | struct inet_sock *inet = inet_sk(sk); |
930 | struct ipv6_pinfo *np = inet6_sk(sk); | 928 | struct ipv6_pinfo *np = inet6_sk(sk); |
931 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; | 929 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; |
932 | struct in6_addr *daddr, *final_p = NULL, final; | 930 | struct in6_addr *daddr, *final_p, final; |
933 | struct ipv6_txoptions *opt = NULL; | 931 | struct ipv6_txoptions *opt = NULL; |
934 | struct ip6_flowlabel *flowlabel = NULL; | 932 | struct ip6_flowlabel *flowlabel = NULL; |
935 | struct flowi fl; | 933 | struct flowi fl; |
@@ -1099,14 +1097,9 @@ do_udp_sendmsg: | |||
1099 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); | 1097 | ipv6_addr_copy(&fl.fl6_src, &np->saddr); |
1100 | fl.fl_ip_sport = inet->inet_sport; | 1098 | fl.fl_ip_sport = inet->inet_sport; |
1101 | 1099 | ||
1102 | /* merge ip6_build_xmit from ip6_output */ | 1100 | final_p = fl6_update_dst(&fl, opt, &final); |
1103 | if (opt && opt->srcrt) { | 1101 | if (final_p) |
1104 | struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; | ||
1105 | ipv6_addr_copy(&final, &fl.fl6_dst); | ||
1106 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
1107 | final_p = &final; | ||
1108 | connected = 0; | 1102 | connected = 0; |
1109 | } | ||
1110 | 1103 | ||
1111 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) { | 1104 | if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) { |
1112 | fl.oif = np->mcast_oif; | 1105 | fl.oif = np->mcast_oif; |
diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 47db1d8a0d92..285761e77d90 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c | |||
@@ -1853,23 +1853,23 @@ static int irttp_seq_show(struct seq_file *seq, void *v) | |||
1853 | self->remote_credit); | 1853 | self->remote_credit); |
1854 | seq_printf(seq, "send credit: %d\n", | 1854 | seq_printf(seq, "send credit: %d\n", |
1855 | self->send_credit); | 1855 | self->send_credit); |
1856 | seq_printf(seq, " tx packets: %ld, ", | 1856 | seq_printf(seq, " tx packets: %lu, ", |
1857 | self->stats.tx_packets); | 1857 | self->stats.tx_packets); |
1858 | seq_printf(seq, "rx packets: %ld, ", | 1858 | seq_printf(seq, "rx packets: %lu, ", |
1859 | self->stats.rx_packets); | 1859 | self->stats.rx_packets); |
1860 | seq_printf(seq, "tx_queue len: %d ", | 1860 | seq_printf(seq, "tx_queue len: %u ", |
1861 | skb_queue_len(&self->tx_queue)); | 1861 | skb_queue_len(&self->tx_queue)); |
1862 | seq_printf(seq, "rx_queue len: %d\n", | 1862 | seq_printf(seq, "rx_queue len: %u\n", |
1863 | skb_queue_len(&self->rx_queue)); | 1863 | skb_queue_len(&self->rx_queue)); |
1864 | seq_printf(seq, " tx_sdu_busy: %s, ", | 1864 | seq_printf(seq, " tx_sdu_busy: %s, ", |
1865 | self->tx_sdu_busy? "TRUE":"FALSE"); | 1865 | self->tx_sdu_busy? "TRUE":"FALSE"); |
1866 | seq_printf(seq, "rx_sdu_busy: %s\n", | 1866 | seq_printf(seq, "rx_sdu_busy: %s\n", |
1867 | self->rx_sdu_busy? "TRUE":"FALSE"); | 1867 | self->rx_sdu_busy? "TRUE":"FALSE"); |
1868 | seq_printf(seq, " max_seg_size: %d, ", | 1868 | seq_printf(seq, " max_seg_size: %u, ", |
1869 | self->max_seg_size); | 1869 | self->max_seg_size); |
1870 | seq_printf(seq, "tx_max_sdu_size: %d, ", | 1870 | seq_printf(seq, "tx_max_sdu_size: %u, ", |
1871 | self->tx_max_sdu_size); | 1871 | self->tx_max_sdu_size); |
1872 | seq_printf(seq, "rx_max_sdu_size: %d\n", | 1872 | seq_printf(seq, "rx_max_sdu_size: %u\n", |
1873 | self->rx_max_sdu_size); | 1873 | self->rx_max_sdu_size); |
1874 | 1874 | ||
1875 | seq_printf(seq, " Used by (%s)\n\n", | 1875 | seq_printf(seq, " Used by (%s)\n\n", |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index f28ad2cc8428..499c045d6910 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1463,7 +1463,7 @@ struct iucv_path_pending { | |||
1463 | u32 res3; | 1463 | u32 res3; |
1464 | u8 ippollfg; | 1464 | u8 ippollfg; |
1465 | u8 res4[3]; | 1465 | u8 res4[3]; |
1466 | } __attribute__ ((packed)); | 1466 | } __packed; |
1467 | 1467 | ||
1468 | static void iucv_path_pending(struct iucv_irq_data *data) | 1468 | static void iucv_path_pending(struct iucv_irq_data *data) |
1469 | { | 1469 | { |
@@ -1524,7 +1524,7 @@ struct iucv_path_complete { | |||
1524 | u32 res3; | 1524 | u32 res3; |
1525 | u8 ippollfg; | 1525 | u8 ippollfg; |
1526 | u8 res4[3]; | 1526 | u8 res4[3]; |
1527 | } __attribute__ ((packed)); | 1527 | } __packed; |
1528 | 1528 | ||
1529 | static void iucv_path_complete(struct iucv_irq_data *data) | 1529 | static void iucv_path_complete(struct iucv_irq_data *data) |
1530 | { | 1530 | { |
@@ -1554,7 +1554,7 @@ struct iucv_path_severed { | |||
1554 | u32 res4; | 1554 | u32 res4; |
1555 | u8 ippollfg; | 1555 | u8 ippollfg; |
1556 | u8 res5[3]; | 1556 | u8 res5[3]; |
1557 | } __attribute__ ((packed)); | 1557 | } __packed; |
1558 | 1558 | ||
1559 | static void iucv_path_severed(struct iucv_irq_data *data) | 1559 | static void iucv_path_severed(struct iucv_irq_data *data) |
1560 | { | 1560 | { |
@@ -1590,7 +1590,7 @@ struct iucv_path_quiesced { | |||
1590 | u32 res4; | 1590 | u32 res4; |
1591 | u8 ippollfg; | 1591 | u8 ippollfg; |
1592 | u8 res5[3]; | 1592 | u8 res5[3]; |
1593 | } __attribute__ ((packed)); | 1593 | } __packed; |
1594 | 1594 | ||
1595 | static void iucv_path_quiesced(struct iucv_irq_data *data) | 1595 | static void iucv_path_quiesced(struct iucv_irq_data *data) |
1596 | { | 1596 | { |
@@ -1618,7 +1618,7 @@ struct iucv_path_resumed { | |||
1618 | u32 res4; | 1618 | u32 res4; |
1619 | u8 ippollfg; | 1619 | u8 ippollfg; |
1620 | u8 res5[3]; | 1620 | u8 res5[3]; |
1621 | } __attribute__ ((packed)); | 1621 | } __packed; |
1622 | 1622 | ||
1623 | static void iucv_path_resumed(struct iucv_irq_data *data) | 1623 | static void iucv_path_resumed(struct iucv_irq_data *data) |
1624 | { | 1624 | { |
@@ -1649,7 +1649,7 @@ struct iucv_message_complete { | |||
1649 | u32 ipbfln2f; | 1649 | u32 ipbfln2f; |
1650 | u8 ippollfg; | 1650 | u8 ippollfg; |
1651 | u8 res2[3]; | 1651 | u8 res2[3]; |
1652 | } __attribute__ ((packed)); | 1652 | } __packed; |
1653 | 1653 | ||
1654 | static void iucv_message_complete(struct iucv_irq_data *data) | 1654 | static void iucv_message_complete(struct iucv_irq_data *data) |
1655 | { | 1655 | { |
@@ -1694,7 +1694,7 @@ struct iucv_message_pending { | |||
1694 | u32 ipbfln2f; | 1694 | u32 ipbfln2f; |
1695 | u8 ippollfg; | 1695 | u8 ippollfg; |
1696 | u8 res2[3]; | 1696 | u8 res2[3]; |
1697 | } __attribute__ ((packed)); | 1697 | } __packed; |
1698 | 1698 | ||
1699 | static void iucv_message_pending(struct iucv_irq_data *data) | 1699 | static void iucv_message_pending(struct iucv_irq_data *data) |
1700 | { | 1700 | { |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0852512d392c..226a0ae3bcfd 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -348,7 +348,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len | |||
348 | sk->sk_state = TCP_ESTABLISHED; | 348 | sk->sk_state = TCP_ESTABLISHED; |
349 | inet->inet_id = jiffies; | 349 | inet->inet_id = jiffies; |
350 | 350 | ||
351 | sk_dst_set(sk, &rt->u.dst); | 351 | sk_dst_set(sk, &rt->dst); |
352 | 352 | ||
353 | write_lock_bh(&l2tp_ip_lock); | 353 | write_lock_bh(&l2tp_ip_lock); |
354 | hlist_del_init(&sk->sk_bind_node); | 354 | hlist_del_init(&sk->sk_bind_node); |
@@ -496,9 +496,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m | |||
496 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) | 496 | if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) |
497 | goto no_route; | 497 | goto no_route; |
498 | } | 498 | } |
499 | sk_setup_caps(sk, &rt->u.dst); | 499 | sk_setup_caps(sk, &rt->dst); |
500 | } | 500 | } |
501 | skb_dst_set(skb, dst_clone(&rt->u.dst)); | 501 | skb_dst_set(skb, dst_clone(&rt->dst)); |
502 | 502 | ||
503 | /* Queue the packet to IP for output */ | 503 | /* Queue the packet to IP for output */ |
504 | rc = ip_queue_xmit(skb); | 504 | rc = ip_queue_xmit(skb); |
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 8a91f6c0bb18..83eec7a8bd1f 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig | |||
@@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL | |||
33 | ---help--- | 33 | ---help--- |
34 | This option enables the 'minstrel' TX rate control algorithm | 34 | This option enables the 'minstrel' TX rate control algorithm |
35 | 35 | ||
36 | config MAC80211_RC_MINSTREL_HT | ||
37 | bool "Minstrel 802.11n support" if EMBEDDED | ||
38 | depends on MAC80211_RC_MINSTREL | ||
39 | default y | ||
40 | ---help--- | ||
41 | This option enables the 'minstrel_ht' TX rate control algorithm | ||
42 | |||
36 | choice | 43 | choice |
37 | prompt "Default rate control algorithm" | 44 | prompt "Default rate control algorithm" |
38 | depends on MAC80211_HAS_RC | 45 | depends on MAC80211_HAS_RC |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 84b48ba8a77e..fdb54e61d637 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile | |||
@@ -51,7 +51,11 @@ rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o | |||
51 | rc80211_minstrel-y := rc80211_minstrel.o | 51 | rc80211_minstrel-y := rc80211_minstrel.o |
52 | rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o | 52 | rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o |
53 | 53 | ||
54 | rc80211_minstrel_ht-y := rc80211_minstrel_ht.o | ||
55 | rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o | ||
56 | |||
54 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) | 57 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) |
55 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) | 58 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) |
59 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) | ||
56 | 60 | ||
57 | ccflags-y += -D__CHECK_ENDIAN__ | 61 | ccflags-y += -D__CHECK_ENDIAN__ |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index c163d0a149f4..d1b6664a2532 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | 332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); |
333 | 333 | ||
334 | spin_unlock(&local->ampdu_lock); | 334 | spin_unlock(&local->ampdu_lock); |
335 | spin_unlock_bh(&sta->lock); | ||
336 | 335 | ||
337 | /* send an addBA request */ | 336 | /* prepare tid data */ |
338 | sta->ampdu_mlme.dialog_token_allocator++; | 337 | sta->ampdu_mlme.dialog_token_allocator++; |
339 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | 338 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = |
340 | sta->ampdu_mlme.dialog_token_allocator; | 339 | sta->ampdu_mlme.dialog_token_allocator; |
341 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | 340 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; |
342 | 341 | ||
342 | spin_unlock_bh(&sta->lock); | ||
343 | |||
344 | /* send AddBA request */ | ||
343 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, | 345 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, |
344 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 346 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, |
345 | sta->ampdu_mlme.tid_tx[tid]->ssn, | 347 | sta->ampdu_mlme.tid_tx[tid]->ssn, |
@@ -538,14 +540,13 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
538 | return ret; | 540 | return ret; |
539 | } | 541 | } |
540 | 542 | ||
541 | int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | 543 | int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) |
542 | enum ieee80211_back_parties initiator) | ||
543 | { | 544 | { |
544 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 545 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
545 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 546 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
546 | struct ieee80211_local *local = sdata->local; | 547 | struct ieee80211_local *local = sdata->local; |
547 | 548 | ||
548 | trace_api_stop_tx_ba_session(pubsta, tid, initiator); | 549 | trace_api_stop_tx_ba_session(pubsta, tid); |
549 | 550 | ||
550 | if (!local->ops->ampdu_action) | 551 | if (!local->ops->ampdu_action) |
551 | return -EINVAL; | 552 | return -EINVAL; |
@@ -553,7 +554,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
553 | if (tid >= STA_TID_NUM) | 554 | if (tid >= STA_TID_NUM) |
554 | return -EINVAL; | 555 | return -EINVAL; |
555 | 556 | ||
556 | return __ieee80211_stop_tx_ba_session(sta, tid, initiator); | 557 | return __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); |
557 | } | 558 | } |
558 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | 559 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); |
559 | 560 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c7000a6ca379..1f76d048388b 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -120,6 +120,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
120 | struct ieee80211_key *key; | 120 | struct ieee80211_key *key; |
121 | int err; | 121 | int err; |
122 | 122 | ||
123 | if (!netif_running(dev)) | ||
124 | return -ENETDOWN; | ||
125 | |||
123 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 126 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
124 | 127 | ||
125 | switch (params->cipher) { | 128 | switch (params->cipher) { |
@@ -145,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
145 | if (!key) | 148 | if (!key) |
146 | return -ENOMEM; | 149 | return -ENOMEM; |
147 | 150 | ||
148 | rcu_read_lock(); | 151 | mutex_lock(&sdata->local->sta_mtx); |
149 | 152 | ||
150 | if (mac_addr) { | 153 | if (mac_addr) { |
151 | sta = sta_info_get_bss(sdata, mac_addr); | 154 | sta = sta_info_get_bss(sdata, mac_addr); |
@@ -160,7 +163,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
160 | 163 | ||
161 | err = 0; | 164 | err = 0; |
162 | out_unlock: | 165 | out_unlock: |
163 | rcu_read_unlock(); | 166 | mutex_unlock(&sdata->local->sta_mtx); |
164 | 167 | ||
165 | return err; | 168 | return err; |
166 | } | 169 | } |
@@ -174,7 +177,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
174 | 177 | ||
175 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 178 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
176 | 179 | ||
177 | rcu_read_lock(); | 180 | mutex_lock(&sdata->local->sta_mtx); |
178 | 181 | ||
179 | if (mac_addr) { | 182 | if (mac_addr) { |
180 | ret = -ENOENT; | 183 | ret = -ENOENT; |
@@ -202,7 +205,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
202 | 205 | ||
203 | ret = 0; | 206 | ret = 0; |
204 | out_unlock: | 207 | out_unlock: |
205 | rcu_read_unlock(); | 208 | mutex_unlock(&sdata->local->sta_mtx); |
206 | 209 | ||
207 | return ret; | 210 | return ret; |
208 | } | 211 | } |
@@ -305,15 +308,10 @@ static int ieee80211_config_default_key(struct wiphy *wiphy, | |||
305 | struct net_device *dev, | 308 | struct net_device *dev, |
306 | u8 key_idx) | 309 | u8 key_idx) |
307 | { | 310 | { |
308 | struct ieee80211_sub_if_data *sdata; | 311 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
309 | |||
310 | rcu_read_lock(); | ||
311 | 312 | ||
312 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
313 | ieee80211_set_default_key(sdata, key_idx); | 313 | ieee80211_set_default_key(sdata, key_idx); |
314 | 314 | ||
315 | rcu_read_unlock(); | ||
316 | |||
317 | return 0; | 315 | return 0; |
318 | } | 316 | } |
319 | 317 | ||
@@ -600,7 +598,7 @@ struct iapp_layer2_update { | |||
600 | u8 ssap; /* 0 */ | 598 | u8 ssap; /* 0 */ |
601 | u8 control; | 599 | u8 control; |
602 | u8 xid_info[3]; | 600 | u8 xid_info[3]; |
603 | } __attribute__ ((packed)); | 601 | } __packed; |
604 | 602 | ||
605 | static void ieee80211_send_layer2_update(struct sta_info *sta) | 603 | static void ieee80211_send_layer2_update(struct sta_info *sta) |
606 | { | 604 | { |
@@ -1554,10 +1552,12 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, | |||
1554 | static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, | 1552 | static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, |
1555 | struct ieee80211_channel *chan, | 1553 | struct ieee80211_channel *chan, |
1556 | enum nl80211_channel_type channel_type, | 1554 | enum nl80211_channel_type channel_type, |
1555 | bool channel_type_valid, | ||
1557 | const u8 *buf, size_t len, u64 *cookie) | 1556 | const u8 *buf, size_t len, u64 *cookie) |
1558 | { | 1557 | { |
1559 | return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, | 1558 | return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, |
1560 | channel_type, buf, len, cookie); | 1559 | channel_type, channel_type_valid, |
1560 | buf, len, cookie); | ||
1561 | } | 1561 | } |
1562 | 1562 | ||
1563 | struct cfg80211_ops mac80211_config_ops = { | 1563 | struct cfg80211_ops mac80211_config_ops = { |
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 637929b65ccc..a694c593ff6a 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -307,9 +307,6 @@ static const struct file_operations queues_ops = { | |||
307 | 307 | ||
308 | /* statistics stuff */ | 308 | /* statistics stuff */ |
309 | 309 | ||
310 | #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ | ||
311 | DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) | ||
312 | |||
313 | static ssize_t format_devstat_counter(struct ieee80211_local *local, | 310 | static ssize_t format_devstat_counter(struct ieee80211_local *local, |
314 | char __user *userbuf, | 311 | char __user *userbuf, |
315 | size_t count, loff_t *ppos, | 312 | size_t count, loff_t *ppos, |
@@ -351,75 +348,16 @@ static const struct file_operations stats_ ##name## _ops = { \ | |||
351 | .open = mac80211_open_file_generic, \ | 348 | .open = mac80211_open_file_generic, \ |
352 | }; | 349 | }; |
353 | 350 | ||
354 | #define DEBUGFS_STATS_ADD(name) \ | 351 | #define DEBUGFS_STATS_ADD(name, field) \ |
352 | debugfs_create_u32(#name, 0400, statsd, (u32 *) &field); | ||
353 | #define DEBUGFS_DEVSTATS_ADD(name) \ | ||
355 | debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); | 354 | debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); |
356 | 355 | ||
357 | DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", | ||
358 | local->dot11TransmittedFragmentCount); | ||
359 | DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u", | ||
360 | local->dot11MulticastTransmittedFrameCount); | ||
361 | DEBUGFS_STATS_FILE(failed_count, 20, "%u", | ||
362 | local->dot11FailedCount); | ||
363 | DEBUGFS_STATS_FILE(retry_count, 20, "%u", | ||
364 | local->dot11RetryCount); | ||
365 | DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u", | ||
366 | local->dot11MultipleRetryCount); | ||
367 | DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u", | ||
368 | local->dot11FrameDuplicateCount); | ||
369 | DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u", | ||
370 | local->dot11ReceivedFragmentCount); | ||
371 | DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u", | ||
372 | local->dot11MulticastReceivedFrameCount); | ||
373 | DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u", | ||
374 | local->dot11TransmittedFrameCount); | ||
375 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
376 | DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u", | ||
377 | local->tx_handlers_drop); | ||
378 | DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u", | ||
379 | local->tx_handlers_queued); | ||
380 | DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u", | ||
381 | local->tx_handlers_drop_unencrypted); | ||
382 | DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u", | ||
383 | local->tx_handlers_drop_fragment); | ||
384 | DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u", | ||
385 | local->tx_handlers_drop_wep); | ||
386 | DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u", | ||
387 | local->tx_handlers_drop_not_assoc); | ||
388 | DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u", | ||
389 | local->tx_handlers_drop_unauth_port); | ||
390 | DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u", | ||
391 | local->rx_handlers_drop); | ||
392 | DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u", | ||
393 | local->rx_handlers_queued); | ||
394 | DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u", | ||
395 | local->rx_handlers_drop_nullfunc); | ||
396 | DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u", | ||
397 | local->rx_handlers_drop_defrag); | ||
398 | DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u", | ||
399 | local->rx_handlers_drop_short); | ||
400 | DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u", | ||
401 | local->rx_handlers_drop_passive_scan); | ||
402 | DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u", | ||
403 | local->tx_expand_skb_head); | ||
404 | DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u", | ||
405 | local->tx_expand_skb_head_cloned); | ||
406 | DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u", | ||
407 | local->rx_expand_skb_head); | ||
408 | DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u", | ||
409 | local->rx_expand_skb_head2); | ||
410 | DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u", | ||
411 | local->rx_handlers_fragments); | ||
412 | DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", | ||
413 | local->tx_status_drop); | ||
414 | |||
415 | #endif | ||
416 | |||
417 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); | 356 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); |
418 | DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); | 357 | DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); |
419 | DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); | 358 | DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); |
420 | DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); | 359 | DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); |
421 | 360 | ||
422 | |||
423 | void debugfs_hw_add(struct ieee80211_local *local) | 361 | void debugfs_hw_add(struct ieee80211_local *local) |
424 | { | 362 | { |
425 | struct dentry *phyd = local->hw.wiphy->debugfsdir; | 363 | struct dentry *phyd = local->hw.wiphy->debugfsdir; |
@@ -448,38 +386,60 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
448 | if (!statsd) | 386 | if (!statsd) |
449 | return; | 387 | return; |
450 | 388 | ||
451 | DEBUGFS_STATS_ADD(transmitted_fragment_count); | 389 | DEBUGFS_STATS_ADD(transmitted_fragment_count, |
452 | DEBUGFS_STATS_ADD(multicast_transmitted_frame_count); | 390 | local->dot11TransmittedFragmentCount); |
453 | DEBUGFS_STATS_ADD(failed_count); | 391 | DEBUGFS_STATS_ADD(multicast_transmitted_frame_count, |
454 | DEBUGFS_STATS_ADD(retry_count); | 392 | local->dot11MulticastTransmittedFrameCount); |
455 | DEBUGFS_STATS_ADD(multiple_retry_count); | 393 | DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount); |
456 | DEBUGFS_STATS_ADD(frame_duplicate_count); | 394 | DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount); |
457 | DEBUGFS_STATS_ADD(received_fragment_count); | 395 | DEBUGFS_STATS_ADD(multiple_retry_count, |
458 | DEBUGFS_STATS_ADD(multicast_received_frame_count); | 396 | local->dot11MultipleRetryCount); |
459 | DEBUGFS_STATS_ADD(transmitted_frame_count); | 397 | DEBUGFS_STATS_ADD(frame_duplicate_count, |
398 | local->dot11FrameDuplicateCount); | ||
399 | DEBUGFS_STATS_ADD(received_fragment_count, | ||
400 | local->dot11ReceivedFragmentCount); | ||
401 | DEBUGFS_STATS_ADD(multicast_received_frame_count, | ||
402 | local->dot11MulticastReceivedFrameCount); | ||
403 | DEBUGFS_STATS_ADD(transmitted_frame_count, | ||
404 | local->dot11TransmittedFrameCount); | ||
460 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 405 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
461 | DEBUGFS_STATS_ADD(tx_handlers_drop); | 406 | DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop); |
462 | DEBUGFS_STATS_ADD(tx_handlers_queued); | 407 | DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued); |
463 | DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted); | 408 | DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted, |
464 | DEBUGFS_STATS_ADD(tx_handlers_drop_fragment); | 409 | local->tx_handlers_drop_unencrypted); |
465 | DEBUGFS_STATS_ADD(tx_handlers_drop_wep); | 410 | DEBUGFS_STATS_ADD(tx_handlers_drop_fragment, |
466 | DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); | 411 | local->tx_handlers_drop_fragment); |
467 | DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); | 412 | DEBUGFS_STATS_ADD(tx_handlers_drop_wep, |
468 | DEBUGFS_STATS_ADD(rx_handlers_drop); | 413 | local->tx_handlers_drop_wep); |
469 | DEBUGFS_STATS_ADD(rx_handlers_queued); | 414 | DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc, |
470 | DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); | 415 | local->tx_handlers_drop_not_assoc); |
471 | DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); | 416 | DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port, |
472 | DEBUGFS_STATS_ADD(rx_handlers_drop_short); | 417 | local->tx_handlers_drop_unauth_port); |
473 | DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan); | 418 | DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop); |
474 | DEBUGFS_STATS_ADD(tx_expand_skb_head); | 419 | DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued); |
475 | DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); | 420 | DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc, |
476 | DEBUGFS_STATS_ADD(rx_expand_skb_head); | 421 | local->rx_handlers_drop_nullfunc); |
477 | DEBUGFS_STATS_ADD(rx_expand_skb_head2); | 422 | DEBUGFS_STATS_ADD(rx_handlers_drop_defrag, |
478 | DEBUGFS_STATS_ADD(rx_handlers_fragments); | 423 | local->rx_handlers_drop_defrag); |
479 | DEBUGFS_STATS_ADD(tx_status_drop); | 424 | DEBUGFS_STATS_ADD(rx_handlers_drop_short, |
425 | local->rx_handlers_drop_short); | ||
426 | DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan, | ||
427 | local->rx_handlers_drop_passive_scan); | ||
428 | DEBUGFS_STATS_ADD(tx_expand_skb_head, | ||
429 | local->tx_expand_skb_head); | ||
430 | DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned, | ||
431 | local->tx_expand_skb_head_cloned); | ||
432 | DEBUGFS_STATS_ADD(rx_expand_skb_head, | ||
433 | local->rx_expand_skb_head); | ||
434 | DEBUGFS_STATS_ADD(rx_expand_skb_head2, | ||
435 | local->rx_expand_skb_head2); | ||
436 | DEBUGFS_STATS_ADD(rx_handlers_fragments, | ||
437 | local->rx_handlers_fragments); | ||
438 | DEBUGFS_STATS_ADD(tx_status_drop, | ||
439 | local->tx_status_drop); | ||
480 | #endif | 440 | #endif |
481 | DEBUGFS_STATS_ADD(dot11ACKFailureCount); | 441 | DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); |
482 | DEBUGFS_STATS_ADD(dot11RTSFailureCount); | 442 | DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); |
483 | DEBUGFS_STATS_ADD(dot11FCSErrorCount); | 443 | DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); |
484 | DEBUGFS_STATS_ADD(dot11RTSSuccessCount); | 444 | DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); |
485 | } | 445 | } |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index e763f1529ddb..576e024715e3 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -30,7 +30,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \ | |||
30 | } | 30 | } |
31 | #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") | 31 | #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") |
32 | #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") | 32 | #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") |
33 | #define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") | ||
34 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") | 33 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") |
35 | 34 | ||
36 | #define STA_OPS(name) \ | 35 | #define STA_OPS(name) \ |
@@ -52,19 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \ | |||
52 | 51 | ||
53 | STA_FILE(aid, sta.aid, D); | 52 | STA_FILE(aid, sta.aid, D); |
54 | STA_FILE(dev, sdata->name, S); | 53 | STA_FILE(dev, sdata->name, S); |
55 | STA_FILE(rx_packets, rx_packets, LU); | ||
56 | STA_FILE(tx_packets, tx_packets, LU); | ||
57 | STA_FILE(rx_bytes, rx_bytes, LU); | ||
58 | STA_FILE(tx_bytes, tx_bytes, LU); | ||
59 | STA_FILE(rx_duplicates, num_duplicates, LU); | ||
60 | STA_FILE(rx_fragments, rx_fragments, LU); | ||
61 | STA_FILE(rx_dropped, rx_dropped, LU); | ||
62 | STA_FILE(tx_fragments, tx_fragments, LU); | ||
63 | STA_FILE(tx_filtered, tx_filtered_count, LU); | ||
64 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | ||
65 | STA_FILE(tx_retry_count, tx_retry_count, LU); | ||
66 | STA_FILE(last_signal, last_signal, D); | 54 | STA_FILE(last_signal, last_signal, D); |
67 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | ||
68 | 55 | ||
69 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 56 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
70 | size_t count, loff_t *ppos) | 57 | size_t count, loff_t *ppos) |
@@ -210,8 +197,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu | |||
210 | if (start) | 197 | if (start) |
211 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid); | 198 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid); |
212 | else | 199 | else |
213 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid, | 200 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); |
214 | WLAN_BACK_RECIPIENT); | ||
215 | } else { | 201 | } else { |
216 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); | 202 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); |
217 | ret = 0; | 203 | ret = 0; |
@@ -307,6 +293,13 @@ STA_OPS(ht_capa); | |||
307 | debugfs_create_file(#name, 0400, \ | 293 | debugfs_create_file(#name, 0400, \ |
308 | sta->debugfs.dir, sta, &sta_ ##name## _ops); | 294 | sta->debugfs.dir, sta, &sta_ ##name## _ops); |
309 | 295 | ||
296 | #define DEBUGFS_ADD_COUNTER(name, field) \ | ||
297 | if (sizeof(sta->field) == sizeof(u32)) \ | ||
298 | debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ | ||
299 | (u32 *) &sta->field); \ | ||
300 | else \ | ||
301 | debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ | ||
302 | (u64 *) &sta->field); | ||
310 | 303 | ||
311 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 304 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
312 | { | 305 | { |
@@ -339,20 +332,21 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
339 | DEBUGFS_ADD(last_seq_ctrl); | 332 | DEBUGFS_ADD(last_seq_ctrl); |
340 | DEBUGFS_ADD(agg_status); | 333 | DEBUGFS_ADD(agg_status); |
341 | DEBUGFS_ADD(dev); | 334 | DEBUGFS_ADD(dev); |
342 | DEBUGFS_ADD(rx_packets); | ||
343 | DEBUGFS_ADD(tx_packets); | ||
344 | DEBUGFS_ADD(rx_bytes); | ||
345 | DEBUGFS_ADD(tx_bytes); | ||
346 | DEBUGFS_ADD(rx_duplicates); | ||
347 | DEBUGFS_ADD(rx_fragments); | ||
348 | DEBUGFS_ADD(rx_dropped); | ||
349 | DEBUGFS_ADD(tx_fragments); | ||
350 | DEBUGFS_ADD(tx_filtered); | ||
351 | DEBUGFS_ADD(tx_retry_failed); | ||
352 | DEBUGFS_ADD(tx_retry_count); | ||
353 | DEBUGFS_ADD(last_signal); | 335 | DEBUGFS_ADD(last_signal); |
354 | DEBUGFS_ADD(wep_weak_iv_count); | ||
355 | DEBUGFS_ADD(ht_capa); | 336 | DEBUGFS_ADD(ht_capa); |
337 | |||
338 | DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); | ||
339 | DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); | ||
340 | DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes); | ||
341 | DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes); | ||
342 | DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); | ||
343 | DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); | ||
344 | DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped); | ||
345 | DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments); | ||
346 | DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); | ||
347 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); | ||
348 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); | ||
349 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); | ||
356 | } | 350 | } |
357 | 351 | ||
358 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 352 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 4f2271316650..7d18a3245e3d 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -83,6 +83,23 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
83 | trace_drv_bss_info_changed(local, sdata, info, changed); | 83 | trace_drv_bss_info_changed(local, sdata, info, changed); |
84 | } | 84 | } |
85 | 85 | ||
86 | struct in_ifaddr; | ||
87 | static inline int drv_configure_arp_filter(struct ieee80211_local *local, | ||
88 | struct ieee80211_vif *vif, | ||
89 | struct in_ifaddr *ifa_list) | ||
90 | { | ||
91 | int ret = 0; | ||
92 | |||
93 | might_sleep(); | ||
94 | |||
95 | if (local->ops->configure_arp_filter) | ||
96 | ret = local->ops->configure_arp_filter(&local->hw, vif, | ||
97 | ifa_list); | ||
98 | |||
99 | trace_drv_configure_arp_filter(local, vif_to_sdata(vif), ifa_list, ret); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 103 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
87 | struct netdev_hw_addr_list *mc_list) | 104 | struct netdev_hw_addr_list *mc_list) |
88 | { | 105 | { |
@@ -252,9 +269,6 @@ static inline int drv_sta_add(struct ieee80211_local *local, | |||
252 | 269 | ||
253 | if (local->ops->sta_add) | 270 | if (local->ops->sta_add) |
254 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); | 271 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); |
255 | else if (local->ops->sta_notify) | ||
256 | local->ops->sta_notify(&local->hw, &sdata->vif, | ||
257 | STA_NOTIFY_ADD, sta); | ||
258 | 272 | ||
259 | trace_drv_sta_add(local, sdata, sta, ret); | 273 | trace_drv_sta_add(local, sdata, sta, ret); |
260 | 274 | ||
@@ -269,9 +283,6 @@ static inline void drv_sta_remove(struct ieee80211_local *local, | |||
269 | 283 | ||
270 | if (local->ops->sta_remove) | 284 | if (local->ops->sta_remove) |
271 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); | 285 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); |
272 | else if (local->ops->sta_notify) | ||
273 | local->ops->sta_notify(&local->hw, &sdata->vif, | ||
274 | STA_NOTIFY_REMOVE, sta); | ||
275 | 286 | ||
276 | trace_drv_sta_remove(local, sdata, sta); | 287 | trace_drv_sta_remove(local, sdata, sta); |
277 | } | 288 | } |
@@ -349,7 +360,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx, | |||
349 | struct survey_info *survey) | 360 | struct survey_info *survey) |
350 | { | 361 | { |
351 | int ret = -EOPNOTSUPP; | 362 | int ret = -EOPNOTSUPP; |
352 | if (local->ops->conf_tx) | 363 | if (local->ops->get_survey) |
353 | ret = local->ops->get_survey(&local->hw, idx, survey); | 364 | ret = local->ops->get_survey(&local->hw, idx, survey); |
354 | /* trace_drv_get_survey(local, idx, survey, ret); */ | 365 | /* trace_drv_get_survey(local, idx, survey, ret); */ |
355 | return ret; | 366 | return ret; |
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 6a9b2342a9c2..6b90630151ab 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -219,6 +219,31 @@ TRACE_EVENT(drv_bss_info_changed, | |||
219 | ) | 219 | ) |
220 | ); | 220 | ); |
221 | 221 | ||
222 | TRACE_EVENT(drv_configure_arp_filter, | ||
223 | TP_PROTO(struct ieee80211_local *local, | ||
224 | struct ieee80211_sub_if_data *sdata, | ||
225 | struct in_ifaddr *ifa_list, int ret), | ||
226 | |||
227 | TP_ARGS(local, sdata, ifa_list, ret), | ||
228 | |||
229 | TP_STRUCT__entry( | ||
230 | LOCAL_ENTRY | ||
231 | VIF_ENTRY | ||
232 | __field(int, ret) | ||
233 | ), | ||
234 | |||
235 | TP_fast_assign( | ||
236 | LOCAL_ASSIGN; | ||
237 | VIF_ASSIGN; | ||
238 | __entry->ret = ret; | ||
239 | ), | ||
240 | |||
241 | TP_printk( | ||
242 | VIF_PR_FMT LOCAL_PR_FMT " ret:%d", | ||
243 | VIF_PR_ARG, LOCAL_PR_ARG, __entry->ret | ||
244 | ) | ||
245 | ); | ||
246 | |||
222 | TRACE_EVENT(drv_prepare_multicast, | 247 | TRACE_EVENT(drv_prepare_multicast, |
223 | TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), | 248 | TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), |
224 | 249 | ||
@@ -851,25 +876,23 @@ TRACE_EVENT(api_start_tx_ba_cb, | |||
851 | ); | 876 | ); |
852 | 877 | ||
853 | TRACE_EVENT(api_stop_tx_ba_session, | 878 | TRACE_EVENT(api_stop_tx_ba_session, |
854 | TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator), | 879 | TP_PROTO(struct ieee80211_sta *sta, u16 tid), |
855 | 880 | ||
856 | TP_ARGS(sta, tid, initiator), | 881 | TP_ARGS(sta, tid), |
857 | 882 | ||
858 | TP_STRUCT__entry( | 883 | TP_STRUCT__entry( |
859 | STA_ENTRY | 884 | STA_ENTRY |
860 | __field(u16, tid) | 885 | __field(u16, tid) |
861 | __field(u16, initiator) | ||
862 | ), | 886 | ), |
863 | 887 | ||
864 | TP_fast_assign( | 888 | TP_fast_assign( |
865 | STA_ASSIGN; | 889 | STA_ASSIGN; |
866 | __entry->tid = tid; | 890 | __entry->tid = tid; |
867 | __entry->initiator = initiator; | ||
868 | ), | 891 | ), |
869 | 892 | ||
870 | TP_printk( | 893 | TP_printk( |
871 | STA_PR_FMT " tid:%d initiator:%d", | 894 | STA_PR_FMT " tid:%d", |
872 | STA_PR_ARG, __entry->tid, __entry->initiator | 895 | STA_PR_ARG, __entry->tid |
873 | ) | 896 | ) |
874 | ); | 897 | ); |
875 | 898 | ||
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b2cc1fda6cfd..d7a96ced2c83 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -798,6 +798,15 @@ static void ieee80211_ibss_work(struct work_struct *work) | |||
798 | } | 798 | } |
799 | } | 799 | } |
800 | 800 | ||
801 | static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata) | ||
802 | { | ||
803 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | ||
804 | struct ieee80211_local *local = sdata->local; | ||
805 | |||
806 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | ||
807 | ieee80211_queue_work(&local->hw, &ifibss->work); | ||
808 | } | ||
809 | |||
801 | static void ieee80211_ibss_timer(unsigned long data) | 810 | static void ieee80211_ibss_timer(unsigned long data) |
802 | { | 811 | { |
803 | struct ieee80211_sub_if_data *sdata = | 812 | struct ieee80211_sub_if_data *sdata = |
@@ -810,8 +819,7 @@ static void ieee80211_ibss_timer(unsigned long data) | |||
810 | return; | 819 | return; |
811 | } | 820 | } |
812 | 821 | ||
813 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | 822 | ieee80211_queue_ibss_work(sdata); |
814 | ieee80211_queue_work(&local->hw, &ifibss->work); | ||
815 | } | 823 | } |
816 | 824 | ||
817 | #ifdef CONFIG_PM | 825 | #ifdef CONFIG_PM |
@@ -859,7 +867,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) | |||
859 | if (!sdata->u.ibss.ssid_len) | 867 | if (!sdata->u.ibss.ssid_len) |
860 | continue; | 868 | continue; |
861 | sdata->u.ibss.last_scan_completed = jiffies; | 869 | sdata->u.ibss.last_scan_completed = jiffies; |
862 | mod_timer(&sdata->u.ibss.timer, 0); | 870 | ieee80211_queue_ibss_work(sdata); |
863 | } | 871 | } |
864 | mutex_unlock(&local->iflist_mtx); | 872 | mutex_unlock(&local->iflist_mtx); |
865 | } | 873 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1a9e2da37a93..1e779e833473 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -746,10 +746,10 @@ struct ieee80211_local { | |||
746 | struct mutex iflist_mtx; | 746 | struct mutex iflist_mtx; |
747 | 747 | ||
748 | /* | 748 | /* |
749 | * Key lock, protects sdata's key_list and sta_info's | 749 | * Key mutex, protects sdata's key_list and sta_info's |
750 | * key pointers (write access, they're RCU.) | 750 | * key pointers (write access, they're RCU.) |
751 | */ | 751 | */ |
752 | spinlock_t key_lock; | 752 | struct mutex key_mtx; |
753 | 753 | ||
754 | 754 | ||
755 | /* Scanning and BSS list */ | 755 | /* Scanning and BSS list */ |
@@ -851,6 +851,7 @@ struct ieee80211_local { | |||
851 | struct work_struct dynamic_ps_disable_work; | 851 | struct work_struct dynamic_ps_disable_work; |
852 | struct timer_list dynamic_ps_timer; | 852 | struct timer_list dynamic_ps_timer; |
853 | struct notifier_block network_latency_notifier; | 853 | struct notifier_block network_latency_notifier; |
854 | struct notifier_block ifa_notifier; | ||
854 | 855 | ||
855 | int user_power_level; /* in dBm */ | 856 | int user_power_level; /* in dBm */ |
856 | int power_constr_level; /* in dBm */ | 857 | int power_constr_level; /* in dBm */ |
@@ -988,6 +989,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
988 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | 989 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, |
989 | struct ieee80211_channel *chan, | 990 | struct ieee80211_channel *chan, |
990 | enum nl80211_channel_type channel_type, | 991 | enum nl80211_channel_type channel_type, |
992 | bool channel_type_valid, | ||
991 | const u8 *buf, size_t len, u64 *cookie); | 993 | const u8 *buf, size_t len, u64 *cookie); |
992 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | 994 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, |
993 | struct sk_buff *skb); | 995 | struct sk_buff *skb); |
@@ -996,6 +998,7 @@ void ieee80211_send_pspoll(struct ieee80211_local *local, | |||
996 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); | 998 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); |
997 | int ieee80211_max_network_latency(struct notifier_block *nb, | 999 | int ieee80211_max_network_latency(struct notifier_block *nb, |
998 | unsigned long data, void *dummy); | 1000 | unsigned long data, void *dummy); |
1001 | int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); | ||
999 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | 1002 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
1000 | struct ieee80211_channel_sw_ie *sw_elem, | 1003 | struct ieee80211_channel_sw_ie *sw_elem, |
1001 | struct ieee80211_bss *bss, | 1004 | struct ieee80211_bss *bss, |
@@ -1084,7 +1087,7 @@ struct ieee80211_tx_status_rtap_hdr { | |||
1084 | u8 padding_for_rate; | 1087 | u8 padding_for_rate; |
1085 | __le16 tx_flags; | 1088 | __le16 tx_flags; |
1086 | u8 data_retries; | 1089 | u8 data_retries; |
1087 | } __attribute__ ((packed)); | 1090 | } __packed; |
1088 | 1091 | ||
1089 | 1092 | ||
1090 | /* HT */ | 1093 | /* HT */ |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 50deb017fd6e..1afa9ec81fe8 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -268,7 +268,6 @@ static int ieee80211_open(struct net_device *dev) | |||
268 | 268 | ||
269 | changed |= ieee80211_reset_erp_info(sdata); | 269 | changed |= ieee80211_reset_erp_info(sdata); |
270 | ieee80211_bss_info_change_notify(sdata, changed); | 270 | ieee80211_bss_info_change_notify(sdata, changed); |
271 | ieee80211_enable_keys(sdata); | ||
272 | 271 | ||
273 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 272 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
274 | netif_carrier_off(dev); | 273 | netif_carrier_off(dev); |
@@ -321,15 +320,6 @@ static int ieee80211_open(struct net_device *dev) | |||
321 | 320 | ||
322 | ieee80211_recalc_ps(local, -1); | 321 | ieee80211_recalc_ps(local, -1); |
323 | 322 | ||
324 | /* | ||
325 | * ieee80211_sta_work is disabled while network interface | ||
326 | * is down. Therefore, some configuration changes may not | ||
327 | * yet be effective. Trigger execution of ieee80211_sta_work | ||
328 | * to fix this. | ||
329 | */ | ||
330 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
331 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | ||
332 | |||
333 | netif_tx_start_all_queues(dev); | 323 | netif_tx_start_all_queues(dev); |
334 | 324 | ||
335 | return 0; | 325 | return 0; |
@@ -531,8 +521,8 @@ static int ieee80211_stop(struct net_device *dev) | |||
531 | BSS_CHANGED_BEACON_ENABLED); | 521 | BSS_CHANGED_BEACON_ENABLED); |
532 | } | 522 | } |
533 | 523 | ||
534 | /* disable all keys for as long as this netdev is down */ | 524 | /* free all remaining keys, there shouldn't be any */ |
535 | ieee80211_disable_keys(sdata); | 525 | ieee80211_free_keys(sdata); |
536 | drv_remove_interface(local, &sdata->vif); | 526 | drv_remove_interface(local, &sdata->vif); |
537 | } | 527 | } |
538 | 528 | ||
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e8f6e3b252d8..d0d9001a4a6a 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -36,80 +36,20 @@ | |||
36 | * There is currently no way of knowing this except by looking into | 36 | * There is currently no way of knowing this except by looking into |
37 | * debugfs. | 37 | * debugfs. |
38 | * | 38 | * |
39 | * All key operations are protected internally so you can call them at | 39 | * All key operations are protected internally. |
40 | * any time. | ||
41 | * | 40 | * |
42 | * Within mac80211, key references are, just as STA structure references, | 41 | * Within mac80211, key references are, just as STA structure references, |
43 | * protected by RCU. Note, however, that some things are unprotected, | 42 | * protected by RCU. Note, however, that some things are unprotected, |
44 | * namely the key->sta dereferences within the hardware acceleration | 43 | * namely the key->sta dereferences within the hardware acceleration |
45 | * functions. This means that sta_info_destroy() must flush the key todo | 44 | * functions. This means that sta_info_destroy() must remove the key |
46 | * list. | 45 | * which waits for an RCU grace period. |
47 | * | ||
48 | * All the direct key list manipulation functions must not sleep because | ||
49 | * they can operate on STA info structs that are protected by RCU. | ||
50 | */ | 46 | */ |
51 | 47 | ||
52 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | 48 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
53 | 49 | ||
54 | /* key mutex: used to synchronise todo runners */ | 50 | static void assert_key_lock(struct ieee80211_local *local) |
55 | static DEFINE_MUTEX(key_mutex); | ||
56 | static DEFINE_SPINLOCK(todo_lock); | ||
57 | static LIST_HEAD(todo_list); | ||
58 | |||
59 | static void key_todo(struct work_struct *work) | ||
60 | { | 51 | { |
61 | ieee80211_key_todo(); | 52 | WARN_ON(!mutex_is_locked(&local->key_mtx)); |
62 | } | ||
63 | |||
64 | static DECLARE_WORK(todo_work, key_todo); | ||
65 | |||
66 | /** | ||
67 | * add_todo - add todo item for a key | ||
68 | * | ||
69 | * @key: key to add to do item for | ||
70 | * @flag: todo flag(s) | ||
71 | * | ||
72 | * Must be called with IRQs or softirqs disabled. | ||
73 | */ | ||
74 | static void add_todo(struct ieee80211_key *key, u32 flag) | ||
75 | { | ||
76 | if (!key) | ||
77 | return; | ||
78 | |||
79 | spin_lock(&todo_lock); | ||
80 | key->flags |= flag; | ||
81 | /* | ||
82 | * Remove again if already on the list so that we move it to the end. | ||
83 | */ | ||
84 | if (!list_empty(&key->todo)) | ||
85 | list_del(&key->todo); | ||
86 | list_add_tail(&key->todo, &todo_list); | ||
87 | schedule_work(&todo_work); | ||
88 | spin_unlock(&todo_lock); | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * ieee80211_key_lock - lock the mac80211 key operation lock | ||
93 | * | ||
94 | * This locks the (global) mac80211 key operation lock, all | ||
95 | * key operations must be done under this lock. | ||
96 | */ | ||
97 | static void ieee80211_key_lock(void) | ||
98 | { | ||
99 | mutex_lock(&key_mutex); | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * ieee80211_key_unlock - unlock the mac80211 key operation lock | ||
104 | */ | ||
105 | static void ieee80211_key_unlock(void) | ||
106 | { | ||
107 | mutex_unlock(&key_mutex); | ||
108 | } | ||
109 | |||
110 | static void assert_key_lock(void) | ||
111 | { | ||
112 | WARN_ON(!mutex_is_locked(&key_mutex)); | ||
113 | } | 53 | } |
114 | 54 | ||
115 | static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) | 55 | static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) |
@@ -126,12 +66,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
126 | struct ieee80211_sta *sta; | 66 | struct ieee80211_sta *sta; |
127 | int ret; | 67 | int ret; |
128 | 68 | ||
129 | assert_key_lock(); | ||
130 | might_sleep(); | 69 | might_sleep(); |
131 | 70 | ||
132 | if (!key->local->ops->set_key) | 71 | if (!key->local->ops->set_key) |
133 | return; | 72 | return; |
134 | 73 | ||
74 | assert_key_lock(key->local); | ||
75 | |||
135 | sta = get_sta_for_key(key); | 76 | sta = get_sta_for_key(key); |
136 | 77 | ||
137 | sdata = key->sdata; | 78 | sdata = key->sdata; |
@@ -142,11 +83,8 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
142 | 83 | ||
143 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); | 84 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); |
144 | 85 | ||
145 | if (!ret) { | 86 | if (!ret) |
146 | spin_lock_bh(&todo_lock); | ||
147 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; | 87 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; |
148 | spin_unlock_bh(&todo_lock); | ||
149 | } | ||
150 | 88 | ||
151 | if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) | 89 | if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) |
152 | printk(KERN_ERR "mac80211-%s: failed to set key " | 90 | printk(KERN_ERR "mac80211-%s: failed to set key " |
@@ -161,18 +99,15 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
161 | struct ieee80211_sta *sta; | 99 | struct ieee80211_sta *sta; |
162 | int ret; | 100 | int ret; |
163 | 101 | ||
164 | assert_key_lock(); | ||
165 | might_sleep(); | 102 | might_sleep(); |
166 | 103 | ||
167 | if (!key || !key->local->ops->set_key) | 104 | if (!key || !key->local->ops->set_key) |
168 | return; | 105 | return; |
169 | 106 | ||
170 | spin_lock_bh(&todo_lock); | 107 | assert_key_lock(key->local); |
171 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { | 108 | |
172 | spin_unlock_bh(&todo_lock); | 109 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
173 | return; | 110 | return; |
174 | } | ||
175 | spin_unlock_bh(&todo_lock); | ||
176 | 111 | ||
177 | sta = get_sta_for_key(key); | 112 | sta = get_sta_for_key(key); |
178 | sdata = key->sdata; | 113 | sdata = key->sdata; |
@@ -191,9 +126,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
191 | wiphy_name(key->local->hw.wiphy), | 126 | wiphy_name(key->local->hw.wiphy), |
192 | key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); | 127 | key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); |
193 | 128 | ||
194 | spin_lock_bh(&todo_lock); | ||
195 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; | 129 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; |
196 | spin_unlock_bh(&todo_lock); | ||
197 | } | 130 | } |
198 | 131 | ||
199 | static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, | 132 | static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, |
@@ -201,22 +134,24 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, | |||
201 | { | 134 | { |
202 | struct ieee80211_key *key = NULL; | 135 | struct ieee80211_key *key = NULL; |
203 | 136 | ||
137 | assert_key_lock(sdata->local); | ||
138 | |||
204 | if (idx >= 0 && idx < NUM_DEFAULT_KEYS) | 139 | if (idx >= 0 && idx < NUM_DEFAULT_KEYS) |
205 | key = sdata->keys[idx]; | 140 | key = sdata->keys[idx]; |
206 | 141 | ||
207 | rcu_assign_pointer(sdata->default_key, key); | 142 | rcu_assign_pointer(sdata->default_key, key); |
208 | 143 | ||
209 | if (key) | 144 | if (key) { |
210 | add_todo(key, KEY_FLAG_TODO_DEFKEY); | 145 | ieee80211_debugfs_key_remove_default(key->sdata); |
146 | ieee80211_debugfs_key_add_default(key->sdata); | ||
147 | } | ||
211 | } | 148 | } |
212 | 149 | ||
213 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) | 150 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) |
214 | { | 151 | { |
215 | unsigned long flags; | 152 | mutex_lock(&sdata->local->key_mtx); |
216 | |||
217 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
218 | __ieee80211_set_default_key(sdata, idx); | 153 | __ieee80211_set_default_key(sdata, idx); |
219 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 154 | mutex_unlock(&sdata->local->key_mtx); |
220 | } | 155 | } |
221 | 156 | ||
222 | static void | 157 | static void |
@@ -224,24 +159,26 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) | |||
224 | { | 159 | { |
225 | struct ieee80211_key *key = NULL; | 160 | struct ieee80211_key *key = NULL; |
226 | 161 | ||
162 | assert_key_lock(sdata->local); | ||
163 | |||
227 | if (idx >= NUM_DEFAULT_KEYS && | 164 | if (idx >= NUM_DEFAULT_KEYS && |
228 | idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) | 165 | idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) |
229 | key = sdata->keys[idx]; | 166 | key = sdata->keys[idx]; |
230 | 167 | ||
231 | rcu_assign_pointer(sdata->default_mgmt_key, key); | 168 | rcu_assign_pointer(sdata->default_mgmt_key, key); |
232 | 169 | ||
233 | if (key) | 170 | if (key) { |
234 | add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY); | 171 | ieee80211_debugfs_key_remove_mgmt_default(key->sdata); |
172 | ieee80211_debugfs_key_add_mgmt_default(key->sdata); | ||
173 | } | ||
235 | } | 174 | } |
236 | 175 | ||
237 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, | 176 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, |
238 | int idx) | 177 | int idx) |
239 | { | 178 | { |
240 | unsigned long flags; | 179 | mutex_lock(&sdata->local->key_mtx); |
241 | |||
242 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
243 | __ieee80211_set_default_mgmt_key(sdata, idx); | 180 | __ieee80211_set_default_mgmt_key(sdata, idx); |
244 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 181 | mutex_unlock(&sdata->local->key_mtx); |
245 | } | 182 | } |
246 | 183 | ||
247 | 184 | ||
@@ -352,7 +289,6 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
352 | } | 289 | } |
353 | memcpy(key->conf.key, key_data, key_len); | 290 | memcpy(key->conf.key, key_data, key_len); |
354 | INIT_LIST_HEAD(&key->list); | 291 | INIT_LIST_HEAD(&key->list); |
355 | INIT_LIST_HEAD(&key->todo); | ||
356 | 292 | ||
357 | if (alg == ALG_CCMP) { | 293 | if (alg == ALG_CCMP) { |
358 | /* | 294 | /* |
@@ -382,12 +318,27 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
382 | return key; | 318 | return key; |
383 | } | 319 | } |
384 | 320 | ||
321 | static void __ieee80211_key_destroy(struct ieee80211_key *key) | ||
322 | { | ||
323 | if (!key) | ||
324 | return; | ||
325 | |||
326 | ieee80211_key_disable_hw_accel(key); | ||
327 | |||
328 | if (key->conf.alg == ALG_CCMP) | ||
329 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
330 | if (key->conf.alg == ALG_AES_CMAC) | ||
331 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
332 | ieee80211_debugfs_key_remove(key); | ||
333 | |||
334 | kfree(key); | ||
335 | } | ||
336 | |||
385 | void ieee80211_key_link(struct ieee80211_key *key, | 337 | void ieee80211_key_link(struct ieee80211_key *key, |
386 | struct ieee80211_sub_if_data *sdata, | 338 | struct ieee80211_sub_if_data *sdata, |
387 | struct sta_info *sta) | 339 | struct sta_info *sta) |
388 | { | 340 | { |
389 | struct ieee80211_key *old_key; | 341 | struct ieee80211_key *old_key; |
390 | unsigned long flags; | ||
391 | int idx; | 342 | int idx; |
392 | 343 | ||
393 | BUG_ON(!sdata); | 344 | BUG_ON(!sdata); |
@@ -431,7 +382,7 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
431 | } | 382 | } |
432 | } | 383 | } |
433 | 384 | ||
434 | spin_lock_irqsave(&sdata->local->key_lock, flags); | 385 | mutex_lock(&sdata->local->key_mtx); |
435 | 386 | ||
436 | if (sta) | 387 | if (sta) |
437 | old_key = sta->key; | 388 | old_key = sta->key; |
@@ -439,15 +390,13 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
439 | old_key = sdata->keys[idx]; | 390 | old_key = sdata->keys[idx]; |
440 | 391 | ||
441 | __ieee80211_key_replace(sdata, sta, old_key, key); | 392 | __ieee80211_key_replace(sdata, sta, old_key, key); |
393 | __ieee80211_key_destroy(old_key); | ||
442 | 394 | ||
443 | /* free old key later */ | 395 | ieee80211_debugfs_key_add(key); |
444 | add_todo(old_key, KEY_FLAG_TODO_DELETE); | ||
445 | 396 | ||
446 | add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); | 397 | ieee80211_key_enable_hw_accel(key); |
447 | if (ieee80211_sdata_running(sdata)) | ||
448 | add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); | ||
449 | 398 | ||
450 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 399 | mutex_unlock(&sdata->local->key_mtx); |
451 | } | 400 | } |
452 | 401 | ||
453 | static void __ieee80211_key_free(struct ieee80211_key *key) | 402 | static void __ieee80211_key_free(struct ieee80211_key *key) |
@@ -458,170 +407,65 @@ static void __ieee80211_key_free(struct ieee80211_key *key) | |||
458 | if (key->sdata) | 407 | if (key->sdata) |
459 | __ieee80211_key_replace(key->sdata, key->sta, | 408 | __ieee80211_key_replace(key->sdata, key->sta, |
460 | key, NULL); | 409 | key, NULL); |
461 | 410 | __ieee80211_key_destroy(key); | |
462 | add_todo(key, KEY_FLAG_TODO_DELETE); | ||
463 | } | 411 | } |
464 | 412 | ||
465 | void ieee80211_key_free(struct ieee80211_key *key) | 413 | void ieee80211_key_free(struct ieee80211_key *key) |
466 | { | 414 | { |
467 | unsigned long flags; | 415 | struct ieee80211_local *local; |
468 | 416 | ||
469 | if (!key) | 417 | if (!key) |
470 | return; | 418 | return; |
471 | 419 | ||
472 | if (!key->sdata) { | 420 | local = key->sdata->local; |
473 | /* The key has not been linked yet, simply free it | ||
474 | * and don't Oops */ | ||
475 | if (key->conf.alg == ALG_CCMP) | ||
476 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
477 | kfree(key); | ||
478 | return; | ||
479 | } | ||
480 | 421 | ||
481 | spin_lock_irqsave(&key->sdata->local->key_lock, flags); | 422 | mutex_lock(&local->key_mtx); |
482 | __ieee80211_key_free(key); | 423 | __ieee80211_key_free(key); |
483 | spin_unlock_irqrestore(&key->sdata->local->key_lock, flags); | 424 | mutex_unlock(&local->key_mtx); |
484 | } | 425 | } |
485 | 426 | ||
486 | /* | 427 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) |
487 | * To be safe against concurrent manipulations of the list (which shouldn't | ||
488 | * actually happen) we need to hold the spinlock. But under the spinlock we | ||
489 | * can't actually do much, so we defer processing to the todo list. Then run | ||
490 | * the todo list to be sure the operation and possibly previously pending | ||
491 | * operations are completed. | ||
492 | */ | ||
493 | static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata, | ||
494 | u32 todo_flags) | ||
495 | { | 428 | { |
496 | struct ieee80211_key *key; | 429 | struct ieee80211_key *key; |
497 | unsigned long flags; | ||
498 | |||
499 | might_sleep(); | ||
500 | |||
501 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
502 | list_for_each_entry(key, &sdata->key_list, list) | ||
503 | add_todo(key, todo_flags); | ||
504 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | ||
505 | |||
506 | ieee80211_key_todo(); | ||
507 | } | ||
508 | 430 | ||
509 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) | ||
510 | { | ||
511 | ASSERT_RTNL(); | 431 | ASSERT_RTNL(); |
512 | 432 | ||
513 | if (WARN_ON(!ieee80211_sdata_running(sdata))) | 433 | if (WARN_ON(!ieee80211_sdata_running(sdata))) |
514 | return; | 434 | return; |
515 | 435 | ||
516 | ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); | 436 | mutex_lock(&sdata->local->key_mtx); |
517 | } | ||
518 | |||
519 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) | ||
520 | { | ||
521 | ASSERT_RTNL(); | ||
522 | |||
523 | ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE); | ||
524 | } | ||
525 | |||
526 | static void __ieee80211_key_destroy(struct ieee80211_key *key) | ||
527 | { | ||
528 | if (!key) | ||
529 | return; | ||
530 | |||
531 | ieee80211_key_disable_hw_accel(key); | ||
532 | 437 | ||
533 | if (key->conf.alg == ALG_CCMP) | 438 | list_for_each_entry(key, &sdata->key_list, list) |
534 | ieee80211_aes_key_free(key->u.ccmp.tfm); | 439 | ieee80211_key_enable_hw_accel(key); |
535 | if (key->conf.alg == ALG_AES_CMAC) | ||
536 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
537 | ieee80211_debugfs_key_remove(key); | ||
538 | 440 | ||
539 | kfree(key); | 441 | mutex_unlock(&sdata->local->key_mtx); |
540 | } | 442 | } |
541 | 443 | ||
542 | static void __ieee80211_key_todo(void) | 444 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) |
543 | { | 445 | { |
544 | struct ieee80211_key *key; | 446 | struct ieee80211_key *key; |
545 | bool work_done; | ||
546 | u32 todoflags; | ||
547 | 447 | ||
548 | /* | 448 | ASSERT_RTNL(); |
549 | * NB: sta_info_destroy relies on this! | ||
550 | */ | ||
551 | synchronize_rcu(); | ||
552 | |||
553 | spin_lock_bh(&todo_lock); | ||
554 | while (!list_empty(&todo_list)) { | ||
555 | key = list_first_entry(&todo_list, struct ieee80211_key, todo); | ||
556 | list_del_init(&key->todo); | ||
557 | todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS | | ||
558 | KEY_FLAG_TODO_DEFKEY | | ||
559 | KEY_FLAG_TODO_DEFMGMTKEY | | ||
560 | KEY_FLAG_TODO_HWACCEL_ADD | | ||
561 | KEY_FLAG_TODO_HWACCEL_REMOVE | | ||
562 | KEY_FLAG_TODO_DELETE); | ||
563 | key->flags &= ~todoflags; | ||
564 | spin_unlock_bh(&todo_lock); | ||
565 | |||
566 | work_done = false; | ||
567 | |||
568 | if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) { | ||
569 | ieee80211_debugfs_key_add(key); | ||
570 | work_done = true; | ||
571 | } | ||
572 | if (todoflags & KEY_FLAG_TODO_DEFKEY) { | ||
573 | ieee80211_debugfs_key_remove_default(key->sdata); | ||
574 | ieee80211_debugfs_key_add_default(key->sdata); | ||
575 | work_done = true; | ||
576 | } | ||
577 | if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) { | ||
578 | ieee80211_debugfs_key_remove_mgmt_default(key->sdata); | ||
579 | ieee80211_debugfs_key_add_mgmt_default(key->sdata); | ||
580 | work_done = true; | ||
581 | } | ||
582 | if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) { | ||
583 | ieee80211_key_enable_hw_accel(key); | ||
584 | work_done = true; | ||
585 | } | ||
586 | if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) { | ||
587 | ieee80211_key_disable_hw_accel(key); | ||
588 | work_done = true; | ||
589 | } | ||
590 | if (todoflags & KEY_FLAG_TODO_DELETE) { | ||
591 | __ieee80211_key_destroy(key); | ||
592 | work_done = true; | ||
593 | } | ||
594 | 449 | ||
595 | WARN_ON(!work_done); | 450 | mutex_lock(&sdata->local->key_mtx); |
596 | 451 | ||
597 | spin_lock_bh(&todo_lock); | 452 | list_for_each_entry(key, &sdata->key_list, list) |
598 | } | 453 | ieee80211_key_disable_hw_accel(key); |
599 | spin_unlock_bh(&todo_lock); | ||
600 | } | ||
601 | 454 | ||
602 | void ieee80211_key_todo(void) | 455 | mutex_unlock(&sdata->local->key_mtx); |
603 | { | ||
604 | ieee80211_key_lock(); | ||
605 | __ieee80211_key_todo(); | ||
606 | ieee80211_key_unlock(); | ||
607 | } | 456 | } |
608 | 457 | ||
609 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) | 458 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) |
610 | { | 459 | { |
611 | struct ieee80211_key *key, *tmp; | 460 | struct ieee80211_key *key, *tmp; |
612 | unsigned long flags; | ||
613 | 461 | ||
614 | ieee80211_key_lock(); | 462 | mutex_lock(&sdata->local->key_mtx); |
615 | 463 | ||
616 | ieee80211_debugfs_key_remove_default(sdata); | 464 | ieee80211_debugfs_key_remove_default(sdata); |
617 | ieee80211_debugfs_key_remove_mgmt_default(sdata); | 465 | ieee80211_debugfs_key_remove_mgmt_default(sdata); |
618 | 466 | ||
619 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
620 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) | 467 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) |
621 | __ieee80211_key_free(key); | 468 | __ieee80211_key_free(key); |
622 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | ||
623 | |||
624 | __ieee80211_key_todo(); | ||
625 | 469 | ||
626 | ieee80211_key_unlock(); | 470 | mutex_unlock(&sdata->local->key_mtx); |
627 | } | 471 | } |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index bdc2968c2bbe..9996e3be6e63 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -38,25 +38,9 @@ struct sta_info; | |||
38 | * | 38 | * |
39 | * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present | 39 | * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present |
40 | * in the hardware for TX crypto hardware acceleration. | 40 | * in the hardware for TX crypto hardware acceleration. |
41 | * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an | ||
42 | * RCU grace period, no longer be reachable other than from the | ||
43 | * todo list. | ||
44 | * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration. | ||
45 | * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware | ||
46 | * acceleration. | ||
47 | * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated. | ||
48 | * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs. | ||
49 | * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs | ||
50 | * to be updated. | ||
51 | */ | 41 | */ |
52 | enum ieee80211_internal_key_flags { | 42 | enum ieee80211_internal_key_flags { |
53 | KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), | 43 | KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), |
54 | KEY_FLAG_TODO_DELETE = BIT(1), | ||
55 | KEY_FLAG_TODO_HWACCEL_ADD = BIT(2), | ||
56 | KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3), | ||
57 | KEY_FLAG_TODO_DEFKEY = BIT(4), | ||
58 | KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), | ||
59 | KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), | ||
60 | }; | 44 | }; |
61 | 45 | ||
62 | enum ieee80211_internal_tkip_state { | 46 | enum ieee80211_internal_tkip_state { |
@@ -79,10 +63,8 @@ struct ieee80211_key { | |||
79 | 63 | ||
80 | /* for sdata list */ | 64 | /* for sdata list */ |
81 | struct list_head list; | 65 | struct list_head list; |
82 | /* for todo list */ | ||
83 | struct list_head todo; | ||
84 | 66 | ||
85 | /* protected by todo lock! */ | 67 | /* protected by key mutex */ |
86 | unsigned int flags; | 68 | unsigned int flags; |
87 | 69 | ||
88 | union { | 70 | union { |
@@ -155,6 +137,4 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); | |||
155 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); | 137 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); |
156 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); | 138 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); |
157 | 139 | ||
158 | void ieee80211_key_todo(void); | ||
159 | |||
160 | #endif /* IEEE80211_KEY_H */ | 140 | #endif /* IEEE80211_KEY_H */ |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 22a384dfab65..88b671a16a41 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -329,6 +329,63 @@ static void ieee80211_recalc_smps_work(struct work_struct *work) | |||
329 | mutex_unlock(&local->iflist_mtx); | 329 | mutex_unlock(&local->iflist_mtx); |
330 | } | 330 | } |
331 | 331 | ||
332 | #ifdef CONFIG_INET | ||
333 | int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata) | ||
334 | { | ||
335 | struct in_device *idev; | ||
336 | int ret = 0; | ||
337 | |||
338 | BUG_ON(!sdata); | ||
339 | ASSERT_RTNL(); | ||
340 | |||
341 | idev = sdata->dev->ip_ptr; | ||
342 | if (!idev) | ||
343 | return 0; | ||
344 | |||
345 | ret = drv_configure_arp_filter(sdata->local, &sdata->vif, | ||
346 | idev->ifa_list); | ||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | static int ieee80211_ifa_changed(struct notifier_block *nb, | ||
351 | unsigned long data, void *arg) | ||
352 | { | ||
353 | struct in_ifaddr *ifa = arg; | ||
354 | struct ieee80211_local *local = | ||
355 | container_of(nb, struct ieee80211_local, | ||
356 | ifa_notifier); | ||
357 | struct net_device *ndev = ifa->ifa_dev->dev; | ||
358 | struct wireless_dev *wdev = ndev->ieee80211_ptr; | ||
359 | struct ieee80211_sub_if_data *sdata; | ||
360 | struct ieee80211_if_managed *ifmgd; | ||
361 | |||
362 | if (!netif_running(ndev)) | ||
363 | return NOTIFY_DONE; | ||
364 | |||
365 | /* Make sure it's our interface that got changed */ | ||
366 | if (!wdev) | ||
367 | return NOTIFY_DONE; | ||
368 | |||
369 | if (wdev->wiphy != local->hw.wiphy) | ||
370 | return NOTIFY_DONE; | ||
371 | |||
372 | /* We are concerned about IP addresses only when associated */ | ||
373 | sdata = IEEE80211_DEV_TO_SUB_IF(ndev); | ||
374 | |||
375 | /* ARP filtering is only supported in managed mode */ | ||
376 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
377 | return NOTIFY_DONE; | ||
378 | |||
379 | ifmgd = &sdata->u.mgd; | ||
380 | mutex_lock(&ifmgd->mtx); | ||
381 | if (ifmgd->associated) | ||
382 | ieee80211_set_arp_filter(sdata); | ||
383 | mutex_unlock(&ifmgd->mtx); | ||
384 | |||
385 | return NOTIFY_DONE; | ||
386 | } | ||
387 | #endif | ||
388 | |||
332 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | 389 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, |
333 | const struct ieee80211_ops *ops) | 390 | const struct ieee80211_ops *ops) |
334 | { | 391 | { |
@@ -396,7 +453,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
396 | mutex_init(&local->iflist_mtx); | 453 | mutex_init(&local->iflist_mtx); |
397 | mutex_init(&local->scan_mtx); | 454 | mutex_init(&local->scan_mtx); |
398 | 455 | ||
399 | spin_lock_init(&local->key_lock); | 456 | mutex_init(&local->key_mtx); |
400 | spin_lock_init(&local->filter_lock); | 457 | spin_lock_init(&local->filter_lock); |
401 | spin_lock_init(&local->queue_stop_reason_lock); | 458 | spin_lock_init(&local->queue_stop_reason_lock); |
402 | 459 | ||
@@ -612,14 +669,24 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
612 | ieee80211_max_network_latency; | 669 | ieee80211_max_network_latency; |
613 | result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, | 670 | result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, |
614 | &local->network_latency_notifier); | 671 | &local->network_latency_notifier); |
615 | |||
616 | if (result) { | 672 | if (result) { |
617 | rtnl_lock(); | 673 | rtnl_lock(); |
618 | goto fail_pm_qos; | 674 | goto fail_pm_qos; |
619 | } | 675 | } |
620 | 676 | ||
677 | #ifdef CONFIG_INET | ||
678 | local->ifa_notifier.notifier_call = ieee80211_ifa_changed; | ||
679 | result = register_inetaddr_notifier(&local->ifa_notifier); | ||
680 | if (result) | ||
681 | goto fail_ifa; | ||
682 | #endif | ||
683 | |||
621 | return 0; | 684 | return 0; |
622 | 685 | ||
686 | fail_ifa: | ||
687 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, | ||
688 | &local->network_latency_notifier); | ||
689 | rtnl_lock(); | ||
623 | fail_pm_qos: | 690 | fail_pm_qos: |
624 | ieee80211_led_exit(local); | 691 | ieee80211_led_exit(local); |
625 | ieee80211_remove_interfaces(local); | 692 | ieee80211_remove_interfaces(local); |
@@ -647,6 +714,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
647 | 714 | ||
648 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, | 715 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, |
649 | &local->network_latency_notifier); | 716 | &local->network_latency_notifier); |
717 | #ifdef CONFIG_INET | ||
718 | unregister_inetaddr_notifier(&local->ifa_notifier); | ||
719 | #endif | ||
650 | 720 | ||
651 | rtnl_lock(); | 721 | rtnl_lock(); |
652 | 722 | ||
@@ -704,6 +774,10 @@ static int __init ieee80211_init(void) | |||
704 | if (ret) | 774 | if (ret) |
705 | return ret; | 775 | return ret; |
706 | 776 | ||
777 | ret = rc80211_minstrel_ht_init(); | ||
778 | if (ret) | ||
779 | goto err_minstrel; | ||
780 | |||
707 | ret = rc80211_pid_init(); | 781 | ret = rc80211_pid_init(); |
708 | if (ret) | 782 | if (ret) |
709 | goto err_pid; | 783 | goto err_pid; |
@@ -716,6 +790,8 @@ static int __init ieee80211_init(void) | |||
716 | err_netdev: | 790 | err_netdev: |
717 | rc80211_pid_exit(); | 791 | rc80211_pid_exit(); |
718 | err_pid: | 792 | err_pid: |
793 | rc80211_minstrel_ht_exit(); | ||
794 | err_minstrel: | ||
719 | rc80211_minstrel_exit(); | 795 | rc80211_minstrel_exit(); |
720 | 796 | ||
721 | return ret; | 797 | return ret; |
@@ -724,6 +800,7 @@ static int __init ieee80211_init(void) | |||
724 | static void __exit ieee80211_exit(void) | 800 | static void __exit ieee80211_exit(void) |
725 | { | 801 | { |
726 | rc80211_pid_exit(); | 802 | rc80211_pid_exit(); |
803 | rc80211_minstrel_ht_exit(); | ||
727 | rc80211_minstrel_exit(); | 804 | rc80211_minstrel_exit(); |
728 | 805 | ||
729 | /* | 806 | /* |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0839c4e8fd2e..8fb85c3a043d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | 1692 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); |
1693 | break; | 1693 | break; |
1694 | case IEEE80211_STYPE_ACTION: | 1694 | case IEEE80211_STYPE_ACTION: |
1695 | if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) | 1695 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1696 | break; | 1732 | break; |
1697 | 1733 | } | |
1698 | ieee80211_sta_process_chanswitch(sdata, | 1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1699 | &mgmt->u.action.u.chan_switch.sw_elem, | 1735 | ieee80211_sta_process_chanswitch(sdata, |
1700 | (void *)ifmgd->associated->priv, | 1736 | &mgmt->u.action.u.chan_switch.sw_elem, |
1701 | rx_status->mactime); | 1737 | (void *)ifmgd->associated->priv, |
1702 | break; | 1738 | rx_status->mactime); |
1739 | break; | ||
1740 | } | ||
1703 | } | 1741 | } |
1704 | mutex_unlock(&ifmgd->mtx); | 1742 | mutex_unlock(&ifmgd->mtx); |
1705 | 1743 | ||
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1722 | mutex_unlock(&ifmgd->mtx); | 1760 | mutex_unlock(&ifmgd->mtx); |
1723 | 1761 | ||
1724 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && | 1762 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && |
1725 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) | 1763 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { |
1726 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1764 | struct ieee80211_local *local = sdata->local; |
1765 | struct ieee80211_work *wk; | ||
1766 | |||
1767 | mutex_lock(&local->work_mtx); | ||
1768 | list_for_each_entry(wk, &local->work_list, list) { | ||
1769 | if (wk->sdata != sdata) | ||
1770 | continue; | ||
1771 | |||
1772 | if (wk->type != IEEE80211_WORK_ASSOC) | ||
1773 | continue; | ||
1774 | |||
1775 | if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) | ||
1776 | continue; | ||
1777 | if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN)) | ||
1778 | continue; | ||
1779 | |||
1780 | /* | ||
1781 | * Printing the message only here means we can't | ||
1782 | * spuriously print it, but it also means that it | ||
1783 | * won't be printed when the frame comes in before | ||
1784 | * we even tried to associate or in similar cases. | ||
1785 | * | ||
1786 | * Ultimately, I suspect cfg80211 should print the | ||
1787 | * messages instead. | ||
1788 | */ | ||
1789 | printk(KERN_DEBUG | ||
1790 | "%s: deauthenticated from %pM (Reason: %u)\n", | ||
1791 | sdata->name, mgmt->bssid, | ||
1792 | le16_to_cpu(mgmt->u.deauth.reason_code)); | ||
1793 | |||
1794 | list_del_rcu(&wk->list); | ||
1795 | free_work(wk); | ||
1796 | break; | ||
1797 | } | ||
1798 | mutex_unlock(&local->work_mtx); | ||
1727 | 1799 | ||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | ||
1801 | } | ||
1728 | out: | 1802 | out: |
1729 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
1730 | } | 1804 | } |
@@ -1763,7 +1837,7 @@ static void ieee80211_sta_work(struct work_struct *work) | |||
1763 | 1837 | ||
1764 | /* | 1838 | /* |
1765 | * ieee80211_queue_work() should have picked up most cases, | 1839 | * ieee80211_queue_work() should have picked up most cases, |
1766 | * here we'll pick the the rest. | 1840 | * here we'll pick the rest. |
1767 | */ | 1841 | */ |
1768 | if (WARN(local->suspended, "STA MLME work scheduled while " | 1842 | if (WARN(local->suspended, "STA MLME work scheduled while " |
1769 | "going to suspend\n")) | 1843 | "going to suspend\n")) |
@@ -2078,8 +2152,18 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, | |||
2078 | cfg80211_send_assoc_timeout(wk->sdata->dev, | 2152 | cfg80211_send_assoc_timeout(wk->sdata->dev, |
2079 | wk->filter_ta); | 2153 | wk->filter_ta); |
2080 | return WORK_DONE_DESTROY; | 2154 | return WORK_DONE_DESTROY; |
2155 | } else { | ||
2156 | mutex_unlock(&wk->sdata->u.mgd.mtx); | ||
2157 | #ifdef CONFIG_INET | ||
2158 | /* | ||
2159 | * configure ARP filter IP addresses to the driver, | ||
2160 | * intentionally outside the mgd mutex. | ||
2161 | */ | ||
2162 | rtnl_lock(); | ||
2163 | ieee80211_set_arp_filter(wk->sdata); | ||
2164 | rtnl_unlock(); | ||
2165 | #endif | ||
2081 | } | 2166 | } |
2082 | mutex_unlock(&wk->sdata->u.mgd.mtx); | ||
2083 | } | 2167 | } |
2084 | 2168 | ||
2085 | cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); | 2169 | cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); |
@@ -2308,6 +2392,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2308 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | 2392 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, |
2309 | struct ieee80211_channel *chan, | 2393 | struct ieee80211_channel *chan, |
2310 | enum nl80211_channel_type channel_type, | 2394 | enum nl80211_channel_type channel_type, |
2395 | bool channel_type_valid, | ||
2311 | const u8 *buf, size_t len, u64 *cookie) | 2396 | const u8 *buf, size_t len, u64 *cookie) |
2312 | { | 2397 | { |
2313 | struct ieee80211_local *local = sdata->local; | 2398 | struct ieee80211_local *local = sdata->local; |
@@ -2315,9 +2400,11 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | |||
2315 | struct sk_buff *skb; | 2400 | struct sk_buff *skb; |
2316 | 2401 | ||
2317 | /* Check that we are on the requested channel for transmission */ | 2402 | /* Check that we are on the requested channel for transmission */ |
2318 | if ((chan != local->tmp_channel || | 2403 | if (chan != local->tmp_channel && |
2319 | channel_type != local->tmp_channel_type) && | 2404 | chan != local->oper_channel) |
2320 | (chan != local->oper_channel || | 2405 | return -EBUSY; |
2406 | if (channel_type_valid && | ||
2407 | (channel_type != local->tmp_channel_type && | ||
2321 | channel_type != local->_oper_channel_type)) | 2408 | channel_type != local->_oper_channel_type)) |
2322 | return -EBUSY; | 2409 | return -EBUSY; |
2323 | 2410 | ||
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 065a96190e32..168427b0ffdc 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -147,5 +147,18 @@ static inline void rc80211_minstrel_exit(void) | |||
147 | } | 147 | } |
148 | #endif | 148 | #endif |
149 | 149 | ||
150 | #ifdef CONFIG_MAC80211_RC_MINSTREL_HT | ||
151 | extern int rc80211_minstrel_ht_init(void); | ||
152 | extern void rc80211_minstrel_ht_exit(void); | ||
153 | #else | ||
154 | static inline int rc80211_minstrel_ht_init(void) | ||
155 | { | ||
156 | return 0; | ||
157 | } | ||
158 | static inline void rc80211_minstrel_ht_exit(void) | ||
159 | { | ||
160 | } | ||
161 | #endif | ||
162 | |||
150 | 163 | ||
151 | #endif /* IEEE80211_RATE_H */ | 164 | #endif /* IEEE80211_RATE_H */ |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c new file mode 100644 index 000000000000..c23f08251da4 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -0,0 +1,824 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/skbuff.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/random.h> | ||
13 | #include <linux/ieee80211.h> | ||
14 | #include <net/mac80211.h> | ||
15 | #include "rate.h" | ||
16 | #include "rc80211_minstrel.h" | ||
17 | #include "rc80211_minstrel_ht.h" | ||
18 | |||
19 | #define AVG_PKT_SIZE 1200 | ||
20 | #define SAMPLE_COLUMNS 10 | ||
21 | #define EWMA_LEVEL 75 | ||
22 | |||
23 | /* Number of bits for an average sized packet */ | ||
24 | #define MCS_NBITS (AVG_PKT_SIZE << 3) | ||
25 | |||
26 | /* Number of symbols for a packet with (bps) bits per symbol */ | ||
27 | #define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) | ||
28 | |||
29 | /* Transmission time for a packet containing (syms) symbols */ | ||
30 | #define MCS_SYMBOL_TIME(sgi, syms) \ | ||
31 | (sgi ? \ | ||
32 | ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \ | ||
33 | (syms) << 2 /* syms * 4 us */ \ | ||
34 | ) | ||
35 | |||
36 | /* Transmit duration for the raw data part of an average sized packet */ | ||
37 | #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) | ||
38 | |||
39 | /* MCS rate information for an MCS group */ | ||
40 | #define MCS_GROUP(_streams, _sgi, _ht40) { \ | ||
41 | .streams = _streams, \ | ||
42 | .flags = \ | ||
43 | (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ | ||
44 | (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ | ||
45 | .duration = { \ | ||
46 | MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ | ||
47 | MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ | ||
48 | MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ | ||
49 | MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ | ||
50 | MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ | ||
51 | MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ | ||
52 | MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ | ||
53 | MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \ | ||
54 | } \ | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * To enable sufficiently targeted rate sampling, MCS rates are divided into | ||
59 | * groups, based on the number of streams and flags (HT40, SGI) that they | ||
60 | * use. | ||
61 | */ | ||
62 | const struct mcs_group minstrel_mcs_groups[] = { | ||
63 | MCS_GROUP(1, 0, 0), | ||
64 | MCS_GROUP(2, 0, 0), | ||
65 | #if MINSTREL_MAX_STREAMS >= 3 | ||
66 | MCS_GROUP(3, 0, 0), | ||
67 | #endif | ||
68 | |||
69 | MCS_GROUP(1, 1, 0), | ||
70 | MCS_GROUP(2, 1, 0), | ||
71 | #if MINSTREL_MAX_STREAMS >= 3 | ||
72 | MCS_GROUP(3, 1, 0), | ||
73 | #endif | ||
74 | |||
75 | MCS_GROUP(1, 0, 1), | ||
76 | MCS_GROUP(2, 0, 1), | ||
77 | #if MINSTREL_MAX_STREAMS >= 3 | ||
78 | MCS_GROUP(3, 0, 1), | ||
79 | #endif | ||
80 | |||
81 | MCS_GROUP(1, 1, 1), | ||
82 | MCS_GROUP(2, 1, 1), | ||
83 | #if MINSTREL_MAX_STREAMS >= 3 | ||
84 | MCS_GROUP(3, 1, 1), | ||
85 | #endif | ||
86 | }; | ||
87 | |||
88 | static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; | ||
89 | |||
90 | /* | ||
91 | * Perform EWMA (Exponentially Weighted Moving Average) calculation | ||
92 | */ | ||
93 | static int | ||
94 | minstrel_ewma(int old, int new, int weight) | ||
95 | { | ||
96 | return (new * (100 - weight) + old * weight) / 100; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Look up an MCS group index based on mac80211 rate information | ||
101 | */ | ||
102 | static int | ||
103 | minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) | ||
104 | { | ||
105 | int streams = (rate->idx / MCS_GROUP_RATES) + 1; | ||
106 | u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
107 | int i; | ||
108 | |||
109 | for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { | ||
110 | if (minstrel_mcs_groups[i].streams != streams) | ||
111 | continue; | ||
112 | if (minstrel_mcs_groups[i].flags != (rate->flags & flags)) | ||
113 | continue; | ||
114 | |||
115 | return i; | ||
116 | } | ||
117 | |||
118 | WARN_ON(1); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static inline struct minstrel_rate_stats * | ||
123 | minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) | ||
124 | { | ||
125 | return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; | ||
126 | } | ||
127 | |||
128 | |||
129 | /* | ||
130 | * Recalculate success probabilities and counters for a rate using EWMA | ||
131 | */ | ||
132 | static void | ||
133 | minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) | ||
134 | { | ||
135 | if (unlikely(mr->attempts > 0)) { | ||
136 | mr->sample_skipped = 0; | ||
137 | mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); | ||
138 | if (!mr->att_hist) | ||
139 | mr->probability = mr->cur_prob; | ||
140 | else | ||
141 | mr->probability = minstrel_ewma(mr->probability, | ||
142 | mr->cur_prob, EWMA_LEVEL); | ||
143 | mr->att_hist += mr->attempts; | ||
144 | mr->succ_hist += mr->success; | ||
145 | } else { | ||
146 | mr->sample_skipped++; | ||
147 | } | ||
148 | mr->last_success = mr->success; | ||
149 | mr->last_attempts = mr->attempts; | ||
150 | mr->success = 0; | ||
151 | mr->attempts = 0; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Calculate throughput based on the average A-MPDU length, taking into account | ||
156 | * the expected number of retransmissions and their expected length | ||
157 | */ | ||
158 | static void | ||
159 | minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | ||
160 | int group, int rate) | ||
161 | { | ||
162 | struct minstrel_rate_stats *mr; | ||
163 | unsigned int usecs; | ||
164 | |||
165 | mr = &mi->groups[group].rates[rate]; | ||
166 | |||
167 | if (mr->probability < MINSTREL_FRAC(1, 10)) { | ||
168 | mr->cur_tp = 0; | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); | ||
173 | usecs += minstrel_mcs_groups[group].duration[rate]; | ||
174 | mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); | ||
175 | } | ||
176 | |||
/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 * - max_prob_rate must use only one stream, as a tradeoff between delivery
 *   probability and throughput during strong fluctuations
 * - as long as the max prob rate has a probability of more than 3/4, pick
 *   higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
	int group, i, index;

	/* fold this interval's average A-MPDU length into the EWMA */
	if (mi->ampdu_packets > 0) {
		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;
	mi->max_tp_rate = 0;
	mi->max_tp_rate2 = 0;
	mi->max_prob_rate = 0;

	/* first pass: pick the best rates within each supported group */
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		cur_prob = 0;
		cur_prob_tp = 0;
		cur_tp = 0;
		cur_tp2 = 0;

		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		mg->max_tp_rate = 0;
		mg->max_tp_rate2 = 0;
		mg->max_prob_rate = 0;
		mi->sample_count++;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mg->supported & BIT(i)))
				continue;

			mr = &mg->rates[i];
			mr->retry_updated = false;
			index = MCS_GROUP_RATES * group + i;
			minstrel_calc_rate_ewma(mp, mr);
			minstrel_ht_calc_tp(mp, mi, group, i);

			if (!mr->cur_tp)
				continue;

			/* ignore the lowest rate of each single-stream group */
			if (!i && minstrel_mcs_groups[group].streams == 1)
				continue;

			if ((mr->cur_tp > cur_prob_tp && mr->probability >
			     MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
				mg->max_prob_rate = index;
				cur_prob = mr->probability;
			}

			/*
			 * swap() keeps the displaced former best rate in
			 * "index"/"mr" so it can still compete for the
			 * second-best slot below
			 */
			if (mr->cur_tp > cur_tp) {
				swap(index, mg->max_tp_rate);
				cur_tp = mr->cur_tp;
				mr = minstrel_get_ratestats(mi, index);
			}

			if (index >= mg->max_tp_rate)
				continue;

			if (mr->cur_tp > cur_tp2) {
				mg->max_tp_rate2 = index;
				cur_tp2 = mr->cur_tp;
			}
		}
	}

	/* try to sample up to half of the available rates during each interval */
	mi->sample_count *= 4;

	/* second pass: pick the overall winners across all groups */
	cur_prob = 0;
	cur_prob_tp = 0;
	cur_tp = 0;
	cur_tp2 = 0;
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		/* max_prob_rate is restricted to single-stream groups.
		 * NOTE(review): cur_prob_tp is never updated in this loop,
		 * so the comparison below is effectively "cur_tp > 0" after
		 * the first hit - verify this matches the intended rule */
		mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
		if (cur_prob_tp < mr->cur_tp &&
		    minstrel_mcs_groups[group].streams == 1) {
			mi->max_prob_rate = mg->max_prob_rate;
			cur_prob = mr->cur_prob;
		}

		mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
		if (cur_tp < mr->cur_tp) {
			mi->max_tp_rate = mg->max_tp_rate;
			cur_tp = mr->cur_tp;
		}

		mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
		if (cur_tp2 < mr->cur_tp) {
			mi->max_tp_rate2 = mg->max_tp_rate2;
			cur_tp2 = mr->cur_tp;
		}
	}

	mi->stats_update = jiffies;
}
295 | |||
296 | static bool | ||
297 | minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) | ||
298 | { | ||
299 | if (!rate->count) | ||
300 | return false; | ||
301 | |||
302 | if (rate->idx < 0) | ||
303 | return false; | ||
304 | |||
305 | return !!(rate->flags & IEEE80211_TX_RC_MCS); | ||
306 | } | ||
307 | |||
308 | static void | ||
309 | minstrel_next_sample_idx(struct minstrel_ht_sta *mi) | ||
310 | { | ||
311 | struct minstrel_mcs_group_data *mg; | ||
312 | |||
313 | for (;;) { | ||
314 | mi->sample_group++; | ||
315 | mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); | ||
316 | mg = &mi->groups[mi->sample_group]; | ||
317 | |||
318 | if (!mg->supported) | ||
319 | continue; | ||
320 | |||
321 | if (++mg->index >= MCS_GROUP_RATES) { | ||
322 | mg->index = 0; | ||
323 | if (++mg->column >= ARRAY_SIZE(sample_table)) | ||
324 | mg->column = 0; | ||
325 | } | ||
326 | break; | ||
327 | } | ||
328 | } | ||
329 | |||
330 | static void | ||
331 | minstrel_downgrade_rate(struct minstrel_ht_sta *mi, int *idx, bool primary) | ||
332 | { | ||
333 | int group, orig_group; | ||
334 | |||
335 | orig_group = group = *idx / MCS_GROUP_RATES; | ||
336 | while (group > 0) { | ||
337 | group--; | ||
338 | |||
339 | if (!mi->groups[group].supported) | ||
340 | continue; | ||
341 | |||
342 | if (minstrel_mcs_groups[group].streams > | ||
343 | minstrel_mcs_groups[orig_group].streams) | ||
344 | continue; | ||
345 | |||
346 | if (primary) | ||
347 | *idx = mi->groups[group].max_tp_rate; | ||
348 | else | ||
349 | *idx = mi->groups[group].max_tp_rate2; | ||
350 | break; | ||
351 | } | ||
352 | } | ||
353 | |||
/*
 * Try to start an A-MPDU (block-ack) session for the TID of an outgoing
 * QoS data frame if none is active yet for that TID.
 */
static void
minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	/* aggregation only applies to QoS data frames */
	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	/* leave EAPOL frames alone to keep the 802.1X handshake reliable */
	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	/* a session is already operational or being set up for this TID */
	if (likely(sta->ampdu_mlme.tid_state_tx[tid] != HT_AGG_STATE_IDLE))
		return;

	ieee80211_start_tx_ba_session(pubsta, tid);
}
373 | |||
374 | static void | ||
375 | minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, | ||
376 | struct ieee80211_sta *sta, void *priv_sta, | ||
377 | struct sk_buff *skb) | ||
378 | { | ||
379 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
380 | struct minstrel_ht_sta *mi = &msp->ht; | ||
381 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
382 | struct ieee80211_tx_rate *ar = info->status.rates; | ||
383 | struct minstrel_rate_stats *rate, *rate2; | ||
384 | struct minstrel_priv *mp = priv; | ||
385 | bool last = false; | ||
386 | int group; | ||
387 | int i = 0; | ||
388 | |||
389 | if (!msp->is_ht) | ||
390 | return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); | ||
391 | |||
392 | /* This packet was aggregated but doesn't carry status info */ | ||
393 | if ((info->flags & IEEE80211_TX_CTL_AMPDU) && | ||
394 | !(info->flags & IEEE80211_TX_STAT_AMPDU)) | ||
395 | return; | ||
396 | |||
397 | if (!info->status.ampdu_len) { | ||
398 | info->status.ampdu_ack_len = 1; | ||
399 | info->status.ampdu_len = 1; | ||
400 | } | ||
401 | |||
402 | mi->ampdu_packets++; | ||
403 | mi->ampdu_len += info->status.ampdu_len; | ||
404 | |||
405 | if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { | ||
406 | mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); | ||
407 | mi->sample_tries = 3; | ||
408 | mi->sample_count--; | ||
409 | } | ||
410 | |||
411 | if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { | ||
412 | mi->sample_packets += info->status.ampdu_len; | ||
413 | minstrel_next_sample_idx(mi); | ||
414 | } | ||
415 | |||
416 | for (i = 0; !last; i++) { | ||
417 | last = (i == IEEE80211_TX_MAX_RATES - 1) || | ||
418 | !minstrel_ht_txstat_valid(&ar[i + 1]); | ||
419 | |||
420 | if (!minstrel_ht_txstat_valid(&ar[i])) | ||
421 | break; | ||
422 | |||
423 | group = minstrel_ht_get_group_idx(&ar[i]); | ||
424 | rate = &mi->groups[group].rates[ar[i].idx % 8]; | ||
425 | |||
426 | if (last && (info->flags & IEEE80211_TX_STAT_ACK)) | ||
427 | rate->success += info->status.ampdu_ack_len; | ||
428 | |||
429 | rate->attempts += ar[i].count * info->status.ampdu_len; | ||
430 | } | ||
431 | |||
432 | /* | ||
433 | * check for sudden death of spatial multiplexing, | ||
434 | * downgrade to a lower number of streams if necessary. | ||
435 | */ | ||
436 | rate = minstrel_get_ratestats(mi, mi->max_tp_rate); | ||
437 | if (rate->attempts > 30 && | ||
438 | MINSTREL_FRAC(rate->success, rate->attempts) < | ||
439 | MINSTREL_FRAC(20, 100)) | ||
440 | minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); | ||
441 | |||
442 | rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); | ||
443 | if (rate->attempts > 30 && | ||
444 | MINSTREL_FRAC(rate->success, rate->attempts) < | ||
445 | MINSTREL_FRAC(20, 100)) | ||
446 | minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); | ||
447 | |||
448 | if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { | ||
449 | minstrel_ht_update_stats(mp, mi); | ||
450 | minstrel_aggr_check(mp, sta, skb); | ||
451 | } | ||
452 | } | ||
453 | |||
/*
 * Compute per-rate retry limits (with and without RTS/CTS) so that the
 * total transmission time of all retries stays within mp->segment_size.
 */
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                         int index)
{
	struct minstrel_rate_stats *mr;
	const struct mcs_group *group;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);

	mr = minstrel_get_ratestats(mi, index);
	/* unreliable rates get a single attempt to avoid wasting airtime */
	if (mr->probability < MINSTREL_FRAC(1, 10)) {
		mr->retry_count = 1;
		mr->retry_count_rtscts = 1;
		return;
	}

	mr->retry_count = 2;
	mr->retry_count_rtscts = 2;
	mr->retry_updated = true;

	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	/* data airtime scales with the average A-MPDU length */
	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
	/* start from the cost of the first attempt plus one retry */
	tx_time = 2 * (t_slot + mi->overhead + tx_time_data);
	tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data);
	/* add retries while the cumulative airtime fits in the segment;
	 * the contention window doubles (up to cw_max) with each retry */
	do {
		cw = (cw << 1) | 1;
		cw = min(cw, mp->cw_max);
		tx_time += cw + t_slot + mi->overhead;
		tx_time_rtscts += cw + t_slot + mi->overhead_rtscts;
		if (tx_time_rtscts < mp->segment_size)
			mr->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mr->retry_count < mp->max_retry));
}
490 | |||
491 | |||
492 | static void | ||
493 | minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | ||
494 | struct ieee80211_tx_rate *rate, int index, | ||
495 | struct ieee80211_tx_rate_control *txrc, | ||
496 | bool sample, bool rtscts) | ||
497 | { | ||
498 | const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; | ||
499 | struct minstrel_rate_stats *mr; | ||
500 | |||
501 | mr = minstrel_get_ratestats(mi, index); | ||
502 | if (!mr->retry_updated) | ||
503 | minstrel_calc_retransmit(mp, mi, index); | ||
504 | |||
505 | if (mr->probability < MINSTREL_FRAC(20, 100)) | ||
506 | rate->count = 2; | ||
507 | else if (rtscts) | ||
508 | rate->count = mr->retry_count_rtscts; | ||
509 | else | ||
510 | rate->count = mr->retry_count; | ||
511 | |||
512 | rate->flags = IEEE80211_TX_RC_MCS | group->flags; | ||
513 | if (txrc->short_preamble) | ||
514 | rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; | ||
515 | if (txrc->rts || rtscts) | ||
516 | rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; | ||
517 | rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; | ||
518 | } | ||
519 | |||
520 | static inline int | ||
521 | minstrel_get_duration(int index) | ||
522 | { | ||
523 | const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; | ||
524 | return group->duration[index % MCS_GROUP_RATES]; | ||
525 | } | ||
526 | |||
/*
 * Pick a rate index to sample for the next frame, or -1 if no sampling
 * should happen now (budget exhausted, wait period active, or the
 * candidate is not worth probing).
 */
static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mr;
	struct minstrel_mcs_group_data *mg;
	int sample_idx = 0;

	/* still within the mandatory wait period between sample bursts */
	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

	mi->sample_tries--;
	mg = &mi->groups[mi->sample_group];
	/* local index within the group comes from the shuffled table ... */
	sample_idx = sample_table[mg->column][mg->index];
	mr = &mg->rates[sample_idx];
	/* ... then translate it to a global rate index */
	sample_idx += mi->sample_group * MCS_GROUP_RATES;

	/*
	 * When not using MRR, do not sample if the probability is already
	 * higher than 95% to avoid wasting airtime
	 */
	if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
		goto next;

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */
	if (minstrel_get_duration(sample_idx) >
	    minstrel_get_duration(mi->max_tp_rate)) {
		if (mr->sample_skipped < 10)
			goto next;

		if (mi->sample_slow++ > 2)
			goto next;
	}

	return sample_idx;

next:
	/* skipped candidates still advance the sampling position */
	minstrel_next_sample_idx(mi);
	return -1;
}
574 | |||
/*
 * Fill the frame's rate table: slot 0 is either a sample rate or the best
 * throughput rate, slot 1 the best (or second best) throughput rate with
 * RTS/CTS, slot 2 the most robust rate, slot 3 terminates the table.
 */
static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                     struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_priv *mp = priv;
	int sample_idx;

	/* management/low-rate frames are handled by the common helper */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	/* non-HT stations are handled by the legacy minstrel algorithm */
	if (!msp->is_ht)
		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);

	info->flags |= mi->tx_flags;
	sample_idx = minstrel_get_sample_rate(mp, mi);
	if (sample_idx >= 0) {
		minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
			txrc, true, false);
		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
			txrc, false, true);
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
	} else {
		minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
			txrc, false, false);
		minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
			txrc, false, true);
	}
	minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);

	/* terminate the rate table */
	ar[3].count = 0;
	ar[3].idx = -1;

	mi->total_packets++;

	/* wraparound */
	if (mi->total_packets == ~0) {
		mi->total_packets = 0;
		mi->sample_packets = 0;
	}
}
619 | |||
620 | static void | ||
621 | minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, | ||
622 | struct ieee80211_sta *sta, void *priv_sta, | ||
623 | enum nl80211_channel_type oper_chan_type) | ||
624 | { | ||
625 | struct minstrel_priv *mp = priv; | ||
626 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
627 | struct minstrel_ht_sta *mi = &msp->ht; | ||
628 | struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; | ||
629 | struct ieee80211_local *local = hw_to_local(mp->hw); | ||
630 | u16 sta_cap = sta->ht_cap.cap; | ||
631 | int ack_dur; | ||
632 | int stbc; | ||
633 | int i; | ||
634 | |||
635 | /* fall back to the old minstrel for legacy stations */ | ||
636 | if (sta && !sta->ht_cap.ht_supported) { | ||
637 | msp->is_ht = false; | ||
638 | memset(&msp->legacy, 0, sizeof(msp->legacy)); | ||
639 | msp->legacy.r = msp->ratelist; | ||
640 | msp->legacy.sample_table = msp->sample_table; | ||
641 | return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); | ||
642 | } | ||
643 | |||
644 | BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != | ||
645 | MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); | ||
646 | |||
647 | msp->is_ht = true; | ||
648 | memset(mi, 0, sizeof(*mi)); | ||
649 | mi->stats_update = jiffies; | ||
650 | |||
651 | ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); | ||
652 | mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; | ||
653 | mi->overhead_rtscts = mi->overhead + 2 * ack_dur; | ||
654 | |||
655 | mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); | ||
656 | |||
657 | /* When using MRR, sample more on the first attempt, without delay */ | ||
658 | if (mp->has_mrr) { | ||
659 | mi->sample_count = 16; | ||
660 | mi->sample_wait = 0; | ||
661 | } else { | ||
662 | mi->sample_count = 8; | ||
663 | mi->sample_wait = 8; | ||
664 | } | ||
665 | mi->sample_tries = 4; | ||
666 | |||
667 | stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> | ||
668 | IEEE80211_HT_CAP_RX_STBC_SHIFT; | ||
669 | mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; | ||
670 | |||
671 | if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) | ||
672 | mi->tx_flags |= IEEE80211_TX_CTL_LDPC; | ||
673 | |||
674 | if (oper_chan_type != NL80211_CHAN_HT40MINUS && | ||
675 | oper_chan_type != NL80211_CHAN_HT40PLUS) | ||
676 | sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; | ||
677 | |||
678 | for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { | ||
679 | u16 req = 0; | ||
680 | |||
681 | mi->groups[i].supported = 0; | ||
682 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { | ||
683 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
684 | req |= IEEE80211_HT_CAP_SGI_40; | ||
685 | else | ||
686 | req |= IEEE80211_HT_CAP_SGI_20; | ||
687 | } | ||
688 | |||
689 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
690 | req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; | ||
691 | |||
692 | if ((sta_cap & req) != req) | ||
693 | continue; | ||
694 | |||
695 | mi->groups[i].supported = | ||
696 | mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; | ||
697 | } | ||
698 | } | ||
699 | |||
/* rate_control_ops.rate_init: initialize caps from the current channel type. */
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
                      struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_priv *mp = priv;

	minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
}
708 | |||
/* rate_control_ops.rate_update: re-derive caps for a changed channel type. */
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
                        struct ieee80211_sta *sta, void *priv_sta,
                        u32 changed, enum nl80211_channel_type oper_chan_type)
{
	minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
}
716 | |||
717 | static void * | ||
718 | minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) | ||
719 | { | ||
720 | struct ieee80211_supported_band *sband; | ||
721 | struct minstrel_ht_sta_priv *msp; | ||
722 | struct minstrel_priv *mp = priv; | ||
723 | struct ieee80211_hw *hw = mp->hw; | ||
724 | int max_rates = 0; | ||
725 | int i; | ||
726 | |||
727 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { | ||
728 | sband = hw->wiphy->bands[i]; | ||
729 | if (sband && sband->n_bitrates > max_rates) | ||
730 | max_rates = sband->n_bitrates; | ||
731 | } | ||
732 | |||
733 | msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp); | ||
734 | if (!msp) | ||
735 | return NULL; | ||
736 | |||
737 | msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp); | ||
738 | if (!msp->ratelist) | ||
739 | goto error; | ||
740 | |||
741 | msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp); | ||
742 | if (!msp->sample_table) | ||
743 | goto error1; | ||
744 | |||
745 | return msp; | ||
746 | |||
747 | error1: | ||
748 | kfree(msp->sample_table); | ||
749 | error: | ||
750 | kfree(msp); | ||
751 | return NULL; | ||
752 | } | ||
753 | |||
/* Release everything allocated by minstrel_ht_alloc_sta(). */
static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	kfree(msp->sample_table);
	kfree(msp->ratelist);
	kfree(msp);
}
763 | |||
/* Per-hw private data is shared with the legacy minstrel algorithm. */
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return mac80211_minstrel.alloc(hw, debugfsdir);
}
769 | |||
/* Counterpart of minstrel_ht_alloc(): delegate to the legacy algorithm. */
static void
minstrel_ht_free(void *priv)
{
	mac80211_minstrel.free(priv);
}
775 | |||
/* mac80211 rate control backend registration table for minstrel_ht */
static struct rate_control_ops mac80211_minstrel_ht = {
	.name = "minstrel_ht",
	.tx_status = minstrel_ht_tx_status,
	.get_rate = minstrel_ht_get_rate,
	.rate_init = minstrel_ht_rate_init,
	.rate_update = minstrel_ht_rate_update,
	.alloc_sta = minstrel_ht_alloc_sta,
	.free_sta = minstrel_ht_free_sta,
	.alloc = minstrel_ht_alloc,
	.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
};
791 | |||
792 | |||
793 | static void | ||
794 | init_sample_table(void) | ||
795 | { | ||
796 | int col, i, new_idx; | ||
797 | u8 rnd[MCS_GROUP_RATES]; | ||
798 | |||
799 | memset(sample_table, 0xff, sizeof(sample_table)); | ||
800 | for (col = 0; col < SAMPLE_COLUMNS; col++) { | ||
801 | for (i = 0; i < MCS_GROUP_RATES; i++) { | ||
802 | get_random_bytes(rnd, sizeof(rnd)); | ||
803 | new_idx = (i + rnd[i]) % MCS_GROUP_RATES; | ||
804 | |||
805 | while (sample_table[col][new_idx] != 0xff) | ||
806 | new_idx = (new_idx + 1) % MCS_GROUP_RATES; | ||
807 | |||
808 | sample_table[col][new_idx] = i; | ||
809 | } | ||
810 | } | ||
811 | } | ||
812 | |||
/* Module init: build the sampling permutations and register the backend. */
int __init
rc80211_minstrel_ht_init(void)
{
	init_sample_table();
	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
819 | |||
/* Module exit: unregister the rate control backend. */
void
rc80211_minstrel_ht_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h new file mode 100644 index 000000000000..696c0fc6e0b7 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef __RC_MINSTREL_HT_H | ||
10 | #define __RC_MINSTREL_HT_H | ||
11 | |||
/*
 * The number of streams can be changed to 2 to reduce code
 * size and memory footprint.
 * Each stream count has MINSTREL_STREAM_GROUPS groups, one per
 * combination of the SGI and HT40 flags.
 */
#define MINSTREL_MAX_STREAMS	3
#define MINSTREL_STREAM_GROUPS	4
18 | |||
/* scaled fraction values (fixed point with MINSTREL_SCALE fractional bits) */
#define MINSTREL_SCALE  16
/* fix: parenthesize the div argument so that expression arguments
 * (e.g. MINSTREL_FRAC(x, a + b)) divide by the whole expression */
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

/* number of rates per MCS group */
#define MCS_GROUP_RATES	8
25 | |||
/* Static description of one MCS group (one stream count / SGI / HT40 combo) */
struct mcs_group {
	u32 flags;		/* IEEE80211_TX_RC_SHORT_GI / _40_MHZ_WIDTH */
	unsigned int streams;	/* number of spatial streams (1-based) */
	unsigned int duration[MCS_GROUP_RATES]; /* per-rate airtime */
};
31 | |||
/* Runtime statistics for a single MCS rate */
struct minstrel_rate_stats {
	/* current / last sampling period attempts/success counters */
	unsigned int attempts, last_attempts;
	unsigned int success, last_success;

	/* total attempts/success counters */
	u64 att_hist, succ_hist;

	/* current throughput */
	unsigned int cur_tp;

	/* packet delivery probabilities (MINSTREL_FRAC fixed point):
	 * cur_prob is the last period's raw value, probability the EWMA */
	unsigned int cur_prob, probability;

	/* maximum retry counts */
	unsigned int retry_count;
	unsigned int retry_count_rtscts;

	/* set when retry counts were derived from current stats */
	bool retry_updated;
	/* consecutive stat periods without any attempt on this rate */
	u8 sample_skipped;
};
53 | |||
/* Per-station runtime data for one MCS group */
struct minstrel_mcs_group_data {
	u8 index;	/* current position within the sample column */
	u8 column;	/* current sample_table column */

	/* bitfield of supported MCS rates of this group */
	u8 supported;

	/* selected primary rates (global rate indices) */
	unsigned int max_tp_rate;
	unsigned int max_tp_rate2;
	unsigned int max_prob_rate;

	/* MCS rate statistics */
	struct minstrel_rate_stats rates[MCS_GROUP_RATES];
};
69 | |||
/* Per-station state of the HT minstrel algorithm */
struct minstrel_ht_sta {
	/* ampdu length (average, per sampling interval) */
	unsigned int ampdu_len;
	unsigned int ampdu_packets;

	/* ampdu length (EWMA) */
	unsigned int avg_ampdu_len;

	/* best throughput rate */
	unsigned int max_tp_rate;

	/* second best throughput rate */
	unsigned int max_tp_rate2;

	/* best probability rate */
	unsigned int max_prob_rate;

	/* time of last status update */
	unsigned long stats_update;

	/* overhead time in usec for each frame */
	unsigned int overhead;
	unsigned int overhead_rtscts;

	unsigned int total_packets;
	unsigned int sample_packets;

	/* tx flags to add for frames for this sta */
	u32 tx_flags;

	/* sampling state machine: wait period, tries left in the current
	 * burst, remaining bursts this interval, slow-rate sample counter */
	u8 sample_wait;
	u8 sample_tries;
	u8 sample_count;
	u8 sample_slow;

	/* current MCS group to be sampled */
	u8 sample_group;

	/* MCS rate group info and statistics */
	struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS];
};
111 | |||
/* Per-station private data: HT state or legacy state, selected by is_ht */
struct minstrel_ht_sta_priv {
	union {
		struct minstrel_ht_sta ht;	/* valid when is_ht */
		struct minstrel_sta_info legacy; /* valid otherwise */
	};
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *dbg_stats;
#endif
	/* backing storage handed to the legacy algorithm */
	void *ratelist;
	void *sample_table;
	bool is_ht;
};
124 | |||
125 | void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); | ||
126 | void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta); | ||
127 | |||
128 | #endif | ||
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c new file mode 100644 index 000000000000..4fb3ccbd8b40 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/skbuff.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/ieee80211.h> | ||
13 | #include <net/mac80211.h> | ||
14 | #include "rc80211_minstrel.h" | ||
15 | #include "rc80211_minstrel_ht.h" | ||
16 | |||
17 | extern const struct mcs_group minstrel_mcs_groups[]; | ||
18 | |||
/*
 * debugfs open handler: render the per-station HT rate table into a
 * freshly allocated buffer that minstrel_stats_read() serves out.
 * For legacy stations, delegates to the old minstrel's open handler.
 *
 * NOTE(review): the table is formatted with unchecked sprintf() into a
 * fixed 8192-byte buffer; looks generously sized for the maximum of
 * MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS * MCS_GROUP_RATES rows,
 * but verify if the row format ever grows.
 */
static int
minstrel_ht_stats_open(struct inode *inode, struct file *file)
{
	struct minstrel_ht_sta_priv *msp = inode->i_private;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_debugfs_info *ms;
	unsigned int i, j, tp, prob, eprob;
	char *p;
	int ret;

	/* legacy station: temporarily swap i_private for the old handler */
	if (!msp->is_ht) {
		inode->i_private = &msp->legacy;
		ret = minstrel_stats_open(inode, file);
		inode->i_private = msp;
		return ret;
	}

	ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
	if (!ms)
		return -ENOMEM;

	file->private_data = ms;
	p = ms->buf;
	p += sprintf(p, "type      rate     throughput  ewma prob   this prob  "
			"this succ/attempt   success    attempts\n");
	for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
		char htmode = '2';
		char gimode = 'L';

		if (!mi->groups[i].supported)
			continue;

		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			htmode = '4';
		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
			gimode = 'S';

		for (j = 0; j < MCS_GROUP_RATES; j++) {
			struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
			int idx = i * MCS_GROUP_RATES + j;

			if (!(mi->groups[i].supported & BIT(j)))
				continue;

			p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);

			/* mark the currently selected primary rates */
			*(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
			*(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
			*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
			p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
					MCS_GROUP_RATES + j);

			/* convert fixed-point values to printable tenths */
			tp = mr->cur_tp / 10;
			prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
			eprob = MINSTREL_TRUNC(mr->probability * 1000);

			p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u    "
					"%3u(%3u)   %8llu    %8llu\n",
					tp / 10, tp % 10,
					eprob / 10, eprob % 10,
					prob / 10, prob % 10,
					mr->last_success,
					mr->last_attempts,
					(unsigned long long)mr->succ_hist,
					(unsigned long long)mr->att_hist);
		}
	}
	p += sprintf(p, "\nTotal packet count::    ideal %d      "
			"lookaround %d\n",
			max(0, (int) mi->total_packets - (int) mi->sample_packets),
			mi->sample_packets);
	p += sprintf(p, "Average A-MPDU length: %d.%d\n",
		MINSTREL_TRUNC(mi->avg_ampdu_len),
		MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
	ms->len = p - ms->buf;

	return 0;
}
97 | |||
/* debugfs fops; read/release are shared with the legacy minstrel code */
static const struct file_operations minstrel_ht_stat_fops = {
	.owner = THIS_MODULE,
	.open = minstrel_ht_stats_open,
	.read = minstrel_stats_read,
	.release = minstrel_stats_release,
};
104 | |||
/* Create the read-only per-station "rc_stats" debugfs file. */
void
minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
			&minstrel_ht_stat_fops);
}
113 | |||
/* Remove the per-station debugfs file created by the add hook. */
void
minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	debugfs_remove(msp->dbg_stats);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6e2a7bcd8cb8..6a15632e7eca 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -825,6 +825,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
825 | ieee80211_rx_result result = RX_DROP_UNUSABLE; | 825 | ieee80211_rx_result result = RX_DROP_UNUSABLE; |
826 | struct ieee80211_key *stakey = NULL; | 826 | struct ieee80211_key *stakey = NULL; |
827 | int mmie_keyidx = -1; | 827 | int mmie_keyidx = -1; |
828 | __le16 fc; | ||
828 | 829 | ||
829 | /* | 830 | /* |
830 | * Key selection 101 | 831 | * Key selection 101 |
@@ -866,13 +867,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
866 | if (rx->sta) | 867 | if (rx->sta) |
867 | stakey = rcu_dereference(rx->sta->key); | 868 | stakey = rcu_dereference(rx->sta->key); |
868 | 869 | ||
869 | if (!ieee80211_has_protected(hdr->frame_control)) | 870 | fc = hdr->frame_control; |
871 | |||
872 | if (!ieee80211_has_protected(fc)) | ||
870 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); | 873 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); |
871 | 874 | ||
872 | if (!is_multicast_ether_addr(hdr->addr1) && stakey) { | 875 | if (!is_multicast_ether_addr(hdr->addr1) && stakey) { |
873 | rx->key = stakey; | 876 | rx->key = stakey; |
874 | /* Skip decryption if the frame is not protected. */ | 877 | /* Skip decryption if the frame is not protected. */ |
875 | if (!ieee80211_has_protected(hdr->frame_control)) | 878 | if (!ieee80211_has_protected(fc)) |
876 | return RX_CONTINUE; | 879 | return RX_CONTINUE; |
877 | } else if (mmie_keyidx >= 0) { | 880 | } else if (mmie_keyidx >= 0) { |
878 | /* Broadcast/multicast robust management frame / BIP */ | 881 | /* Broadcast/multicast robust management frame / BIP */ |
@@ -884,7 +887,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
884 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) | 887 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) |
885 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ | 888 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ |
886 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); | 889 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); |
887 | } else if (!ieee80211_has_protected(hdr->frame_control)) { | 890 | } else if (!ieee80211_has_protected(fc)) { |
888 | /* | 891 | /* |
889 | * The frame was not protected, so skip decryption. However, we | 892 | * The frame was not protected, so skip decryption. However, we |
890 | * need to set rx->key if there is a key that could have been | 893 | * need to set rx->key if there is a key that could have been |
@@ -892,7 +895,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
892 | * have been expected. | 895 | * have been expected. |
893 | */ | 896 | */ |
894 | struct ieee80211_key *key = NULL; | 897 | struct ieee80211_key *key = NULL; |
895 | if (ieee80211_is_mgmt(hdr->frame_control) && | 898 | if (ieee80211_is_mgmt(fc) && |
896 | is_multicast_ether_addr(hdr->addr1) && | 899 | is_multicast_ether_addr(hdr->addr1) && |
897 | (key = rcu_dereference(rx->sdata->default_mgmt_key))) | 900 | (key = rcu_dereference(rx->sdata->default_mgmt_key))) |
898 | rx->key = key; | 901 | rx->key = key; |
@@ -914,7 +917,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
914 | (status->flag & RX_FLAG_IV_STRIPPED)) | 917 | (status->flag & RX_FLAG_IV_STRIPPED)) |
915 | return RX_CONTINUE; | 918 | return RX_CONTINUE; |
916 | 919 | ||
917 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 920 | hdrlen = ieee80211_hdrlen(fc); |
918 | 921 | ||
919 | if (rx->skb->len < 8 + hdrlen) | 922 | if (rx->skb->len < 8 + hdrlen) |
920 | return RX_DROP_UNUSABLE; /* TODO: count this? */ | 923 | return RX_DROP_UNUSABLE; /* TODO: count this? */ |
@@ -947,19 +950,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
947 | 950 | ||
948 | if (skb_linearize(rx->skb)) | 951 | if (skb_linearize(rx->skb)) |
949 | return RX_DROP_UNUSABLE; | 952 | return RX_DROP_UNUSABLE; |
950 | 953 | /* the hdr variable is invalid now! */ | |
951 | hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
952 | |||
953 | /* Check for weak IVs if possible */ | ||
954 | if (rx->sta && rx->key->conf.alg == ALG_WEP && | ||
955 | ieee80211_is_data(hdr->frame_control) && | ||
956 | (!(status->flag & RX_FLAG_IV_STRIPPED) || | ||
957 | !(status->flag & RX_FLAG_DECRYPTED)) && | ||
958 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | ||
959 | rx->sta->wep_weak_iv_count++; | ||
960 | 954 | ||
961 | switch (rx->key->conf.alg) { | 955 | switch (rx->key->conf.alg) { |
962 | case ALG_WEP: | 956 | case ALG_WEP: |
957 | /* Check for weak IVs if possible */ | ||
958 | if (rx->sta && ieee80211_is_data(fc) && | ||
959 | (!(status->flag & RX_FLAG_IV_STRIPPED) || | ||
960 | !(status->flag & RX_FLAG_DECRYPTED)) && | ||
961 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | ||
962 | rx->sta->wep_weak_iv_count++; | ||
963 | |||
963 | result = ieee80211_crypto_wep_decrypt(rx); | 964 | result = ieee80211_crypto_wep_decrypt(rx); |
964 | break; | 965 | break; |
965 | case ALG_TKIP: | 966 | case ALG_TKIP: |
@@ -1818,17 +1819,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1818 | return RX_CONTINUE; | 1819 | return RX_CONTINUE; |
1819 | 1820 | ||
1820 | if (ieee80211_is_back_req(bar->frame_control)) { | 1821 | if (ieee80211_is_back_req(bar->frame_control)) { |
1822 | struct { | ||
1823 | __le16 control, start_seq_num; | ||
1824 | } __packed bar_data; | ||
1825 | |||
1821 | if (!rx->sta) | 1826 | if (!rx->sta) |
1822 | return RX_DROP_MONITOR; | 1827 | return RX_DROP_MONITOR; |
1828 | |||
1829 | if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), | ||
1830 | &bar_data, sizeof(bar_data))) | ||
1831 | return RX_DROP_MONITOR; | ||
1832 | |||
1823 | spin_lock(&rx->sta->lock); | 1833 | spin_lock(&rx->sta->lock); |
1824 | tid = le16_to_cpu(bar->control) >> 12; | 1834 | tid = le16_to_cpu(bar_data.control) >> 12; |
1825 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { | 1835 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { |
1826 | spin_unlock(&rx->sta->lock); | 1836 | spin_unlock(&rx->sta->lock); |
1827 | return RX_DROP_MONITOR; | 1837 | return RX_DROP_MONITOR; |
1828 | } | 1838 | } |
1829 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | 1839 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; |
1830 | 1840 | ||
1831 | start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; | 1841 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; |
1832 | 1842 | ||
1833 | /* reset session timer */ | 1843 | /* reset session timer */ |
1834 | if (tid_agg_rx->timeout) | 1844 | if (tid_agg_rx->timeout) |
@@ -1843,7 +1853,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1843 | return RX_QUEUED; | 1853 | return RX_QUEUED; |
1844 | } | 1854 | } |
1845 | 1855 | ||
1846 | return RX_CONTINUE; | 1856 | /* |
1857 | * After this point, we only want management frames, | ||
1858 | * so we can drop all remaining control frames to | ||
1859 | * cooked monitor interfaces. | ||
1860 | */ | ||
1861 | return RX_DROP_MONITOR; | ||
1847 | } | 1862 | } |
1848 | 1863 | ||
1849 | static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, | 1864 | static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, |
@@ -1935,6 +1950,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1935 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1950 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1936 | break; | 1951 | break; |
1937 | 1952 | ||
1953 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1954 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1955 | |||
1938 | switch (mgmt->u.action.u.addba_req.action_code) { | 1956 | switch (mgmt->u.action.u.addba_req.action_code) { |
1939 | case WLAN_ACTION_ADDBA_REQ: | 1957 | case WLAN_ACTION_ADDBA_REQ: |
1940 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1958 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
@@ -2139,7 +2157,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, | |||
2139 | u8 rate_or_pad; | 2157 | u8 rate_or_pad; |
2140 | __le16 chan_freq; | 2158 | __le16 chan_freq; |
2141 | __le16 chan_flags; | 2159 | __le16 chan_flags; |
2142 | } __attribute__ ((packed)) *rthdr; | 2160 | } __packed *rthdr; |
2143 | struct sk_buff *skb = rx->skb, *skb2; | 2161 | struct sk_buff *skb = rx->skb, *skb2; |
2144 | struct net_device *prev_dev = NULL; | 2162 | struct net_device *prev_dev = NULL; |
2145 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 2163 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ba9360a475b0..4607da9a6ff7 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -648,14 +648,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
648 | 648 | ||
649 | if (sta->key) { | 649 | if (sta->key) { |
650 | ieee80211_key_free(sta->key); | 650 | ieee80211_key_free(sta->key); |
651 | /* | ||
652 | * We have only unlinked the key, and actually destroying it | ||
653 | * may mean it is removed from hardware which requires that | ||
654 | * the key->sta pointer is still valid, so flush the key todo | ||
655 | * list here. | ||
656 | */ | ||
657 | ieee80211_key_todo(); | ||
658 | |||
659 | WARN_ON(sta->key); | 651 | WARN_ON(sta->key); |
660 | } | 652 | } |
661 | 653 | ||
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index df9d45544ca5..813da34db733 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -120,6 +120,28 @@ struct tid_ampdu_rx { | |||
120 | }; | 120 | }; |
121 | 121 | ||
122 | /** | 122 | /** |
123 | * struct sta_ampdu_mlme - STA aggregation information. | ||
124 | * | ||
125 | * @tid_active_rx: TID's state in Rx session state machine. | ||
126 | * @tid_rx: aggregation info for Rx per TID | ||
127 | * @tid_state_tx: TID's state in Tx session state machine. | ||
128 | * @tid_tx: aggregation info for Tx per TID | ||
129 | * @addba_req_num: number of times addBA request has been sent. | ||
130 | * @dialog_token_allocator: dialog token enumerator for each new session; | ||
131 | */ | ||
132 | struct sta_ampdu_mlme { | ||
133 | /* rx */ | ||
134 | bool tid_active_rx[STA_TID_NUM]; | ||
135 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | ||
136 | /* tx */ | ||
137 | u8 tid_state_tx[STA_TID_NUM]; | ||
138 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | ||
139 | u8 addba_req_num[STA_TID_NUM]; | ||
140 | u8 dialog_token_allocator; | ||
141 | }; | ||
142 | |||
143 | |||
144 | /** | ||
123 | * enum plink_state - state of a mesh peer link finite state machine | 145 | * enum plink_state - state of a mesh peer link finite state machine |
124 | * | 146 | * |
125 | * @PLINK_LISTEN: initial state, considered the implicit state of non existant | 147 | * @PLINK_LISTEN: initial state, considered the implicit state of non existant |
@@ -143,28 +165,6 @@ enum plink_state { | |||
143 | }; | 165 | }; |
144 | 166 | ||
145 | /** | 167 | /** |
146 | * struct sta_ampdu_mlme - STA aggregation information. | ||
147 | * | ||
148 | * @tid_active_rx: TID's state in Rx session state machine. | ||
149 | * @tid_rx: aggregation info for Rx per TID | ||
150 | * @tid_state_tx: TID's state in Tx session state machine. | ||
151 | * @tid_tx: aggregation info for Tx per TID | ||
152 | * @addba_req_num: number of times addBA request has been sent. | ||
153 | * @dialog_token_allocator: dialog token enumerator for each new session; | ||
154 | */ | ||
155 | struct sta_ampdu_mlme { | ||
156 | /* rx */ | ||
157 | bool tid_active_rx[STA_TID_NUM]; | ||
158 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | ||
159 | /* tx */ | ||
160 | u8 tid_state_tx[STA_TID_NUM]; | ||
161 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | ||
162 | u8 addba_req_num[STA_TID_NUM]; | ||
163 | u8 dialog_token_allocator; | ||
164 | }; | ||
165 | |||
166 | |||
167 | /** | ||
168 | * struct sta_info - STA information | 168 | * struct sta_info - STA information |
169 | * | 169 | * |
170 | * This structure collects information about a station that | 170 | * This structure collects information about a station that |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 94613af009f3..34da67995d94 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -47,7 +47,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
47 | /* | 47 | /* |
48 | * This skb 'survived' a round-trip through the driver, and | 48 | * This skb 'survived' a round-trip through the driver, and |
49 | * hopefully the driver didn't mangle it too badly. However, | 49 | * hopefully the driver didn't mangle it too badly. However, |
50 | * we can definitely not rely on the the control information | 50 | * we can definitely not rely on the control information |
51 | * being correct. Clear it so we don't get junk there, and | 51 | * being correct. Clear it so we don't get junk there, and |
52 | * indicate that it needs new processing, but must not be | 52 | * indicate that it needs new processing, but must not be |
53 | * modified/encrypted again. | 53 | * modified/encrypted again. |
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index be3d4a698692..4157717ed786 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -840,7 +840,7 @@ static void ieee80211_work_work(struct work_struct *work) | |||
840 | 840 | ||
841 | /* | 841 | /* |
842 | * ieee80211_queue_work() should have picked up most cases, | 842 | * ieee80211_queue_work() should have picked up most cases, |
843 | * here we'll pick the the rest. | 843 | * here we'll pick the rest. |
844 | */ | 844 | */ |
845 | if (WARN(local->suspended, "work scheduled while going to suspend\n")) | 845 | if (WARN(local->suspended, "work scheduled while going to suspend\n")) |
846 | return; | 846 | return; |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 93c15a107b2c..02b078e11cf3 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -90,10 +90,10 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos) | |||
90 | &dest->addr.ip); | 90 | &dest->addr.ip); |
91 | return NULL; | 91 | return NULL; |
92 | } | 92 | } |
93 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); | 93 | __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst)); |
94 | IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", | 94 | IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", |
95 | &dest->addr.ip, | 95 | &dest->addr.ip, |
96 | atomic_read(&rt->u.dst.__refcnt), rtos); | 96 | atomic_read(&rt->dst.__refcnt), rtos); |
97 | } | 97 | } |
98 | spin_unlock(&dest->dst_lock); | 98 | spin_unlock(&dest->dst_lock); |
99 | } else { | 99 | } else { |
@@ -148,10 +148,10 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) | |||
148 | &dest->addr.in6); | 148 | &dest->addr.in6); |
149 | return NULL; | 149 | return NULL; |
150 | } | 150 | } |
151 | __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); | 151 | __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst)); |
152 | IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n", | 152 | IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n", |
153 | &dest->addr.in6, | 153 | &dest->addr.in6, |
154 | atomic_read(&rt->u.dst.__refcnt)); | 154 | atomic_read(&rt->dst.__refcnt)); |
155 | } | 155 | } |
156 | spin_unlock(&dest->dst_lock); | 156 | spin_unlock(&dest->dst_lock); |
157 | } else { | 157 | } else { |
@@ -198,7 +198,7 @@ do { \ | |||
198 | (skb)->ipvs_property = 1; \ | 198 | (skb)->ipvs_property = 1; \ |
199 | skb_forward_csum(skb); \ | 199 | skb_forward_csum(skb); \ |
200 | NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ | 200 | NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ |
201 | (rt)->u.dst.dev, dst_output); \ | 201 | (rt)->dst.dev, dst_output); \ |
202 | } while (0) | 202 | } while (0) |
203 | 203 | ||
204 | 204 | ||
@@ -245,7 +245,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
245 | } | 245 | } |
246 | 246 | ||
247 | /* MTU checking */ | 247 | /* MTU checking */ |
248 | mtu = dst_mtu(&rt->u.dst); | 248 | mtu = dst_mtu(&rt->dst); |
249 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { | 249 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { |
250 | ip_rt_put(rt); | 250 | ip_rt_put(rt); |
251 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 251 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -265,7 +265,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
265 | 265 | ||
266 | /* drop old route */ | 266 | /* drop old route */ |
267 | skb_dst_drop(skb); | 267 | skb_dst_drop(skb); |
268 | skb_dst_set(skb, &rt->u.dst); | 268 | skb_dst_set(skb, &rt->dst); |
269 | 269 | ||
270 | /* Another hack: avoid icmp_send in ip_fragment */ | 270 | /* Another hack: avoid icmp_send in ip_fragment */ |
271 | skb->local_df = 1; | 271 | skb->local_df = 1; |
@@ -309,9 +309,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
309 | } | 309 | } |
310 | 310 | ||
311 | /* MTU checking */ | 311 | /* MTU checking */ |
312 | mtu = dst_mtu(&rt->u.dst); | 312 | mtu = dst_mtu(&rt->dst); |
313 | if (skb->len > mtu) { | 313 | if (skb->len > mtu) { |
314 | dst_release(&rt->u.dst); | 314 | dst_release(&rt->dst); |
315 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 315 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
316 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 316 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
317 | goto tx_error; | 317 | goto tx_error; |
@@ -323,13 +323,13 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
323 | */ | 323 | */ |
324 | skb = skb_share_check(skb, GFP_ATOMIC); | 324 | skb = skb_share_check(skb, GFP_ATOMIC); |
325 | if (unlikely(skb == NULL)) { | 325 | if (unlikely(skb == NULL)) { |
326 | dst_release(&rt->u.dst); | 326 | dst_release(&rt->dst); |
327 | return NF_STOLEN; | 327 | return NF_STOLEN; |
328 | } | 328 | } |
329 | 329 | ||
330 | /* drop old route */ | 330 | /* drop old route */ |
331 | skb_dst_drop(skb); | 331 | skb_dst_drop(skb); |
332 | skb_dst_set(skb, &rt->u.dst); | 332 | skb_dst_set(skb, &rt->dst); |
333 | 333 | ||
334 | /* Another hack: avoid icmp_send in ip_fragment */ | 334 | /* Another hack: avoid icmp_send in ip_fragment */ |
335 | skb->local_df = 1; | 335 | skb->local_df = 1; |
@@ -376,7 +376,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
376 | goto tx_error_icmp; | 376 | goto tx_error_icmp; |
377 | 377 | ||
378 | /* MTU checking */ | 378 | /* MTU checking */ |
379 | mtu = dst_mtu(&rt->u.dst); | 379 | mtu = dst_mtu(&rt->dst); |
380 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { | 380 | if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { |
381 | ip_rt_put(rt); | 381 | ip_rt_put(rt); |
382 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 382 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -388,12 +388,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
388 | if (!skb_make_writable(skb, sizeof(struct iphdr))) | 388 | if (!skb_make_writable(skb, sizeof(struct iphdr))) |
389 | goto tx_error_put; | 389 | goto tx_error_put; |
390 | 390 | ||
391 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 391 | if (skb_cow(skb, rt->dst.dev->hard_header_len)) |
392 | goto tx_error_put; | 392 | goto tx_error_put; |
393 | 393 | ||
394 | /* drop old route */ | 394 | /* drop old route */ |
395 | skb_dst_drop(skb); | 395 | skb_dst_drop(skb); |
396 | skb_dst_set(skb, &rt->u.dst); | 396 | skb_dst_set(skb, &rt->dst); |
397 | 397 | ||
398 | /* mangle the packet */ | 398 | /* mangle the packet */ |
399 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 399 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
@@ -452,9 +452,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
452 | goto tx_error_icmp; | 452 | goto tx_error_icmp; |
453 | 453 | ||
454 | /* MTU checking */ | 454 | /* MTU checking */ |
455 | mtu = dst_mtu(&rt->u.dst); | 455 | mtu = dst_mtu(&rt->dst); |
456 | if (skb->len > mtu) { | 456 | if (skb->len > mtu) { |
457 | dst_release(&rt->u.dst); | 457 | dst_release(&rt->dst); |
458 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 458 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
459 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, | 459 | IP_VS_DBG_RL_PKT(0, pp, skb, 0, |
460 | "ip_vs_nat_xmit_v6(): frag needed for"); | 460 | "ip_vs_nat_xmit_v6(): frag needed for"); |
@@ -465,12 +465,12 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
465 | if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) | 465 | if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) |
466 | goto tx_error_put; | 466 | goto tx_error_put; |
467 | 467 | ||
468 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 468 | if (skb_cow(skb, rt->dst.dev->hard_header_len)) |
469 | goto tx_error_put; | 469 | goto tx_error_put; |
470 | 470 | ||
471 | /* drop old route */ | 471 | /* drop old route */ |
472 | skb_dst_drop(skb); | 472 | skb_dst_drop(skb); |
473 | skb_dst_set(skb, &rt->u.dst); | 473 | skb_dst_set(skb, &rt->dst); |
474 | 474 | ||
475 | /* mangle the packet */ | 475 | /* mangle the packet */ |
476 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) | 476 | if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) |
@@ -498,7 +498,7 @@ tx_error: | |||
498 | kfree_skb(skb); | 498 | kfree_skb(skb); |
499 | return NF_STOLEN; | 499 | return NF_STOLEN; |
500 | tx_error_put: | 500 | tx_error_put: |
501 | dst_release(&rt->u.dst); | 501 | dst_release(&rt->dst); |
502 | goto tx_error; | 502 | goto tx_error; |
503 | } | 503 | } |
504 | #endif | 504 | #endif |
@@ -549,9 +549,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
549 | if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos)))) | 549 | if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos)))) |
550 | goto tx_error_icmp; | 550 | goto tx_error_icmp; |
551 | 551 | ||
552 | tdev = rt->u.dst.dev; | 552 | tdev = rt->dst.dev; |
553 | 553 | ||
554 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 554 | mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); |
555 | if (mtu < 68) { | 555 | if (mtu < 68) { |
556 | ip_rt_put(rt); | 556 | ip_rt_put(rt); |
557 | IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); | 557 | IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); |
@@ -601,7 +601,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
601 | 601 | ||
602 | /* drop old route */ | 602 | /* drop old route */ |
603 | skb_dst_drop(skb); | 603 | skb_dst_drop(skb); |
604 | skb_dst_set(skb, &rt->u.dst); | 604 | skb_dst_set(skb, &rt->dst); |
605 | 605 | ||
606 | /* | 606 | /* |
607 | * Push down and install the IPIP header. | 607 | * Push down and install the IPIP header. |
@@ -615,7 +615,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
615 | iph->daddr = rt->rt_dst; | 615 | iph->daddr = rt->rt_dst; |
616 | iph->saddr = rt->rt_src; | 616 | iph->saddr = rt->rt_src; |
617 | iph->ttl = old_iph->ttl; | 617 | iph->ttl = old_iph->ttl; |
618 | ip_select_ident(iph, &rt->u.dst, NULL); | 618 | ip_select_ident(iph, &rt->dst, NULL); |
619 | 619 | ||
620 | /* Another hack: avoid icmp_send in ip_fragment */ | 620 | /* Another hack: avoid icmp_send in ip_fragment */ |
621 | skb->local_df = 1; | 621 | skb->local_df = 1; |
@@ -660,12 +660,12 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
660 | if (!rt) | 660 | if (!rt) |
661 | goto tx_error_icmp; | 661 | goto tx_error_icmp; |
662 | 662 | ||
663 | tdev = rt->u.dst.dev; | 663 | tdev = rt->dst.dev; |
664 | 664 | ||
665 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr); | 665 | mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr); |
666 | /* TODO IPv6: do we need this check in IPv6? */ | 666 | /* TODO IPv6: do we need this check in IPv6? */ |
667 | if (mtu < 1280) { | 667 | if (mtu < 1280) { |
668 | dst_release(&rt->u.dst); | 668 | dst_release(&rt->dst); |
669 | IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__); | 669 | IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__); |
670 | goto tx_error; | 670 | goto tx_error; |
671 | } | 671 | } |
@@ -674,7 +674,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
674 | 674 | ||
675 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { | 675 | if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { |
676 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 676 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
677 | dst_release(&rt->u.dst); | 677 | dst_release(&rt->dst); |
678 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 678 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
679 | goto tx_error; | 679 | goto tx_error; |
680 | } | 680 | } |
@@ -689,7 +689,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
689 | struct sk_buff *new_skb = | 689 | struct sk_buff *new_skb = |
690 | skb_realloc_headroom(skb, max_headroom); | 690 | skb_realloc_headroom(skb, max_headroom); |
691 | if (!new_skb) { | 691 | if (!new_skb) { |
692 | dst_release(&rt->u.dst); | 692 | dst_release(&rt->dst); |
693 | kfree_skb(skb); | 693 | kfree_skb(skb); |
694 | IP_VS_ERR_RL("%s(): no memory\n", __func__); | 694 | IP_VS_ERR_RL("%s(): no memory\n", __func__); |
695 | return NF_STOLEN; | 695 | return NF_STOLEN; |
@@ -707,7 +707,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
707 | 707 | ||
708 | /* drop old route */ | 708 | /* drop old route */ |
709 | skb_dst_drop(skb); | 709 | skb_dst_drop(skb); |
710 | skb_dst_set(skb, &rt->u.dst); | 710 | skb_dst_set(skb, &rt->dst); |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * Push down and install the IPIP header. | 713 | * Push down and install the IPIP header. |
@@ -760,7 +760,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
760 | goto tx_error_icmp; | 760 | goto tx_error_icmp; |
761 | 761 | ||
762 | /* MTU checking */ | 762 | /* MTU checking */ |
763 | mtu = dst_mtu(&rt->u.dst); | 763 | mtu = dst_mtu(&rt->dst); |
764 | if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { | 764 | if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { |
765 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); | 765 | icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); |
766 | ip_rt_put(rt); | 766 | ip_rt_put(rt); |
@@ -780,7 +780,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
780 | 780 | ||
781 | /* drop old route */ | 781 | /* drop old route */ |
782 | skb_dst_drop(skb); | 782 | skb_dst_drop(skb); |
783 | skb_dst_set(skb, &rt->u.dst); | 783 | skb_dst_set(skb, &rt->dst); |
784 | 784 | ||
785 | /* Another hack: avoid icmp_send in ip_fragment */ | 785 | /* Another hack: avoid icmp_send in ip_fragment */ |
786 | skb->local_df = 1; | 786 | skb->local_df = 1; |
@@ -813,10 +813,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
813 | goto tx_error_icmp; | 813 | goto tx_error_icmp; |
814 | 814 | ||
815 | /* MTU checking */ | 815 | /* MTU checking */ |
816 | mtu = dst_mtu(&rt->u.dst); | 816 | mtu = dst_mtu(&rt->dst); |
817 | if (skb->len > mtu) { | 817 | if (skb->len > mtu) { |
818 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 818 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
819 | dst_release(&rt->u.dst); | 819 | dst_release(&rt->dst); |
820 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 820 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
821 | goto tx_error; | 821 | goto tx_error; |
822 | } | 822 | } |
@@ -827,13 +827,13 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
827 | */ | 827 | */ |
828 | skb = skb_share_check(skb, GFP_ATOMIC); | 828 | skb = skb_share_check(skb, GFP_ATOMIC); |
829 | if (unlikely(skb == NULL)) { | 829 | if (unlikely(skb == NULL)) { |
830 | dst_release(&rt->u.dst); | 830 | dst_release(&rt->dst); |
831 | return NF_STOLEN; | 831 | return NF_STOLEN; |
832 | } | 832 | } |
833 | 833 | ||
834 | /* drop old route */ | 834 | /* drop old route */ |
835 | skb_dst_drop(skb); | 835 | skb_dst_drop(skb); |
836 | skb_dst_set(skb, &rt->u.dst); | 836 | skb_dst_set(skb, &rt->dst); |
837 | 837 | ||
838 | /* Another hack: avoid icmp_send in ip_fragment */ | 838 | /* Another hack: avoid icmp_send in ip_fragment */ |
839 | skb->local_df = 1; | 839 | skb->local_df = 1; |
@@ -888,7 +888,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
888 | goto tx_error_icmp; | 888 | goto tx_error_icmp; |
889 | 889 | ||
890 | /* MTU checking */ | 890 | /* MTU checking */ |
891 | mtu = dst_mtu(&rt->u.dst); | 891 | mtu = dst_mtu(&rt->dst); |
892 | if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { | 892 | if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { |
893 | ip_rt_put(rt); | 893 | ip_rt_put(rt); |
894 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 894 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -900,12 +900,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
900 | if (!skb_make_writable(skb, offset)) | 900 | if (!skb_make_writable(skb, offset)) |
901 | goto tx_error_put; | 901 | goto tx_error_put; |
902 | 902 | ||
903 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 903 | if (skb_cow(skb, rt->dst.dev->hard_header_len)) |
904 | goto tx_error_put; | 904 | goto tx_error_put; |
905 | 905 | ||
906 | /* drop the old route when skb is not shared */ | 906 | /* drop the old route when skb is not shared */ |
907 | skb_dst_drop(skb); | 907 | skb_dst_drop(skb); |
908 | skb_dst_set(skb, &rt->u.dst); | 908 | skb_dst_set(skb, &rt->dst); |
909 | 909 | ||
910 | ip_vs_nat_icmp(skb, pp, cp, 0); | 910 | ip_vs_nat_icmp(skb, pp, cp, 0); |
911 | 911 | ||
@@ -963,9 +963,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
963 | goto tx_error_icmp; | 963 | goto tx_error_icmp; |
964 | 964 | ||
965 | /* MTU checking */ | 965 | /* MTU checking */ |
966 | mtu = dst_mtu(&rt->u.dst); | 966 | mtu = dst_mtu(&rt->dst); |
967 | if (skb->len > mtu) { | 967 | if (skb->len > mtu) { |
968 | dst_release(&rt->u.dst); | 968 | dst_release(&rt->dst); |
969 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); | 969 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); |
970 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); | 970 | IP_VS_DBG_RL("%s(): frag needed\n", __func__); |
971 | goto tx_error; | 971 | goto tx_error; |
@@ -975,12 +975,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
975 | if (!skb_make_writable(skb, offset)) | 975 | if (!skb_make_writable(skb, offset)) |
976 | goto tx_error_put; | 976 | goto tx_error_put; |
977 | 977 | ||
978 | if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) | 978 | if (skb_cow(skb, rt->dst.dev->hard_header_len)) |
979 | goto tx_error_put; | 979 | goto tx_error_put; |
980 | 980 | ||
981 | /* drop the old route when skb is not shared */ | 981 | /* drop the old route when skb is not shared */ |
982 | skb_dst_drop(skb); | 982 | skb_dst_drop(skb); |
983 | skb_dst_set(skb, &rt->u.dst); | 983 | skb_dst_set(skb, &rt->dst); |
984 | 984 | ||
985 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); | 985 | ip_vs_nat_icmp_v6(skb, pp, cp, 0); |
986 | 986 | ||
@@ -1001,7 +1001,7 @@ out: | |||
1001 | LeaveFunction(10); | 1001 | LeaveFunction(10); |
1002 | return rc; | 1002 | return rc; |
1003 | tx_error_put: | 1003 | tx_error_put: |
1004 | dst_release(&rt->u.dst); | 1004 | dst_release(&rt->dst); |
1005 | goto tx_error; | 1005 | goto tx_error; |
1006 | } | 1006 | } |
1007 | #endif | 1007 | #endif |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9c661413b826..16b41b4e2a3c 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone, | |||
619 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; | 619 | ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; |
620 | /* Don't set timer yet: wait for confirmation */ | 620 | /* Don't set timer yet: wait for confirmation */ |
621 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); | 621 | setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); |
622 | #ifdef CONFIG_NET_NS | 622 | write_pnet(&ct->ct_net, net); |
623 | ct->ct_net = net; | ||
624 | #endif | ||
625 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 623 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
626 | if (zone) { | 624 | if (zone) { |
627 | struct nf_conntrack_zone *nf_ct_zone; | 625 | struct nf_conntrack_zone *nf_ct_zone; |
@@ -1385,7 +1383,6 @@ static int nf_conntrack_init_init_net(void) | |||
1385 | /* Set up fake conntrack: to never be deleted, not in any hashes */ | 1383 | /* Set up fake conntrack: to never be deleted, not in any hashes */ |
1386 | for_each_possible_cpu(cpu) { | 1384 | for_each_possible_cpu(cpu) { |
1387 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); | 1385 | struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); |
1388 | |||
1389 | write_pnet(&ct->ct_net, &init_net); | 1386 | write_pnet(&ct->ct_net, &init_net); |
1390 | atomic_set(&ct->ct_general.use, 1); | 1387 | atomic_set(&ct->ct_general.use, 1); |
1391 | } | 1388 | } |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 6eaee7c8a337..b969025cf82f 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -734,11 +734,11 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
734 | if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) { | 734 | if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) { |
735 | if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { | 735 | if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { |
736 | if (rt1->rt_gateway == rt2->rt_gateway && | 736 | if (rt1->rt_gateway == rt2->rt_gateway && |
737 | rt1->u.dst.dev == rt2->u.dst.dev) | 737 | rt1->dst.dev == rt2->dst.dev) |
738 | ret = 1; | 738 | ret = 1; |
739 | dst_release(&rt2->u.dst); | 739 | dst_release(&rt2->dst); |
740 | } | 740 | } |
741 | dst_release(&rt1->u.dst); | 741 | dst_release(&rt1->dst); |
742 | } | 742 | } |
743 | break; | 743 | break; |
744 | } | 744 | } |
@@ -753,11 +753,11 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
753 | if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { | 753 | if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { |
754 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | 754 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, |
755 | sizeof(rt1->rt6i_gateway)) && | 755 | sizeof(rt1->rt6i_gateway)) && |
756 | rt1->u.dst.dev == rt2->u.dst.dev) | 756 | rt1->dst.dev == rt2->dst.dev) |
757 | ret = 1; | 757 | ret = 1; |
758 | dst_release(&rt2->u.dst); | 758 | dst_release(&rt2->dst); |
759 | } | 759 | } |
760 | dst_release(&rt1->u.dst); | 760 | dst_release(&rt1->dst); |
761 | } | 761 | } |
762 | break; | 762 | break; |
763 | } | 763 | } |
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c index 497b2224536f..aadde018a072 100644 --- a/net/netfilter/nf_conntrack_netbios_ns.c +++ b/net/netfilter/nf_conntrack_netbios_ns.c | |||
@@ -61,7 +61,7 @@ static int help(struct sk_buff *skb, unsigned int protoff, | |||
61 | goto out; | 61 | goto out; |
62 | 62 | ||
63 | rcu_read_lock(); | 63 | rcu_read_lock(); |
64 | in_dev = __in_dev_get_rcu(rt->u.dst.dev); | 64 | in_dev = __in_dev_get_rcu(rt->dst.dev); |
65 | if (in_dev != NULL) { | 65 | if (in_dev != NULL) { |
66 | for_primary_ifa(in_dev) { | 66 | for_primary_ifa(in_dev) { |
67 | if (ifa->ifa_broadcast == iph->daddr) { | 67 | if (ifa->ifa_broadcast == iph->daddr) { |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 445de702b8b7..e34622fa0003 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info) | |||
699 | vfree(info->jumpstack); | 699 | vfree(info->jumpstack); |
700 | else | 700 | else |
701 | kfree(info->jumpstack); | 701 | kfree(info->jumpstack); |
702 | if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE) | 702 | |
703 | vfree(info->stackptr); | 703 | free_percpu(info->stackptr); |
704 | else | ||
705 | kfree(info->stackptr); | ||
706 | 704 | ||
707 | kfree(info); | 705 | kfree(info); |
708 | } | 706 | } |
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i) | |||
753 | unsigned int size; | 751 | unsigned int size; |
754 | int cpu; | 752 | int cpu; |
755 | 753 | ||
756 | size = sizeof(unsigned int) * nr_cpu_ids; | 754 | i->stackptr = alloc_percpu(unsigned int); |
757 | if (size > PAGE_SIZE) | ||
758 | i->stackptr = vmalloc(size); | ||
759 | else | ||
760 | i->stackptr = kmalloc(size, GFP_KERNEL); | ||
761 | if (i->stackptr == NULL) | 755 | if (i->stackptr == NULL) |
762 | return -ENOMEM; | 756 | return -ENOMEM; |
763 | memset(i->stackptr, 0, size); | ||
764 | 757 | ||
765 | size = sizeof(void **) * nr_cpu_ids; | 758 | size = sizeof(void **) * nr_cpu_ids; |
766 | if (size > PAGE_SIZE) | 759 | if (size > PAGE_SIZE) |
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net, | |||
844 | struct xt_table_info *private; | 837 | struct xt_table_info *private; |
845 | struct xt_table *t, *table; | 838 | struct xt_table *t, *table; |
846 | 839 | ||
847 | ret = xt_jumpstack_alloc(newinfo); | ||
848 | if (ret < 0) | ||
849 | return ERR_PTR(ret); | ||
850 | |||
851 | /* Don't add one object to multiple lists. */ | 840 | /* Don't add one object to multiple lists. */ |
852 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); | 841 | table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); |
853 | if (!table) { | 842 | if (!table) { |
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c index 69c01e10f8af..de079abd5bc8 100644 --- a/net/netfilter/xt_RATEEST.c +++ b/net/netfilter/xt_RATEEST.c | |||
@@ -60,13 +60,22 @@ struct xt_rateest *xt_rateest_lookup(const char *name) | |||
60 | } | 60 | } |
61 | EXPORT_SYMBOL_GPL(xt_rateest_lookup); | 61 | EXPORT_SYMBOL_GPL(xt_rateest_lookup); |
62 | 62 | ||
63 | static void xt_rateest_free_rcu(struct rcu_head *head) | ||
64 | { | ||
65 | kfree(container_of(head, struct xt_rateest, rcu)); | ||
66 | } | ||
67 | |||
63 | void xt_rateest_put(struct xt_rateest *est) | 68 | void xt_rateest_put(struct xt_rateest *est) |
64 | { | 69 | { |
65 | mutex_lock(&xt_rateest_mutex); | 70 | mutex_lock(&xt_rateest_mutex); |
66 | if (--est->refcnt == 0) { | 71 | if (--est->refcnt == 0) { |
67 | hlist_del(&est->list); | 72 | hlist_del(&est->list); |
68 | gen_kill_estimator(&est->bstats, &est->rstats); | 73 | gen_kill_estimator(&est->bstats, &est->rstats); |
69 | kfree(est); | 74 | /* |
75 | * gen_estimator est_timer() might access est->lock or bstats, | ||
76 | * wait a RCU grace period before freeing 'est' | ||
77 | */ | ||
78 | call_rcu(&est->rcu, xt_rateest_free_rcu); | ||
70 | } | 79 | } |
71 | mutex_unlock(&xt_rateest_mutex); | 80 | mutex_unlock(&xt_rateest_mutex); |
72 | } | 81 | } |
@@ -179,6 +188,7 @@ static int __init xt_rateest_tg_init(void) | |||
179 | static void __exit xt_rateest_tg_fini(void) | 188 | static void __exit xt_rateest_tg_fini(void) |
180 | { | 189 | { |
181 | xt_unregister_target(&xt_rateest_tg_reg); | 190 | xt_unregister_target(&xt_rateest_tg_reg); |
191 | rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */ | ||
182 | } | 192 | } |
183 | 193 | ||
184 | 194 | ||
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 62ec021fbd50..1841388c770a 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -165,8 +165,8 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, | |||
165 | rcu_read_unlock(); | 165 | rcu_read_unlock(); |
166 | 166 | ||
167 | if (rt != NULL) { | 167 | if (rt != NULL) { |
168 | mtu = dst_mtu(&rt->u.dst); | 168 | mtu = dst_mtu(&rt->dst); |
169 | dst_release(&rt->u.dst); | 169 | dst_release(&rt->dst); |
170 | } | 170 | } |
171 | return mtu; | 171 | return mtu; |
172 | } | 172 | } |
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c index 7a118267c4c4..22a2d421e7eb 100644 --- a/net/netfilter/xt_TEE.c +++ b/net/netfilter/xt_TEE.c | |||
@@ -77,8 +77,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info) | |||
77 | return false; | 77 | return false; |
78 | 78 | ||
79 | skb_dst_drop(skb); | 79 | skb_dst_drop(skb); |
80 | skb_dst_set(skb, &rt->u.dst); | 80 | skb_dst_set(skb, &rt->dst); |
81 | skb->dev = rt->u.dst.dev; | 81 | skb->dev = rt->dst.dev; |
82 | skb->protocol = htons(ETH_P_IP); | 82 | skb->protocol = htons(ETH_P_IP); |
83 | return true; | 83 | return true; |
84 | } | 84 | } |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2078a277e06b..9a17f28b1253 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -83,6 +83,7 @@ | |||
83 | #include <linux/if_vlan.h> | 83 | #include <linux/if_vlan.h> |
84 | #include <linux/virtio_net.h> | 84 | #include <linux/virtio_net.h> |
85 | #include <linux/errqueue.h> | 85 | #include <linux/errqueue.h> |
86 | #include <linux/net_tstamp.h> | ||
86 | 87 | ||
87 | #ifdef CONFIG_INET | 88 | #ifdef CONFIG_INET |
88 | #include <net/inet_common.h> | 89 | #include <net/inet_common.h> |
@@ -202,6 +203,7 @@ struct packet_sock { | |||
202 | unsigned int tp_hdrlen; | 203 | unsigned int tp_hdrlen; |
203 | unsigned int tp_reserve; | 204 | unsigned int tp_reserve; |
204 | unsigned int tp_loss:1; | 205 | unsigned int tp_loss:1; |
206 | unsigned int tp_tstamp; | ||
205 | struct packet_type prot_hook ____cacheline_aligned_in_smp; | 207 | struct packet_type prot_hook ____cacheline_aligned_in_smp; |
206 | }; | 208 | }; |
207 | 209 | ||
@@ -656,6 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
656 | struct sk_buff *copy_skb = NULL; | 658 | struct sk_buff *copy_skb = NULL; |
657 | struct timeval tv; | 659 | struct timeval tv; |
658 | struct timespec ts; | 660 | struct timespec ts; |
661 | struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); | ||
659 | 662 | ||
660 | if (skb->pkt_type == PACKET_LOOPBACK) | 663 | if (skb->pkt_type == PACKET_LOOPBACK) |
661 | goto drop; | 664 | goto drop; |
@@ -737,7 +740,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
737 | h.h1->tp_snaplen = snaplen; | 740 | h.h1->tp_snaplen = snaplen; |
738 | h.h1->tp_mac = macoff; | 741 | h.h1->tp_mac = macoff; |
739 | h.h1->tp_net = netoff; | 742 | h.h1->tp_net = netoff; |
740 | if (skb->tstamp.tv64) | 743 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
744 | && shhwtstamps->syststamp.tv64) | ||
745 | tv = ktime_to_timeval(shhwtstamps->syststamp); | ||
746 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | ||
747 | && shhwtstamps->hwtstamp.tv64) | ||
748 | tv = ktime_to_timeval(shhwtstamps->hwtstamp); | ||
749 | else if (skb->tstamp.tv64) | ||
741 | tv = ktime_to_timeval(skb->tstamp); | 750 | tv = ktime_to_timeval(skb->tstamp); |
742 | else | 751 | else |
743 | do_gettimeofday(&tv); | 752 | do_gettimeofday(&tv); |
@@ -750,7 +759,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, | |||
750 | h.h2->tp_snaplen = snaplen; | 759 | h.h2->tp_snaplen = snaplen; |
751 | h.h2->tp_mac = macoff; | 760 | h.h2->tp_mac = macoff; |
752 | h.h2->tp_net = netoff; | 761 | h.h2->tp_net = netoff; |
753 | if (skb->tstamp.tv64) | 762 | if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE) |
763 | && shhwtstamps->syststamp.tv64) | ||
764 | ts = ktime_to_timespec(shhwtstamps->syststamp); | ||
765 | else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE) | ||
766 | && shhwtstamps->hwtstamp.tv64) | ||
767 | ts = ktime_to_timespec(shhwtstamps->hwtstamp); | ||
768 | else if (skb->tstamp.tv64) | ||
754 | ts = ktime_to_timespec(skb->tstamp); | 769 | ts = ktime_to_timespec(skb->tstamp); |
755 | else | 770 | else |
756 | getnstimeofday(&ts); | 771 | getnstimeofday(&ts); |
@@ -2027,6 +2042,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
2027 | po->has_vnet_hdr = !!val; | 2042 | po->has_vnet_hdr = !!val; |
2028 | return 0; | 2043 | return 0; |
2029 | } | 2044 | } |
2045 | case PACKET_TIMESTAMP: | ||
2046 | { | ||
2047 | int val; | ||
2048 | |||
2049 | if (optlen != sizeof(val)) | ||
2050 | return -EINVAL; | ||
2051 | if (copy_from_user(&val, optval, sizeof(val))) | ||
2052 | return -EFAULT; | ||
2053 | |||
2054 | po->tp_tstamp = val; | ||
2055 | return 0; | ||
2056 | } | ||
2030 | default: | 2057 | default: |
2031 | return -ENOPROTOOPT; | 2058 | return -ENOPROTOOPT; |
2032 | } | 2059 | } |
@@ -2119,6 +2146,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
2119 | val = po->tp_loss; | 2146 | val = po->tp_loss; |
2120 | data = &val; | 2147 | data = &val; |
2121 | break; | 2148 | break; |
2149 | case PACKET_TIMESTAMP: | ||
2150 | if (len > sizeof(int)) | ||
2151 | len = sizeof(int); | ||
2152 | val = po->tp_tstamp; | ||
2153 | data = &val; | ||
2154 | break; | ||
2122 | default: | 2155 | default: |
2123 | return -ENOPROTOOPT; | 2156 | return -ENOPROTOOPT; |
2124 | } | 2157 | } |
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index c33da6576942..b18e48fae975 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c | |||
@@ -162,6 +162,14 @@ int phonet_address_add(struct net_device *dev, u8 addr) | |||
162 | return err; | 162 | return err; |
163 | } | 163 | } |
164 | 164 | ||
165 | static void phonet_device_rcu_free(struct rcu_head *head) | ||
166 | { | ||
167 | struct phonet_device *pnd; | ||
168 | |||
169 | pnd = container_of(head, struct phonet_device, rcu); | ||
170 | kfree(pnd); | ||
171 | } | ||
172 | |||
165 | int phonet_address_del(struct net_device *dev, u8 addr) | 173 | int phonet_address_del(struct net_device *dev, u8 addr) |
166 | { | 174 | { |
167 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); | 175 | struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); |
@@ -179,10 +187,9 @@ int phonet_address_del(struct net_device *dev, u8 addr) | |||
179 | pnd = NULL; | 187 | pnd = NULL; |
180 | mutex_unlock(&pndevs->lock); | 188 | mutex_unlock(&pndevs->lock); |
181 | 189 | ||
182 | if (pnd) { | 190 | if (pnd) |
183 | synchronize_rcu(); | 191 | call_rcu(&pnd->rcu, phonet_device_rcu_free); |
184 | kfree(pnd); | 192 | |
185 | } | ||
186 | return err; | 193 | return err; |
187 | } | 194 | } |
188 | 195 | ||
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c index f0f85b0123f7..9f1729bd60de 100644 --- a/net/rxrpc/ar-peer.c +++ b/net/rxrpc/ar-peer.c | |||
@@ -64,8 +64,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) | |||
64 | return; | 64 | return; |
65 | } | 65 | } |
66 | 66 | ||
67 | peer->if_mtu = dst_mtu(&rt->u.dst); | 67 | peer->if_mtu = dst_mtu(&rt->dst); |
68 | dst_release(&rt->u.dst); | 68 | dst_release(&rt->dst); |
69 | 69 | ||
70 | _leave(" [if_mtu %u]", peer->if_mtu); | 70 | _leave(" [if_mtu %u]", peer->if_mtu); |
71 | } | 71 | } |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 972378f47f3c..23b25f89e7e0 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -26,6 +26,11 @@ | |||
26 | #include <net/act_api.h> | 26 | #include <net/act_api.h> |
27 | #include <net/netlink.h> | 27 | #include <net/netlink.h> |
28 | 28 | ||
29 | static void tcf_common_free_rcu(struct rcu_head *head) | ||
30 | { | ||
31 | kfree(container_of(head, struct tcf_common, tcfc_rcu)); | ||
32 | } | ||
33 | |||
29 | void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) | 34 | void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) |
30 | { | 35 | { |
31 | unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); | 36 | unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); |
@@ -38,7 +43,11 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) | |||
38 | write_unlock_bh(hinfo->lock); | 43 | write_unlock_bh(hinfo->lock); |
39 | gen_kill_estimator(&p->tcfc_bstats, | 44 | gen_kill_estimator(&p->tcfc_bstats, |
40 | &p->tcfc_rate_est); | 45 | &p->tcfc_rate_est); |
41 | kfree(p); | 46 | /* |
47 | * gen_estimator est_timer() might access p->tcfc_lock | ||
48 | * or bstats, wait a RCU grace period before freeing p | ||
49 | */ | ||
50 | call_rcu(&p->tcfc_rcu, tcf_common_free_rcu); | ||
42 | return; | 51 | return; |
43 | } | 52 | } |
44 | } | 53 | } |
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index d885ba311564..570949417f38 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c | |||
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
159 | iph->daddr = new_addr; | 159 | iph->daddr = new_addr; |
160 | 160 | ||
161 | csum_replace4(&iph->check, addr, new_addr); | 161 | csum_replace4(&iph->check, addr, new_addr); |
162 | } else if ((iph->frag_off & htons(IP_OFFSET)) || | ||
163 | iph->protocol != IPPROTO_ICMP) { | ||
164 | goto out; | ||
162 | } | 165 | } |
163 | 166 | ||
164 | ihl = iph->ihl * 4; | 167 | ihl = iph->ihl * 4; |
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, | |||
247 | break; | 250 | break; |
248 | } | 251 | } |
249 | 252 | ||
253 | out: | ||
250 | return action; | 254 | return action; |
251 | 255 | ||
252 | drop: | 256 | drop: |
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index fdbd0b7bd840..a0593c9640db 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c | |||
@@ -125,16 +125,15 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
125 | { | 125 | { |
126 | struct tcf_pedit *p = a->priv; | 126 | struct tcf_pedit *p = a->priv; |
127 | int i, munged = 0; | 127 | int i, munged = 0; |
128 | u8 *pptr; | 128 | unsigned int off; |
129 | 129 | ||
130 | if (!(skb->tc_verd & TC_OK2MUNGE)) { | 130 | if (skb_cloned(skb)) { |
131 | /* should we set skb->cloned? */ | ||
132 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 131 | if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
133 | return p->tcf_action; | 132 | return p->tcf_action; |
134 | } | 133 | } |
135 | } | 134 | } |
136 | 135 | ||
137 | pptr = skb_network_header(skb); | 136 | off = skb_network_offset(skb); |
138 | 137 | ||
139 | spin_lock(&p->tcf_lock); | 138 | spin_lock(&p->tcf_lock); |
140 | 139 | ||
@@ -144,17 +143,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
144 | struct tc_pedit_key *tkey = p->tcfp_keys; | 143 | struct tc_pedit_key *tkey = p->tcfp_keys; |
145 | 144 | ||
146 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { | 145 | for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { |
147 | u32 *ptr; | 146 | u32 *ptr, _data; |
148 | int offset = tkey->off; | 147 | int offset = tkey->off; |
149 | 148 | ||
150 | if (tkey->offmask) { | 149 | if (tkey->offmask) { |
151 | if (skb->len > tkey->at) { | 150 | char *d, _d; |
152 | char *j = pptr + tkey->at; | 151 | |
153 | offset += ((*j & tkey->offmask) >> | 152 | d = skb_header_pointer(skb, off + tkey->at, 1, |
154 | tkey->shift); | 153 | &_d); |
155 | } else { | 154 | if (!d) |
156 | goto bad; | 155 | goto bad; |
157 | } | 156 | offset += (*d & tkey->offmask) >> tkey->shift; |
158 | } | 157 | } |
159 | 158 | ||
160 | if (offset % 4) { | 159 | if (offset % 4) { |
@@ -169,9 +168,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, | |||
169 | goto bad; | 168 | goto bad; |
170 | } | 169 | } |
171 | 170 | ||
172 | ptr = (u32 *)(pptr+offset); | 171 | ptr = skb_header_pointer(skb, off + offset, 4, &_data); |
172 | if (!ptr) | ||
173 | goto bad; | ||
173 | /* just do it, baby */ | 174 | /* just do it, baby */ |
174 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); | 175 | *ptr = ((*ptr & tkey->mask) ^ tkey->val); |
176 | if (ptr == &_data) | ||
177 | skb_store_bits(skb, off + offset, ptr, 4); | ||
175 | munged++; | 178 | munged++; |
176 | } | 179 | } |
177 | 180 | ||
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 654f73dff7c1..537a48732e9e 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -97,6 +97,11 @@ nla_put_failure: | |||
97 | goto done; | 97 | goto done; |
98 | } | 98 | } |
99 | 99 | ||
100 | static void tcf_police_free_rcu(struct rcu_head *head) | ||
101 | { | ||
102 | kfree(container_of(head, struct tcf_police, tcf_rcu)); | ||
103 | } | ||
104 | |||
100 | static void tcf_police_destroy(struct tcf_police *p) | 105 | static void tcf_police_destroy(struct tcf_police *p) |
101 | { | 106 | { |
102 | unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); | 107 | unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); |
@@ -113,7 +118,11 @@ static void tcf_police_destroy(struct tcf_police *p) | |||
113 | qdisc_put_rtab(p->tcfp_R_tab); | 118 | qdisc_put_rtab(p->tcfp_R_tab); |
114 | if (p->tcfp_P_tab) | 119 | if (p->tcfp_P_tab) |
115 | qdisc_put_rtab(p->tcfp_P_tab); | 120 | qdisc_put_rtab(p->tcfp_P_tab); |
116 | kfree(p); | 121 | /* |
122 | * gen_estimator est_timer() might access p->tcf_lock | ||
123 | * or bstats, wait a RCU grace period before freeing p | ||
124 | */ | ||
125 | call_rcu(&p->tcf_rcu, tcf_police_free_rcu); | ||
117 | return; | 126 | return; |
118 | } | 127 | } |
119 | } | 128 | } |
@@ -397,6 +406,7 @@ static void __exit | |||
397 | police_cleanup_module(void) | 406 | police_cleanup_module(void) |
398 | { | 407 | { |
399 | tcf_unregister_action(&act_police_ops); | 408 | tcf_unregister_action(&act_police_ops); |
409 | rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */ | ||
400 | } | 410 | } |
401 | 411 | ||
402 | module_init(police_init_module); | 412 | module_init(police_init_module); |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 96275422c619..4f522143811e 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re | |||
98 | { | 98 | { |
99 | struct { | 99 | struct { |
100 | struct tc_u_knode *knode; | 100 | struct tc_u_knode *knode; |
101 | u8 *ptr; | 101 | unsigned int off; |
102 | } stack[TC_U32_MAXDEPTH]; | 102 | } stack[TC_U32_MAXDEPTH]; |
103 | 103 | ||
104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | 104 | struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; |
105 | u8 *ptr = skb_network_header(skb); | 105 | unsigned int off = skb_network_offset(skb); |
106 | struct tc_u_knode *n; | 106 | struct tc_u_knode *n; |
107 | int sdepth = 0; | 107 | int sdepth = 0; |
108 | int off2 = 0; | 108 | int off2 = 0; |
@@ -134,8 +134,14 @@ next_knode: | |||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for (i = n->sel.nkeys; i>0; i--, key++) { | 136 | for (i = n->sel.nkeys; i>0; i--, key++) { |
137 | 137 | unsigned int toff; | |
138 | if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) { | 138 | __be32 *data, _data; |
139 | |||
140 | toff = off + key->off + (off2 & key->offmask); | ||
141 | data = skb_header_pointer(skb, toff, 4, &_data); | ||
142 | if (!data) | ||
143 | goto out; | ||
144 | if ((*data ^ key->val) & key->mask) { | ||
139 | n = n->next; | 145 | n = n->next; |
140 | goto next_knode; | 146 | goto next_knode; |
141 | } | 147 | } |
@@ -174,29 +180,45 @@ check_terminal: | |||
174 | if (sdepth >= TC_U32_MAXDEPTH) | 180 | if (sdepth >= TC_U32_MAXDEPTH) |
175 | goto deadloop; | 181 | goto deadloop; |
176 | stack[sdepth].knode = n; | 182 | stack[sdepth].knode = n; |
177 | stack[sdepth].ptr = ptr; | 183 | stack[sdepth].off = off; |
178 | sdepth++; | 184 | sdepth++; |
179 | 185 | ||
180 | ht = n->ht_down; | 186 | ht = n->ht_down; |
181 | sel = 0; | 187 | sel = 0; |
182 | if (ht->divisor) | 188 | if (ht->divisor) { |
183 | sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift); | 189 | __be32 *data, _data; |
184 | 190 | ||
191 | data = skb_header_pointer(skb, off + n->sel.hoff, 4, | ||
192 | &_data); | ||
193 | if (!data) | ||
194 | goto out; | ||
195 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, | ||
196 | n->fshift); | ||
197 | } | ||
185 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | 198 | if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) |
186 | goto next_ht; | 199 | goto next_ht; |
187 | 200 | ||
188 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | 201 | if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { |
189 | off2 = n->sel.off + 3; | 202 | off2 = n->sel.off + 3; |
190 | if (n->sel.flags&TC_U32_VAROFFSET) | 203 | if (n->sel.flags & TC_U32_VAROFFSET) { |
191 | off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift; | 204 | __be16 *data, _data; |
205 | |||
206 | data = skb_header_pointer(skb, | ||
207 | off + n->sel.offoff, | ||
208 | 2, &_data); | ||
209 | if (!data) | ||
210 | goto out; | ||
211 | off2 += ntohs(n->sel.offmask & *data) >> | ||
212 | n->sel.offshift; | ||
213 | } | ||
192 | off2 &= ~3; | 214 | off2 &= ~3; |
193 | } | 215 | } |
194 | if (n->sel.flags&TC_U32_EAT) { | 216 | if (n->sel.flags&TC_U32_EAT) { |
195 | ptr += off2; | 217 | off += off2; |
196 | off2 = 0; | 218 | off2 = 0; |
197 | } | 219 | } |
198 | 220 | ||
199 | if (ptr < skb_tail_pointer(skb)) | 221 | if (off < skb->len) |
200 | goto next_ht; | 222 | goto next_ht; |
201 | } | 223 | } |
202 | 224 | ||
@@ -204,9 +226,10 @@ check_terminal: | |||
204 | if (sdepth--) { | 226 | if (sdepth--) { |
205 | n = stack[sdepth].knode; | 227 | n = stack[sdepth].knode; |
206 | ht = n->ht_up; | 228 | ht = n->ht_up; |
207 | ptr = stack[sdepth].ptr; | 229 | off = stack[sdepth].off; |
208 | goto check_terminal; | 230 | goto check_terminal; |
209 | } | 231 | } |
232 | out: | ||
210 | return -1; | 233 | return -1; |
211 | 234 | ||
212 | deadloop: | 235 | deadloop: |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index bd1892fe4b21..d20fcd2a5519 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -205,7 +205,7 @@ void __qdisc_run(struct Qdisc *q) | |||
205 | } | 205 | } |
206 | } | 206 | } |
207 | 207 | ||
208 | clear_bit(__QDISC_STATE_RUNNING, &q->state); | 208 | qdisc_run_end(q); |
209 | } | 209 | } |
210 | 210 | ||
211 | unsigned long dev_trans_start(struct net_device *dev) | 211 | unsigned long dev_trans_start(struct net_device *dev) |
@@ -561,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | |||
561 | 561 | ||
562 | INIT_LIST_HEAD(&sch->list); | 562 | INIT_LIST_HEAD(&sch->list); |
563 | skb_queue_head_init(&sch->q); | 563 | skb_queue_head_init(&sch->q); |
564 | spin_lock_init(&sch->busylock); | ||
564 | sch->ops = ops; | 565 | sch->ops = ops; |
565 | sch->enqueue = ops->enqueue; | 566 | sch->enqueue = ops->enqueue; |
566 | sch->dequeue = ops->dequeue; | 567 | sch->dequeue = ops->dequeue; |
@@ -797,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev) | |||
797 | 798 | ||
798 | spin_lock_bh(root_lock); | 799 | spin_lock_bh(root_lock); |
799 | 800 | ||
800 | val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || | 801 | val = (qdisc_is_running(q) || |
801 | test_bit(__QDISC_STATE_SCHED, &q->state)); | 802 | test_bit(__QDISC_STATE_SCHED, &q->state)); |
802 | 803 | ||
803 | spin_unlock_bh(root_lock); | 804 | spin_unlock_bh(root_lock); |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 0b52b8de562c..4be8d04b262d 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1550,7 +1550,6 @@ static const struct Qdisc_class_ops htb_class_ops = { | |||
1550 | }; | 1550 | }; |
1551 | 1551 | ||
1552 | static struct Qdisc_ops htb_qdisc_ops __read_mostly = { | 1552 | static struct Qdisc_ops htb_qdisc_ops __read_mostly = { |
1553 | .next = NULL, | ||
1554 | .cl_ops = &htb_class_ops, | 1553 | .cl_ops = &htb_class_ops, |
1555 | .id = "htb", | 1554 | .id = "htb", |
1556 | .priv_size = sizeof(struct htb_sched), | 1555 | .priv_size = sizeof(struct htb_sched), |
@@ -1561,7 +1560,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = { | |||
1561 | .init = htb_init, | 1560 | .init = htb_init, |
1562 | .reset = htb_reset, | 1561 | .reset = htb_reset, |
1563 | .destroy = htb_destroy, | 1562 | .destroy = htb_destroy, |
1564 | .change = NULL /* htb_change */, | ||
1565 | .dump = htb_dump, | 1563 | .dump = htb_dump, |
1566 | .owner = THIS_MODULE, | 1564 | .owner = THIS_MODULE, |
1567 | }; | 1565 | }; |
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 182749867c72..a0e1a7fdebbf 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c | |||
@@ -490,7 +490,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
490 | __func__, &fl.fl4_dst, &fl.fl4_src); | 490 | __func__, &fl.fl4_dst, &fl.fl4_src); |
491 | 491 | ||
492 | if (!ip_route_output_key(&init_net, &rt, &fl)) { | 492 | if (!ip_route_output_key(&init_net, &rt, &fl)) { |
493 | dst = &rt->u.dst; | 493 | dst = &rt->dst; |
494 | } | 494 | } |
495 | 495 | ||
496 | /* If there is no association or if a source address is passed, no | 496 | /* If there is no association or if a source address is passed, no |
@@ -534,7 +534,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | |||
534 | fl.fl4_src = laddr->a.v4.sin_addr.s_addr; | 534 | fl.fl4_src = laddr->a.v4.sin_addr.s_addr; |
535 | fl.fl_ip_sport = laddr->a.v4.sin_port; | 535 | fl.fl_ip_sport = laddr->a.v4.sin_port; |
536 | if (!ip_route_output_key(&init_net, &rt, &fl)) { | 536 | if (!ip_route_output_key(&init_net, &rt, &fl)) { |
537 | dst = &rt->u.dst; | 537 | dst = &rt->dst; |
538 | goto out_unlock; | 538 | goto out_unlock; |
539 | } | 539 | } |
540 | } | 540 | } |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index bd2a50b482ac..246f92924658 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1817,7 +1817,7 @@ malformed: | |||
1817 | struct __sctp_missing { | 1817 | struct __sctp_missing { |
1818 | __be32 num_missing; | 1818 | __be32 num_missing; |
1819 | __be16 type; | 1819 | __be16 type; |
1820 | } __attribute__((packed)); | 1820 | } __packed; |
1821 | 1821 | ||
1822 | /* | 1822 | /* |
1823 | * Report a missing mandatory parameter. | 1823 | * Report a missing mandatory parameter. |
diff --git a/net/socket.c b/net/socket.c index 367d5477d00f..acfa1738663d 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -124,7 +124,7 @@ static int sock_fasync(int fd, struct file *filp, int on); | |||
124 | static ssize_t sock_sendpage(struct file *file, struct page *page, | 124 | static ssize_t sock_sendpage(struct file *file, struct page *page, |
125 | int offset, size_t size, loff_t *ppos, int more); | 125 | int offset, size_t size, loff_t *ppos, int more); |
126 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | 126 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, |
127 | struct pipe_inode_info *pipe, size_t len, | 127 | struct pipe_inode_info *pipe, size_t len, |
128 | unsigned int flags); | 128 | unsigned int flags); |
129 | 129 | ||
130 | /* | 130 | /* |
@@ -162,7 +162,7 @@ static const struct net_proto_family *net_families[NPROTO] __read_mostly; | |||
162 | * Statistics counters of the socket lists | 162 | * Statistics counters of the socket lists |
163 | */ | 163 | */ |
164 | 164 | ||
165 | static DEFINE_PER_CPU(int, sockets_in_use) = 0; | 165 | static DEFINE_PER_CPU(int, sockets_in_use); |
166 | 166 | ||
167 | /* | 167 | /* |
168 | * Support routines. | 168 | * Support routines. |
@@ -309,9 +309,9 @@ static int init_inodecache(void) | |||
309 | } | 309 | } |
310 | 310 | ||
311 | static const struct super_operations sockfs_ops = { | 311 | static const struct super_operations sockfs_ops = { |
312 | .alloc_inode = sock_alloc_inode, | 312 | .alloc_inode = sock_alloc_inode, |
313 | .destroy_inode =sock_destroy_inode, | 313 | .destroy_inode = sock_destroy_inode, |
314 | .statfs = simple_statfs, | 314 | .statfs = simple_statfs, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static int sockfs_get_sb(struct file_system_type *fs_type, | 317 | static int sockfs_get_sb(struct file_system_type *fs_type, |
@@ -411,6 +411,7 @@ int sock_map_fd(struct socket *sock, int flags) | |||
411 | 411 | ||
412 | return fd; | 412 | return fd; |
413 | } | 413 | } |
414 | EXPORT_SYMBOL(sock_map_fd); | ||
414 | 415 | ||
415 | static struct socket *sock_from_file(struct file *file, int *err) | 416 | static struct socket *sock_from_file(struct file *file, int *err) |
416 | { | 417 | { |
@@ -422,7 +423,7 @@ static struct socket *sock_from_file(struct file *file, int *err) | |||
422 | } | 423 | } |
423 | 424 | ||
424 | /** | 425 | /** |
425 | * sockfd_lookup - Go from a file number to its socket slot | 426 | * sockfd_lookup - Go from a file number to its socket slot |
426 | * @fd: file handle | 427 | * @fd: file handle |
427 | * @err: pointer to an error code return | 428 | * @err: pointer to an error code return |
428 | * | 429 | * |
@@ -450,6 +451,7 @@ struct socket *sockfd_lookup(int fd, int *err) | |||
450 | fput(file); | 451 | fput(file); |
451 | return sock; | 452 | return sock; |
452 | } | 453 | } |
454 | EXPORT_SYMBOL(sockfd_lookup); | ||
453 | 455 | ||
454 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) | 456 | static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) |
455 | { | 457 | { |
@@ -540,6 +542,7 @@ void sock_release(struct socket *sock) | |||
540 | } | 542 | } |
541 | sock->file = NULL; | 543 | sock->file = NULL; |
542 | } | 544 | } |
545 | EXPORT_SYMBOL(sock_release); | ||
543 | 546 | ||
544 | int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, | 547 | int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, |
545 | union skb_shared_tx *shtx) | 548 | union skb_shared_tx *shtx) |
@@ -586,6 +589,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) | |||
586 | ret = wait_on_sync_kiocb(&iocb); | 589 | ret = wait_on_sync_kiocb(&iocb); |
587 | return ret; | 590 | return ret; |
588 | } | 591 | } |
592 | EXPORT_SYMBOL(sock_sendmsg); | ||
589 | 593 | ||
590 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | 594 | int kernel_sendmsg(struct socket *sock, struct msghdr *msg, |
591 | struct kvec *vec, size_t num, size_t size) | 595 | struct kvec *vec, size_t num, size_t size) |
@@ -604,6 +608,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | |||
604 | set_fs(oldfs); | 608 | set_fs(oldfs); |
605 | return result; | 609 | return result; |
606 | } | 610 | } |
611 | EXPORT_SYMBOL(kernel_sendmsg); | ||
607 | 612 | ||
608 | static int ktime2ts(ktime_t kt, struct timespec *ts) | 613 | static int ktime2ts(ktime_t kt, struct timespec *ts) |
609 | { | 614 | { |
@@ -664,7 +669,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
664 | put_cmsg(msg, SOL_SOCKET, | 669 | put_cmsg(msg, SOL_SOCKET, |
665 | SCM_TIMESTAMPING, sizeof(ts), &ts); | 670 | SCM_TIMESTAMPING, sizeof(ts), &ts); |
666 | } | 671 | } |
667 | |||
668 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); | 672 | EXPORT_SYMBOL_GPL(__sock_recv_timestamp); |
669 | 673 | ||
670 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | 674 | inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) |
@@ -720,6 +724,7 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, | |||
720 | ret = wait_on_sync_kiocb(&iocb); | 724 | ret = wait_on_sync_kiocb(&iocb); |
721 | return ret; | 725 | return ret; |
722 | } | 726 | } |
727 | EXPORT_SYMBOL(sock_recvmsg); | ||
723 | 728 | ||
724 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, | 729 | static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, |
725 | size_t size, int flags) | 730 | size_t size, int flags) |
@@ -752,6 +757,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | |||
752 | set_fs(oldfs); | 757 | set_fs(oldfs); |
753 | return result; | 758 | return result; |
754 | } | 759 | } |
760 | EXPORT_SYMBOL(kernel_recvmsg); | ||
755 | 761 | ||
756 | static void sock_aio_dtor(struct kiocb *iocb) | 762 | static void sock_aio_dtor(struct kiocb *iocb) |
757 | { | 763 | { |
@@ -774,7 +780,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page, | |||
774 | } | 780 | } |
775 | 781 | ||
776 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | 782 | static ssize_t sock_splice_read(struct file *file, loff_t *ppos, |
777 | struct pipe_inode_info *pipe, size_t len, | 783 | struct pipe_inode_info *pipe, size_t len, |
778 | unsigned int flags) | 784 | unsigned int flags) |
779 | { | 785 | { |
780 | struct socket *sock = file->private_data; | 786 | struct socket *sock = file->private_data; |
@@ -887,7 +893,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
887 | */ | 893 | */ |
888 | 894 | ||
889 | static DEFINE_MUTEX(br_ioctl_mutex); | 895 | static DEFINE_MUTEX(br_ioctl_mutex); |
890 | static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL; | 896 | static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg); |
891 | 897 | ||
892 | void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) | 898 | void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) |
893 | { | 899 | { |
@@ -895,7 +901,6 @@ void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) | |||
895 | br_ioctl_hook = hook; | 901 | br_ioctl_hook = hook; |
896 | mutex_unlock(&br_ioctl_mutex); | 902 | mutex_unlock(&br_ioctl_mutex); |
897 | } | 903 | } |
898 | |||
899 | EXPORT_SYMBOL(brioctl_set); | 904 | EXPORT_SYMBOL(brioctl_set); |
900 | 905 | ||
901 | static DEFINE_MUTEX(vlan_ioctl_mutex); | 906 | static DEFINE_MUTEX(vlan_ioctl_mutex); |
@@ -907,7 +912,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *)) | |||
907 | vlan_ioctl_hook = hook; | 912 | vlan_ioctl_hook = hook; |
908 | mutex_unlock(&vlan_ioctl_mutex); | 913 | mutex_unlock(&vlan_ioctl_mutex); |
909 | } | 914 | } |
910 | |||
911 | EXPORT_SYMBOL(vlan_ioctl_set); | 915 | EXPORT_SYMBOL(vlan_ioctl_set); |
912 | 916 | ||
913 | static DEFINE_MUTEX(dlci_ioctl_mutex); | 917 | static DEFINE_MUTEX(dlci_ioctl_mutex); |
@@ -919,7 +923,6 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
919 | dlci_ioctl_hook = hook; | 923 | dlci_ioctl_hook = hook; |
920 | mutex_unlock(&dlci_ioctl_mutex); | 924 | mutex_unlock(&dlci_ioctl_mutex); |
921 | } | 925 | } |
922 | |||
923 | EXPORT_SYMBOL(dlci_ioctl_set); | 926 | EXPORT_SYMBOL(dlci_ioctl_set); |
924 | 927 | ||
925 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 928 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
@@ -1047,6 +1050,7 @@ out_release: | |||
1047 | sock = NULL; | 1050 | sock = NULL; |
1048 | goto out; | 1051 | goto out; |
1049 | } | 1052 | } |
1053 | EXPORT_SYMBOL(sock_create_lite); | ||
1050 | 1054 | ||
1051 | /* No kernel lock held - perfect */ | 1055 | /* No kernel lock held - perfect */ |
1052 | static unsigned int sock_poll(struct file *file, poll_table *wait) | 1056 | static unsigned int sock_poll(struct file *file, poll_table *wait) |
@@ -1147,6 +1151,7 @@ call_kill: | |||
1147 | rcu_read_unlock(); | 1151 | rcu_read_unlock(); |
1148 | return 0; | 1152 | return 0; |
1149 | } | 1153 | } |
1154 | EXPORT_SYMBOL(sock_wake_async); | ||
1150 | 1155 | ||
1151 | static int __sock_create(struct net *net, int family, int type, int protocol, | 1156 | static int __sock_create(struct net *net, int family, int type, int protocol, |
1152 | struct socket **res, int kern) | 1157 | struct socket **res, int kern) |
@@ -1265,11 +1270,13 @@ int sock_create(int family, int type, int protocol, struct socket **res) | |||
1265 | { | 1270 | { |
1266 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); | 1271 | return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); |
1267 | } | 1272 | } |
1273 | EXPORT_SYMBOL(sock_create); | ||
1268 | 1274 | ||
1269 | int sock_create_kern(int family, int type, int protocol, struct socket **res) | 1275 | int sock_create_kern(int family, int type, int protocol, struct socket **res) |
1270 | { | 1276 | { |
1271 | return __sock_create(&init_net, family, type, protocol, res, 1); | 1277 | return __sock_create(&init_net, family, type, protocol, res, 1); |
1272 | } | 1278 | } |
1279 | EXPORT_SYMBOL(sock_create_kern); | ||
1273 | 1280 | ||
1274 | SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) | 1281 | SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) |
1275 | { | 1282 | { |
@@ -1474,7 +1481,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, | |||
1474 | goto out; | 1481 | goto out; |
1475 | 1482 | ||
1476 | err = -ENFILE; | 1483 | err = -ENFILE; |
1477 | if (!(newsock = sock_alloc())) | 1484 | newsock = sock_alloc(); |
1485 | if (!newsock) | ||
1478 | goto out_put; | 1486 | goto out_put; |
1479 | 1487 | ||
1480 | newsock->type = sock->type; | 1488 | newsock->type = sock->type; |
@@ -1861,8 +1869,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) | |||
1861 | if (MSG_CMSG_COMPAT & flags) { | 1869 | if (MSG_CMSG_COMPAT & flags) { |
1862 | if (get_compat_msghdr(&msg_sys, msg_compat)) | 1870 | if (get_compat_msghdr(&msg_sys, msg_compat)) |
1863 | return -EFAULT; | 1871 | return -EFAULT; |
1864 | } | 1872 | } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) |
1865 | else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) | ||
1866 | return -EFAULT; | 1873 | return -EFAULT; |
1867 | 1874 | ||
1868 | sock = sockfd_lookup_light(fd, &err, &fput_needed); | 1875 | sock = sockfd_lookup_light(fd, &err, &fput_needed); |
@@ -1964,8 +1971,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, | |||
1964 | if (MSG_CMSG_COMPAT & flags) { | 1971 | if (MSG_CMSG_COMPAT & flags) { |
1965 | if (get_compat_msghdr(msg_sys, msg_compat)) | 1972 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1966 | return -EFAULT; | 1973 | return -EFAULT; |
1967 | } | 1974 | } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1968 | else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) | ||
1969 | return -EFAULT; | 1975 | return -EFAULT; |
1970 | 1976 | ||
1971 | err = -EMSGSIZE; | 1977 | err = -EMSGSIZE; |
@@ -2191,10 +2197,10 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, | |||
2191 | /* Argument list sizes for sys_socketcall */ | 2197 | /* Argument list sizes for sys_socketcall */ |
2192 | #define AL(x) ((x) * sizeof(unsigned long)) | 2198 | #define AL(x) ((x) * sizeof(unsigned long)) |
2193 | static const unsigned char nargs[20] = { | 2199 | static const unsigned char nargs[20] = { |
2194 | AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), | 2200 | AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), |
2195 | AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), | 2201 | AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), |
2196 | AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), | 2202 | AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), |
2197 | AL(4),AL(5) | 2203 | AL(4), AL(5) |
2198 | }; | 2204 | }; |
2199 | 2205 | ||
2200 | #undef AL | 2206 | #undef AL |
@@ -2340,6 +2346,7 @@ int sock_register(const struct net_proto_family *ops) | |||
2340 | printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); | 2346 | printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); |
2341 | return err; | 2347 | return err; |
2342 | } | 2348 | } |
2349 | EXPORT_SYMBOL(sock_register); | ||
2343 | 2350 | ||
2344 | /** | 2351 | /** |
2345 | * sock_unregister - remove a protocol handler | 2352 | * sock_unregister - remove a protocol handler |
@@ -2366,6 +2373,7 @@ void sock_unregister(int family) | |||
2366 | 2373 | ||
2367 | printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); | 2374 | printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); |
2368 | } | 2375 | } |
2376 | EXPORT_SYMBOL(sock_unregister); | ||
2369 | 2377 | ||
2370 | static int __init sock_init(void) | 2378 | static int __init sock_init(void) |
2371 | { | 2379 | { |
@@ -2490,13 +2498,13 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | |||
2490 | ifc.ifc_req = NULL; | 2498 | ifc.ifc_req = NULL; |
2491 | uifc = compat_alloc_user_space(sizeof(struct ifconf)); | 2499 | uifc = compat_alloc_user_space(sizeof(struct ifconf)); |
2492 | } else { | 2500 | } else { |
2493 | size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) * | 2501 | size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) * |
2494 | sizeof (struct ifreq); | 2502 | sizeof(struct ifreq); |
2495 | uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); | 2503 | uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); |
2496 | ifc.ifc_len = len; | 2504 | ifc.ifc_len = len; |
2497 | ifr = ifc.ifc_req = (void __user *)(uifc + 1); | 2505 | ifr = ifc.ifc_req = (void __user *)(uifc + 1); |
2498 | ifr32 = compat_ptr(ifc32.ifcbuf); | 2506 | ifr32 = compat_ptr(ifc32.ifcbuf); |
2499 | for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) { | 2507 | for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) { |
2500 | if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) | 2508 | if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) |
2501 | return -EFAULT; | 2509 | return -EFAULT; |
2502 | ifr++; | 2510 | ifr++; |
@@ -2516,9 +2524,9 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) | |||
2516 | ifr = ifc.ifc_req; | 2524 | ifr = ifc.ifc_req; |
2517 | ifr32 = compat_ptr(ifc32.ifcbuf); | 2525 | ifr32 = compat_ptr(ifc32.ifcbuf); |
2518 | for (i = 0, j = 0; | 2526 | for (i = 0, j = 0; |
2519 | i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; | 2527 | i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; |
2520 | i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) { | 2528 | i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) { |
2521 | if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq))) | 2529 | if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq))) |
2522 | return -EFAULT; | 2530 | return -EFAULT; |
2523 | ifr32++; | 2531 | ifr32++; |
2524 | ifr++; | 2532 | ifr++; |
@@ -2567,7 +2575,7 @@ static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32 | |||
2567 | compat_uptr_t uptr32; | 2575 | compat_uptr_t uptr32; |
2568 | struct ifreq __user *uifr; | 2576 | struct ifreq __user *uifr; |
2569 | 2577 | ||
2570 | uifr = compat_alloc_user_space(sizeof (*uifr)); | 2578 | uifr = compat_alloc_user_space(sizeof(*uifr)); |
2571 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | 2579 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) |
2572 | return -EFAULT; | 2580 | return -EFAULT; |
2573 | 2581 | ||
@@ -2601,9 +2609,9 @@ static int bond_ioctl(struct net *net, unsigned int cmd, | |||
2601 | return -EFAULT; | 2609 | return -EFAULT; |
2602 | 2610 | ||
2603 | old_fs = get_fs(); | 2611 | old_fs = get_fs(); |
2604 | set_fs (KERNEL_DS); | 2612 | set_fs(KERNEL_DS); |
2605 | err = dev_ioctl(net, cmd, &kifr); | 2613 | err = dev_ioctl(net, cmd, &kifr); |
2606 | set_fs (old_fs); | 2614 | set_fs(old_fs); |
2607 | 2615 | ||
2608 | return err; | 2616 | return err; |
2609 | case SIOCBONDSLAVEINFOQUERY: | 2617 | case SIOCBONDSLAVEINFOQUERY: |
@@ -2710,9 +2718,9 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | |||
2710 | return -EFAULT; | 2718 | return -EFAULT; |
2711 | 2719 | ||
2712 | old_fs = get_fs(); | 2720 | old_fs = get_fs(); |
2713 | set_fs (KERNEL_DS); | 2721 | set_fs(KERNEL_DS); |
2714 | err = dev_ioctl(net, cmd, (void __user *)&ifr); | 2722 | err = dev_ioctl(net, cmd, (void __user *)&ifr); |
2715 | set_fs (old_fs); | 2723 | set_fs(old_fs); |
2716 | 2724 | ||
2717 | if (cmd == SIOCGIFMAP && !err) { | 2725 | if (cmd == SIOCGIFMAP && !err) { |
2718 | err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); | 2726 | err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); |
@@ -2734,7 +2742,7 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif | |||
2734 | compat_uptr_t uptr32; | 2742 | compat_uptr_t uptr32; |
2735 | struct ifreq __user *uifr; | 2743 | struct ifreq __user *uifr; |
2736 | 2744 | ||
2737 | uifr = compat_alloc_user_space(sizeof (*uifr)); | 2745 | uifr = compat_alloc_user_space(sizeof(*uifr)); |
2738 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) | 2746 | if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) |
2739 | return -EFAULT; | 2747 | return -EFAULT; |
2740 | 2748 | ||
@@ -2750,20 +2758,20 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif | |||
2750 | } | 2758 | } |
2751 | 2759 | ||
2752 | struct rtentry32 { | 2760 | struct rtentry32 { |
2753 | u32 rt_pad1; | 2761 | u32 rt_pad1; |
2754 | struct sockaddr rt_dst; /* target address */ | 2762 | struct sockaddr rt_dst; /* target address */ |
2755 | struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ | 2763 | struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ |
2756 | struct sockaddr rt_genmask; /* target network mask (IP) */ | 2764 | struct sockaddr rt_genmask; /* target network mask (IP) */ |
2757 | unsigned short rt_flags; | 2765 | unsigned short rt_flags; |
2758 | short rt_pad2; | 2766 | short rt_pad2; |
2759 | u32 rt_pad3; | 2767 | u32 rt_pad3; |
2760 | unsigned char rt_tos; | 2768 | unsigned char rt_tos; |
2761 | unsigned char rt_class; | 2769 | unsigned char rt_class; |
2762 | short rt_pad4; | 2770 | short rt_pad4; |
2763 | short rt_metric; /* +1 for binary compatibility! */ | 2771 | short rt_metric; /* +1 for binary compatibility! */ |
2764 | /* char * */ u32 rt_dev; /* forcing the device at add */ | 2772 | /* char * */ u32 rt_dev; /* forcing the device at add */ |
2765 | u32 rt_mtu; /* per route MTU/Window */ | 2773 | u32 rt_mtu; /* per route MTU/Window */ |
2766 | u32 rt_window; /* Window clamping */ | 2774 | u32 rt_window; /* Window clamping */ |
2767 | unsigned short rt_irtt; /* Initial RTT */ | 2775 | unsigned short rt_irtt; /* Initial RTT */ |
2768 | }; | 2776 | }; |
2769 | 2777 | ||
@@ -2793,29 +2801,29 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
2793 | 2801 | ||
2794 | if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ | 2802 | if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ |
2795 | struct in6_rtmsg32 __user *ur6 = argp; | 2803 | struct in6_rtmsg32 __user *ur6 = argp; |
2796 | ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), | 2804 | ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst), |
2797 | 3 * sizeof(struct in6_addr)); | 2805 | 3 * sizeof(struct in6_addr)); |
2798 | ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); | 2806 | ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type)); |
2799 | ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); | 2807 | ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); |
2800 | ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); | 2808 | ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); |
2801 | ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); | 2809 | ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric)); |
2802 | ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); | 2810 | ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info)); |
2803 | ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); | 2811 | ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags)); |
2804 | ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); | 2812 | ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); |
2805 | 2813 | ||
2806 | r = (void *) &r6; | 2814 | r = (void *) &r6; |
2807 | } else { /* ipv4 */ | 2815 | } else { /* ipv4 */ |
2808 | struct rtentry32 __user *ur4 = argp; | 2816 | struct rtentry32 __user *ur4 = argp; |
2809 | ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), | 2817 | ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst), |
2810 | 3 * sizeof(struct sockaddr)); | 2818 | 3 * sizeof(struct sockaddr)); |
2811 | ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); | 2819 | ret |= __get_user(r4.rt_flags, &(ur4->rt_flags)); |
2812 | ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); | 2820 | ret |= __get_user(r4.rt_metric, &(ur4->rt_metric)); |
2813 | ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); | 2821 | ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu)); |
2814 | ret |= __get_user (r4.rt_window, &(ur4->rt_window)); | 2822 | ret |= __get_user(r4.rt_window, &(ur4->rt_window)); |
2815 | ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); | 2823 | ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt)); |
2816 | ret |= __get_user (rtdev, &(ur4->rt_dev)); | 2824 | ret |= __get_user(rtdev, &(ur4->rt_dev)); |
2817 | if (rtdev) { | 2825 | if (rtdev) { |
2818 | ret |= copy_from_user (devname, compat_ptr(rtdev), 15); | 2826 | ret |= copy_from_user(devname, compat_ptr(rtdev), 15); |
2819 | r4.rt_dev = devname; devname[15] = 0; | 2827 | r4.rt_dev = devname; devname[15] = 0; |
2820 | } else | 2828 | } else |
2821 | r4.rt_dev = NULL; | 2829 | r4.rt_dev = NULL; |
@@ -2828,9 +2836,9 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
2828 | goto out; | 2836 | goto out; |
2829 | } | 2837 | } |
2830 | 2838 | ||
2831 | set_fs (KERNEL_DS); | 2839 | set_fs(KERNEL_DS); |
2832 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); | 2840 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); |
2833 | set_fs (old_fs); | 2841 | set_fs(old_fs); |
2834 | 2842 | ||
2835 | out: | 2843 | out: |
2836 | return ret; | 2844 | return ret; |
@@ -2993,11 +3001,13 @@ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) | |||
2993 | { | 3001 | { |
2994 | return sock->ops->bind(sock, addr, addrlen); | 3002 | return sock->ops->bind(sock, addr, addrlen); |
2995 | } | 3003 | } |
3004 | EXPORT_SYMBOL(kernel_bind); | ||
2996 | 3005 | ||
2997 | int kernel_listen(struct socket *sock, int backlog) | 3006 | int kernel_listen(struct socket *sock, int backlog) |
2998 | { | 3007 | { |
2999 | return sock->ops->listen(sock, backlog); | 3008 | return sock->ops->listen(sock, backlog); |
3000 | } | 3009 | } |
3010 | EXPORT_SYMBOL(kernel_listen); | ||
3001 | 3011 | ||
3002 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) | 3012 | int kernel_accept(struct socket *sock, struct socket **newsock, int flags) |
3003 | { | 3013 | { |
@@ -3022,24 +3032,28 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags) | |||
3022 | done: | 3032 | done: |
3023 | return err; | 3033 | return err; |
3024 | } | 3034 | } |
3035 | EXPORT_SYMBOL(kernel_accept); | ||
3025 | 3036 | ||
3026 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, | 3037 | int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, |
3027 | int flags) | 3038 | int flags) |
3028 | { | 3039 | { |
3029 | return sock->ops->connect(sock, addr, addrlen, flags); | 3040 | return sock->ops->connect(sock, addr, addrlen, flags); |
3030 | } | 3041 | } |
3042 | EXPORT_SYMBOL(kernel_connect); | ||
3031 | 3043 | ||
3032 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr, | 3044 | int kernel_getsockname(struct socket *sock, struct sockaddr *addr, |
3033 | int *addrlen) | 3045 | int *addrlen) |
3034 | { | 3046 | { |
3035 | return sock->ops->getname(sock, addr, addrlen, 0); | 3047 | return sock->ops->getname(sock, addr, addrlen, 0); |
3036 | } | 3048 | } |
3049 | EXPORT_SYMBOL(kernel_getsockname); | ||
3037 | 3050 | ||
3038 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr, | 3051 | int kernel_getpeername(struct socket *sock, struct sockaddr *addr, |
3039 | int *addrlen) | 3052 | int *addrlen) |
3040 | { | 3053 | { |
3041 | return sock->ops->getname(sock, addr, addrlen, 1); | 3054 | return sock->ops->getname(sock, addr, addrlen, 1); |
3042 | } | 3055 | } |
3056 | EXPORT_SYMBOL(kernel_getpeername); | ||
3043 | 3057 | ||
3044 | int kernel_getsockopt(struct socket *sock, int level, int optname, | 3058 | int kernel_getsockopt(struct socket *sock, int level, int optname, |
3045 | char *optval, int *optlen) | 3059 | char *optval, int *optlen) |
@@ -3056,6 +3070,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname, | |||
3056 | set_fs(oldfs); | 3070 | set_fs(oldfs); |
3057 | return err; | 3071 | return err; |
3058 | } | 3072 | } |
3073 | EXPORT_SYMBOL(kernel_getsockopt); | ||
3059 | 3074 | ||
3060 | int kernel_setsockopt(struct socket *sock, int level, int optname, | 3075 | int kernel_setsockopt(struct socket *sock, int level, int optname, |
3061 | char *optval, unsigned int optlen) | 3076 | char *optval, unsigned int optlen) |
@@ -3072,6 +3087,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, | |||
3072 | set_fs(oldfs); | 3087 | set_fs(oldfs); |
3073 | return err; | 3088 | return err; |
3074 | } | 3089 | } |
3090 | EXPORT_SYMBOL(kernel_setsockopt); | ||
3075 | 3091 | ||
3076 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, | 3092 | int kernel_sendpage(struct socket *sock, struct page *page, int offset, |
3077 | size_t size, int flags) | 3093 | size_t size, int flags) |
@@ -3083,6 +3099,7 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, | |||
3083 | 3099 | ||
3084 | return sock_no_sendpage(sock, page, offset, size, flags); | 3100 | return sock_no_sendpage(sock, page, offset, size, flags); |
3085 | } | 3101 | } |
3102 | EXPORT_SYMBOL(kernel_sendpage); | ||
3086 | 3103 | ||
3087 | int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) | 3104 | int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) |
3088 | { | 3105 | { |
@@ -3095,33 +3112,10 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) | |||
3095 | 3112 | ||
3096 | return err; | 3113 | return err; |
3097 | } | 3114 | } |
3115 | EXPORT_SYMBOL(kernel_sock_ioctl); | ||
3098 | 3116 | ||
3099 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) | 3117 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) |
3100 | { | 3118 | { |
3101 | return sock->ops->shutdown(sock, how); | 3119 | return sock->ops->shutdown(sock, how); |
3102 | } | 3120 | } |
3103 | |||
3104 | EXPORT_SYMBOL(sock_create); | ||
3105 | EXPORT_SYMBOL(sock_create_kern); | ||
3106 | EXPORT_SYMBOL(sock_create_lite); | ||
3107 | EXPORT_SYMBOL(sock_map_fd); | ||
3108 | EXPORT_SYMBOL(sock_recvmsg); | ||
3109 | EXPORT_SYMBOL(sock_register); | ||
3110 | EXPORT_SYMBOL(sock_release); | ||
3111 | EXPORT_SYMBOL(sock_sendmsg); | ||
3112 | EXPORT_SYMBOL(sock_unregister); | ||
3113 | EXPORT_SYMBOL(sock_wake_async); | ||
3114 | EXPORT_SYMBOL(sockfd_lookup); | ||
3115 | EXPORT_SYMBOL(kernel_sendmsg); | ||
3116 | EXPORT_SYMBOL(kernel_recvmsg); | ||
3117 | EXPORT_SYMBOL(kernel_bind); | ||
3118 | EXPORT_SYMBOL(kernel_listen); | ||
3119 | EXPORT_SYMBOL(kernel_accept); | ||
3120 | EXPORT_SYMBOL(kernel_connect); | ||
3121 | EXPORT_SYMBOL(kernel_getsockname); | ||
3122 | EXPORT_SYMBOL(kernel_getpeername); | ||
3123 | EXPORT_SYMBOL(kernel_getsockopt); | ||
3124 | EXPORT_SYMBOL(kernel_setsockopt); | ||
3125 | EXPORT_SYMBOL(kernel_sendpage); | ||
3126 | EXPORT_SYMBOL(kernel_sock_ioctl); | ||
3127 | EXPORT_SYMBOL(kernel_sock_shutdown); | 3121 | EXPORT_SYMBOL(kernel_sock_shutdown); |
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index b01a6f6397d7..d0c92dddb26b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -35,8 +35,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | |||
35 | if (!ht_cap->ht_supported) | 35 | if (!ht_cap->ht_supported) |
36 | return NULL; | 36 | return NULL; |
37 | 37 | ||
38 | if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | 38 | if (channel_type != NL80211_CHAN_HT20 && |
39 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) | 39 | (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || |
40 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) | ||
40 | return NULL; | 41 | return NULL; |
41 | } | 42 | } |
42 | 43 | ||
diff --git a/net/wireless/core.h b/net/wireless/core.h index ae930acf75e9..63d57ae399c3 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -339,6 +339,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
339 | struct net_device *dev, | 339 | struct net_device *dev, |
340 | struct ieee80211_channel *chan, | 340 | struct ieee80211_channel *chan, |
341 | enum nl80211_channel_type channel_type, | 341 | enum nl80211_channel_type channel_type, |
342 | bool channel_type_valid, | ||
342 | const u8 *buf, size_t len, u64 *cookie); | 343 | const u8 *buf, size_t len, u64 *cookie); |
343 | 344 | ||
344 | /* SME */ | 345 | /* SME */ |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 48ead6f0426d..f69ae19f497f 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -827,6 +827,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
827 | struct net_device *dev, | 827 | struct net_device *dev, |
828 | struct ieee80211_channel *chan, | 828 | struct ieee80211_channel *chan, |
829 | enum nl80211_channel_type channel_type, | 829 | enum nl80211_channel_type channel_type, |
830 | bool channel_type_valid, | ||
830 | const u8 *buf, size_t len, u64 *cookie) | 831 | const u8 *buf, size_t len, u64 *cookie) |
831 | { | 832 | { |
832 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 833 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -855,7 +856,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
855 | 856 | ||
856 | /* Transmit the Action frame as requested by user space */ | 857 | /* Transmit the Action frame as requested by user space */ |
857 | return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, | 858 | return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, |
858 | buf, len, cookie); | 859 | channel_type_valid, buf, len, cookie); |
859 | } | 860 | } |
860 | 861 | ||
861 | bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, | 862 | bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index db71150b8040..90ab3c8519be 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4681,6 +4681,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4681 | struct net_device *dev; | 4681 | struct net_device *dev; |
4682 | struct ieee80211_channel *chan; | 4682 | struct ieee80211_channel *chan; |
4683 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 4683 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; |
4684 | bool channel_type_valid = false; | ||
4684 | u32 freq; | 4685 | u32 freq; |
4685 | int err; | 4686 | int err; |
4686 | void *hdr; | 4687 | void *hdr; |
@@ -4722,6 +4723,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4722 | err = -EINVAL; | 4723 | err = -EINVAL; |
4723 | goto out; | 4724 | goto out; |
4724 | } | 4725 | } |
4726 | channel_type_valid = true; | ||
4725 | } | 4727 | } |
4726 | 4728 | ||
4727 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4729 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
@@ -4745,6 +4747,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4745 | goto free_msg; | 4747 | goto free_msg; |
4746 | } | 4748 | } |
4747 | err = cfg80211_mlme_action(rdev, dev, chan, channel_type, | 4749 | err = cfg80211_mlme_action(rdev, dev, chan, channel_type, |
4750 | channel_type_valid, | ||
4748 | nla_data(info->attrs[NL80211_ATTR_FRAME]), | 4751 | nla_data(info->attrs[NL80211_ATTR_FRAME]), |
4749 | nla_len(info->attrs[NL80211_ATTR_FRAME]), | 4752 | nla_len(info->attrs[NL80211_ATTR_FRAME]), |
4750 | &cookie); | 4753 | &cookie); |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 6a329158bdfa..a3cca0a94346 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -95,13 +95,13 @@ resume: | |||
95 | goto error_nolock; | 95 | goto error_nolock; |
96 | } | 96 | } |
97 | 97 | ||
98 | dst = dst_pop(dst); | 98 | dst = skb_dst_pop(skb); |
99 | if (!dst) { | 99 | if (!dst) { |
100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); | 100 | XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); |
101 | err = -EHOSTUNREACH; | 101 | err = -EHOSTUNREACH; |
102 | goto error_nolock; | 102 | goto error_nolock; |
103 | } | 103 | } |
104 | skb_dst_set(skb, dst); | 104 | skb_dst_set_noref(skb, dst); |
105 | x = dst->xfrm; | 105 | x = dst->xfrm; |
106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); | 106 | } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); |
107 | 107 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d965a2bad8d3..4bf27d901333 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) | |||
2153 | return 0; | 2153 | return 0; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | skb_dst_force(skb); | ||
2156 | dst = skb_dst(skb); | 2157 | dst = skb_dst(skb); |
2157 | 2158 | ||
2158 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; | 2159 | res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0; |