author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-11 12:10:19 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-11 12:10:19 -0400
commit     ee54d2d87a8158d14434c1a3274bd7f713105836 (patch)
tree       cd3e1f6fc0a7fc920e4153c01f35ff7bd92d79da /net
parent     bf61f8d357e5d71d74a3ca3be3cce52bf1a2c01a (diff)
parent     da0dd231436ba7e81789e93dd933d7a275e1709d (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (31 commits)
  [NETFILTER]: xt_conntrack: add compat support
  [NETFILTER]: iptable_raw: ignore short packets sent by SOCK_RAW sockets
  [NETFILTER]: iptable_{filter,mangle}: more descriptive "happy cracking" message
  [NETFILTER]: nf_nat: Clears helper private area when NATing
  [NETFILTER]: ctnetlink: clear helper area and handle unchanged helper
  [NETFILTER]: nf_conntrack: Removes unused destroy operation of l3proto
  [NETFILTER]: nf_conntrack: Removes duplicated declarations
  [NETFILTER]: nf_nat: remove unused argument of function allocating binding
  [NETFILTER]: Clean up table initialization
  [NET_SCHED]: Avoid requeue warning on dev_deactivate
  [NET_SCHED]: Reread dev->qdisc for NETDEV_TX_OK
  [NET_SCHED]: Rationalise return value of qdisc_restart
  [NET]: Fix dev->qdisc race for NETDEV_TX_LOCKED case
  [UDP]: Fix AF-specific references in AF-agnostic code.
  [IrDA]: KingSun/DonShine USB IrDA dongle support.
  [IPV6] ROUTE: Assign rt6i_idev for ip6_{prohibit,blk_hole}_entry.
  [IPV6]: Do no rely on skb->dst before it is assigned.
  [IPV6]: Send ICMPv6 error on scope violations.
  [SCTP]: Do not include ABORT chunk header in the notification.
  [SCTP]: Correctly copy addresses in sctp_copy_laddrs
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/hidp/core.c                  |  10
-rw-r--r--  net/core/link_watch.c                      | 166
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c       | 140
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c        |  73
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c        |  99
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c           |  79
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c           |  86
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c     |  11
-rw-r--r--  net/ipv4/udp.c                             |  85
-rw-r--r--  net/ipv4/udp_impl.h                        |   6
-rw-r--r--  net/ipv4/udplite.c                         |   7
-rw-r--r--  net/ipv6/addrconf.c                        |   4
-rw-r--r--  net/ipv6/exthdrs.c                         |  16
-rw-r--r--  net/ipv6/ip6_output.c                      |  13
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c       |  70
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c       |  96
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c          |  52
-rw-r--r--  net/ipv6/udp.c                             |  21
-rw-r--r--  net/ipv6/udp_impl.h                        |   2
-rw-r--r--  net/ipv6/udplite.c                         |   2
-rw-r--r--  net/mac80211/ieee80211_sta.c               |   2
-rw-r--r--  net/netfilter/nf_conntrack_core.c          |  14
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c       |  40
-rw-r--r--  net/netfilter/xt_conntrack.c               |  54
-rw-r--r--  net/sched/sch_generic.c                    |  41
-rw-r--r--  net/sched/sch_teql.c                       |   5
-rw-r--r--  net/sctp/socket.c                          |  19
-rw-r--r--  net/sctp/ulpevent.c                        |  11
28 files changed, 521 insertions, 703 deletions
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index d342e89b8bdd..0ea40ab4dbae 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -174,7 +174,7 @@ static inline int hidp_queue_event(struct hidp_session *session, struct input_de
174 174
175static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 175static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
176{ 176{
177 struct hid_device *hid = dev->private; 177 struct hid_device *hid = input_get_drvdata(dev);
178 struct hidp_session *session = hid->driver_data; 178 struct hidp_session *session = hid->driver_data;
179 179
180 return hidp_queue_event(session, dev, type, code, value); 180 return hidp_queue_event(session, dev, type, code, value);
@@ -182,7 +182,7 @@ static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigne
182 182
183static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 183static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
184{ 184{
185 struct hidp_session *session = dev->private; 185 struct hidp_session *session = input_get_drvdata(dev);
186 186
187 return hidp_queue_event(session, dev, type, code, value); 187 return hidp_queue_event(session, dev, type, code, value);
188} 188}
@@ -630,7 +630,7 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
630 struct input_dev *input = session->input; 630 struct input_dev *input = session->input;
631 int i; 631 int i;
632 632
633 input->private = session; 633 input_set_drvdata(input, session);
634 634
635 input->name = "Bluetooth HID Boot Protocol Device"; 635 input->name = "Bluetooth HID Boot Protocol Device";
636 636
@@ -663,7 +663,7 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
663 input->relbit[0] |= BIT(REL_WHEEL); 663 input->relbit[0] |= BIT(REL_WHEEL);
664 } 664 }
665 665
666 input->cdev.dev = hidp_get_device(session); 666 input->dev.parent = hidp_get_device(session);
667 667
668 input->event = hidp_input_event; 668 input->event = hidp_input_event;
669 669
@@ -864,7 +864,7 @@ failed:
864 if (session->hid) 864 if (session->hid)
865 hid_free_device(session->hid); 865 hid_free_device(session->hid);
866 866
867 kfree(session->input); 867 input_free_device(session->input);
868 kfree(session); 868 kfree(session);
869 return err; 869 return err;
870} 870}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index e3c26a9ccad6..a5e372b9ec4d 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
19#include <linux/rtnetlink.h> 19#include <linux/rtnetlink.h>
20#include <linux/jiffies.h> 20#include <linux/jiffies.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/list.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <linux/workqueue.h> 23#include <linux/workqueue.h>
25#include <linux/bitops.h> 24#include <linux/bitops.h>
@@ -27,8 +26,7 @@
27 26
28 27
29enum lw_bits { 28enum lw_bits {
30 LW_RUNNING = 0, 29 LW_URGENT = 0,
31 LW_SE_USED
32}; 30};
33 31
34static unsigned long linkwatch_flags; 32static unsigned long linkwatch_flags;
@@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent;
37static void linkwatch_event(struct work_struct *dummy); 35static void linkwatch_event(struct work_struct *dummy);
38static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); 36static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
39 37
40static LIST_HEAD(lweventlist); 38static struct net_device *lweventlist;
41static DEFINE_SPINLOCK(lweventlist_lock); 39static DEFINE_SPINLOCK(lweventlist_lock);
42 40
43struct lw_event {
44 struct list_head list;
45 struct net_device *dev;
46};
47
48/* Avoid kmalloc() for most systems */
49static struct lw_event singleevent;
50
51static unsigned char default_operstate(const struct net_device *dev) 41static unsigned char default_operstate(const struct net_device *dev)
52{ 42{
53 if (!netif_carrier_ok(dev)) 43 if (!netif_carrier_ok(dev))
@@ -87,25 +77,102 @@ static void rfc2863_policy(struct net_device *dev)
87} 77}
88 78
89 79
90/* Must be called with the rtnl semaphore held */ 80static int linkwatch_urgent_event(struct net_device *dev)
91void linkwatch_run_queue(void)
92{ 81{
93 struct list_head head, *n, *next; 82 return netif_running(dev) && netif_carrier_ok(dev) &&
83 dev->qdisc != dev->qdisc_sleeping;
84}
85
86
87static void linkwatch_add_event(struct net_device *dev)
88{
89 unsigned long flags;
90
91 spin_lock_irqsave(&lweventlist_lock, flags);
92 dev->link_watch_next = lweventlist;
93 lweventlist = dev;
94 spin_unlock_irqrestore(&lweventlist_lock, flags);
95}
96
97
98static void linkwatch_schedule_work(int urgent)
99{
100 unsigned long delay = linkwatch_nextevent - jiffies;
101
102 if (test_bit(LW_URGENT, &linkwatch_flags))
103 return;
104
105 /* Minimise down-time: drop delay for up event. */
106 if (urgent) {
107 if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
108 return;
109 delay = 0;
110 }
111
112 /* If we wrap around we'll delay it by at most HZ. */
113 if (delay > HZ)
114 delay = 0;
115
116 /*
117 * This is true if we've scheduled it immeditately or if we don't
118 * need an immediate execution and it's already pending.
119 */
120 if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
121 return;
122
123 /* Don't bother if there is nothing urgent. */
124 if (!test_bit(LW_URGENT, &linkwatch_flags))
125 return;
126
127 /* It's already running which is good enough. */
128 if (!cancel_delayed_work(&linkwatch_work))
129 return;
130
131 /* Otherwise we reschedule it again for immediate exection. */
132 schedule_delayed_work(&linkwatch_work, 0);
133}
134
135
136static void __linkwatch_run_queue(int urgent_only)
137{
138 struct net_device *next;
139
140 /*
141 * Limit the number of linkwatch events to one
142 * per second so that a runaway driver does not
143 * cause a storm of messages on the netlink
144 * socket. This limit does not apply to up events
145 * while the device qdisc is down.
146 */
147 if (!urgent_only)
148 linkwatch_nextevent = jiffies + HZ;
149 /* Limit wrap-around effect on delay. */
150 else if (time_after(linkwatch_nextevent, jiffies + HZ))
151 linkwatch_nextevent = jiffies;
152
153 clear_bit(LW_URGENT, &linkwatch_flags);
94 154
95 spin_lock_irq(&lweventlist_lock); 155 spin_lock_irq(&lweventlist_lock);
96 list_replace_init(&lweventlist, &head); 156 next = lweventlist;
157 lweventlist = NULL;
97 spin_unlock_irq(&lweventlist_lock); 158 spin_unlock_irq(&lweventlist_lock);
98 159
99 list_for_each_safe(n, next, &head) { 160 while (next) {
100 struct lw_event *event = list_entry(n, struct lw_event, list); 161 struct net_device *dev = next;
101 struct net_device *dev = event->dev;
102 162
103 if (event == &singleevent) { 163 next = dev->link_watch_next;
104 clear_bit(LW_SE_USED, &linkwatch_flags); 164
105 } else { 165 if (urgent_only && !linkwatch_urgent_event(dev)) {
106 kfree(event); 166 linkwatch_add_event(dev);
167 continue;
107 } 168 }
108 169
170 /*
171 * Make sure the above read is complete since it can be
172 * rewritten as soon as we clear the bit below.
173 */
174 smp_mb__before_clear_bit();
175
109 /* We are about to handle this device, 176 /* We are about to handle this device,
110 * so new events can be accepted 177 * so new events can be accepted
111 */ 178 */
@@ -124,58 +191,39 @@ void linkwatch_run_queue(void)
124 191
125 dev_put(dev); 192 dev_put(dev);
126 } 193 }
194
195 if (lweventlist)
196 linkwatch_schedule_work(0);
127} 197}
128 198
129 199
130static void linkwatch_event(struct work_struct *dummy) 200/* Must be called with the rtnl semaphore held */
201void linkwatch_run_queue(void)
131{ 202{
132 /* Limit the number of linkwatch events to one 203 __linkwatch_run_queue(0);
133 * per second so that a runaway driver does not 204}
134 * cause a storm of messages on the netlink
135 * socket
136 */
137 linkwatch_nextevent = jiffies + HZ;
138 clear_bit(LW_RUNNING, &linkwatch_flags);
139 205
206
207static void linkwatch_event(struct work_struct *dummy)
208{
140 rtnl_lock(); 209 rtnl_lock();
141 linkwatch_run_queue(); 210 __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
142 rtnl_unlock(); 211 rtnl_unlock();
143} 212}
144 213
145 214
146void linkwatch_fire_event(struct net_device *dev) 215void linkwatch_fire_event(struct net_device *dev)
147{ 216{
148 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 217 int urgent = linkwatch_urgent_event(dev);
149 unsigned long flags;
150 struct lw_event *event;
151
152 if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
153 event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);
154
155 if (unlikely(event == NULL)) {
156 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
157 return;
158 }
159 } else {
160 event = &singleevent;
161 }
162 218
219 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
163 dev_hold(dev); 220 dev_hold(dev);
164 event->dev = dev;
165
166 spin_lock_irqsave(&lweventlist_lock, flags);
167 list_add_tail(&event->list, &lweventlist);
168 spin_unlock_irqrestore(&lweventlist_lock, flags);
169 221
170 if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { 222 linkwatch_add_event(dev);
171 unsigned long delay = linkwatch_nextevent - jiffies; 223 } else if (!urgent)
224 return;
172 225
173 /* If we wrap around we'll delay it by at most HZ. */ 226 linkwatch_schedule_work(urgent);
174 if (delay > HZ)
175 delay = 0;
176 schedule_delayed_work(&linkwatch_work, delay);
177 }
178 }
179} 227}
180 228
181EXPORT_SYMBOL(linkwatch_fire_event); 229EXPORT_SYMBOL(linkwatch_fire_event);
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 7edea2a1696c..75c023062533 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -15,128 +15,34 @@ MODULE_DESCRIPTION("arptables filter table");
15#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \ 15#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
16 (1 << NF_ARP_FORWARD)) 16 (1 << NF_ARP_FORWARD))
17 17
18/* Standard entry. */
19struct arpt_standard
20{
21 struct arpt_entry entry;
22 struct arpt_standard_target target;
23};
24
25struct arpt_error_target
26{
27 struct arpt_entry_target target;
28 char errorname[ARPT_FUNCTION_MAXNAMELEN];
29};
30
31struct arpt_error
32{
33 struct arpt_entry entry;
34 struct arpt_error_target target;
35};
36
37static struct 18static struct
38{ 19{
39 struct arpt_replace repl; 20 struct arpt_replace repl;
40 struct arpt_standard entries[3]; 21 struct arpt_standard entries[3];
41 struct arpt_error term; 22 struct arpt_error term;
42} initial_table __initdata 23} initial_table __initdata = {
43= { { "filter", FILTER_VALID_HOOKS, 4, 24 .repl = {
44 sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error), 25 .name = "filter",
45 { [NF_ARP_IN] = 0, 26 .valid_hooks = FILTER_VALID_HOOKS,
46 [NF_ARP_OUT] = sizeof(struct arpt_standard), 27 .num_entries = 4,
47 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), }, 28 .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
48 { [NF_ARP_IN] = 0, 29 .hook_entry = {
49 [NF_ARP_OUT] = sizeof(struct arpt_standard), 30 [NF_ARP_IN] = 0,
50 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), }, 31 [NF_ARP_OUT] = sizeof(struct arpt_standard),
51 0, NULL, { } }, 32 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
52 { 33 },
53 /* ARP_IN */ 34 .underflow = {
54 { 35 [NF_ARP_IN] = 0,
55 { 36 [NF_ARP_OUT] = sizeof(struct arpt_standard),
56 { 37 [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
57 { 0 }, { 0 }, { 0 }, { 0 }, 38 },
58 0, 0, 39 },
59 { { 0, }, { 0, } }, 40 .entries = {
60 { { 0, }, { 0, } }, 41 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
61 0, 0, 42 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
62 0, 0, 43 ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
63 0, 0, 44 },
64 "", "", { 0 }, { 0 }, 45 .term = ARPT_ERROR_INIT,
65 0, 0
66 },
67 sizeof(struct arpt_entry),
68 sizeof(struct arpt_standard),
69 0,
70 { 0, 0 }, { } },
71 { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
72 -NF_ACCEPT - 1 }
73 },
74 /* ARP_OUT */
75 {
76 {
77 {
78 { 0 }, { 0 }, { 0 }, { 0 },
79 0, 0,
80 { { 0, }, { 0, } },
81 { { 0, }, { 0, } },
82 0, 0,
83 0, 0,
84 0, 0,
85 "", "", { 0 }, { 0 },
86 0, 0
87 },
88 sizeof(struct arpt_entry),
89 sizeof(struct arpt_standard),
90 0,
91 { 0, 0 }, { } },
92 { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
93 -NF_ACCEPT - 1 }
94 },
95 /* ARP_FORWARD */
96 {
97 {
98 {
99 { 0 }, { 0 }, { 0 }, { 0 },
100 0, 0,
101 { { 0, }, { 0, } },
102 { { 0, }, { 0, } },
103 0, 0,
104 0, 0,
105 0, 0,
106 "", "", { 0 }, { 0 },
107 0, 0
108 },
109 sizeof(struct arpt_entry),
110 sizeof(struct arpt_standard),
111 0,
112 { 0, 0 }, { } },
113 { { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
114 -NF_ACCEPT - 1 }
115 }
116 },
117 /* ERROR */
118 {
119 {
120 {
121 { 0 }, { 0 }, { 0 }, { 0 },
122 0, 0,
123 { { 0, }, { 0, } },
124 { { 0, }, { 0, } },
125 0, 0,
126 0, 0,
127 0, 0,
128 "", "", { 0 }, { 0 },
129 0, 0
130 },
131 sizeof(struct arpt_entry),
132 sizeof(struct arpt_error),
133 0,
134 { 0, 0 }, { } },
135 { { { { ARPT_ALIGN(sizeof(struct arpt_error_target)), ARPT_ERROR_TARGET } },
136 { } },
137 "ERROR"
138 }
139 }
140}; 46};
141 47
142static struct arpt_table packet_filter = { 48static struct arpt_table packet_filter = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 42728909eba0..4f51c1d7d2d6 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -26,53 +26,29 @@ static struct
26 struct ipt_replace repl; 26 struct ipt_replace repl;
27 struct ipt_standard entries[3]; 27 struct ipt_standard entries[3];
28 struct ipt_error term; 28 struct ipt_error term;
29} initial_table __initdata 29} initial_table __initdata = {
30= { { "filter", FILTER_VALID_HOOKS, 4, 30 .repl = {
31 sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error), 31 .name = "filter",
32 { [NF_IP_LOCAL_IN] = 0, 32 .valid_hooks = FILTER_VALID_HOOKS,
33 [NF_IP_FORWARD] = sizeof(struct ipt_standard), 33 .num_entries = 4,
34 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, 34 .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
35 { [NF_IP_LOCAL_IN] = 0, 35 .hook_entry = {
36 [NF_IP_FORWARD] = sizeof(struct ipt_standard), 36 [NF_IP_LOCAL_IN] = 0,
37 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, 37 [NF_IP_FORWARD] = sizeof(struct ipt_standard),
38 0, NULL, { } }, 38 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
39 { 39 },
40 /* LOCAL_IN */ 40 .underflow = {
41 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 41 [NF_IP_LOCAL_IN] = 0,
42 0, 42 [NF_IP_FORWARD] = sizeof(struct ipt_standard),
43 sizeof(struct ipt_entry), 43 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
44 sizeof(struct ipt_standard), 44 },
45 0, { 0, 0 }, { } }, 45 },
46 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, 46 .entries = {
47 -NF_ACCEPT - 1 } }, 47 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
48 /* FORWARD */ 48 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
49 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 49 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
50 0, 50 },
51 sizeof(struct ipt_entry), 51 .term = IPT_ERROR_INIT, /* ERROR */
52 sizeof(struct ipt_standard),
53 0, { 0, 0 }, { } },
54 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
55 -NF_ACCEPT - 1 } },
56 /* LOCAL_OUT */
57 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
58 0,
59 sizeof(struct ipt_entry),
60 sizeof(struct ipt_standard),
61 0, { 0, 0 }, { } },
62 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
63 -NF_ACCEPT - 1 } }
64 },
65 /* ERROR */
66 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
67 0,
68 sizeof(struct ipt_entry),
69 sizeof(struct ipt_error),
70 0, { 0, 0 }, { } },
71 { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
72 { } },
73 "ERROR"
74 }
75 }
76}; 52};
77 53
78static struct xt_table packet_filter = { 54static struct xt_table packet_filter = {
@@ -105,7 +81,8 @@ ipt_local_out_hook(unsigned int hook,
105 if ((*pskb)->len < sizeof(struct iphdr) 81 if ((*pskb)->len < sizeof(struct iphdr)
106 || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { 82 || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
107 if (net_ratelimit()) 83 if (net_ratelimit())
108 printk("ipt_hook: happy cracking.\n"); 84 printk("iptable_filter: ignoring short SOCK_RAW "
85 "packet.\n");
109 return NF_ACCEPT; 86 return NF_ACCEPT;
110 } 87 }
111 88
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 9278802f2742..902446f7cbca 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -33,73 +33,35 @@ static struct
33 struct ipt_replace repl; 33 struct ipt_replace repl;
34 struct ipt_standard entries[5]; 34 struct ipt_standard entries[5];
35 struct ipt_error term; 35 struct ipt_error term;
36} initial_table __initdata 36} initial_table __initdata = {
37= { { "mangle", MANGLE_VALID_HOOKS, 6, 37 .repl = {
38 sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error), 38 .name = "mangle",
39 { [NF_IP_PRE_ROUTING] = 0, 39 .valid_hooks = MANGLE_VALID_HOOKS,
40 [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), 40 .num_entries = 6,
41 [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, 41 .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
42 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, 42 .hook_entry = {
43 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 }, 43 [NF_IP_PRE_ROUTING] = 0,
44 { [NF_IP_PRE_ROUTING] = 0, 44 [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
45 [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard), 45 [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
46 [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2, 46 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
47 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3, 47 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
48 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 }, 48 },
49 0, NULL, { } }, 49 .underflow = {
50 { 50 [NF_IP_PRE_ROUTING] = 0,
51 /* PRE_ROUTING */ 51 [NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
52 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 52 [NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
53 0, 53 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
54 sizeof(struct ipt_entry), 54 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
55 sizeof(struct ipt_standard), 55 },
56 0, { 0, 0 }, { } }, 56 },
57 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } }, 57 .entries = {
58 -NF_ACCEPT - 1 } }, 58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
59 /* LOCAL_IN */ 59 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
60 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 60 IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
61 0, 61 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
62 sizeof(struct ipt_entry), 62 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
63 sizeof(struct ipt_standard), 63 },
64 0, { 0, 0 }, { } }, 64 .term = IPT_ERROR_INIT, /* ERROR */
65 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
66 -NF_ACCEPT - 1 } },
67 /* FORWARD */
68 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
69 0,
70 sizeof(struct ipt_entry),
71 sizeof(struct ipt_standard),
72 0, { 0, 0 }, { } },
73 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
74 -NF_ACCEPT - 1 } },
75 /* LOCAL_OUT */
76 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
77 0,
78 sizeof(struct ipt_entry),
79 sizeof(struct ipt_standard),
80 0, { 0, 0 }, { } },
81 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
82 -NF_ACCEPT - 1 } },
83 /* POST_ROUTING */
84 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
85 0,
86 sizeof(struct ipt_entry),
87 sizeof(struct ipt_standard),
88 0, { 0, 0 }, { } },
89 { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
90 -NF_ACCEPT - 1 } },
91 },
92 /* ERROR */
93 { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
94 0,
95 sizeof(struct ipt_entry),
96 sizeof(struct ipt_error),
97 0, { 0, 0 }, { } },
98 { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
99 { } },
100 "ERROR"
101 }
102 }
103}; 65};
104 66
105static struct xt_table packet_mangler = { 67static struct xt_table packet_mangler = {
@@ -138,7 +100,8 @@ ipt_local_hook(unsigned int hook,
138 if ((*pskb)->len < sizeof(struct iphdr) 100 if ((*pskb)->len < sizeof(struct iphdr)
139 || ip_hdrlen(*pskb) < sizeof(struct iphdr)) { 101 || ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
140 if (net_ratelimit()) 102 if (net_ratelimit())
141 printk("ipt_hook: happy cracking.\n"); 103 printk("iptable_mangle: ignoring short SOCK_RAW "
104 "packet.\n");
142 return NF_ACCEPT; 105 return NF_ACCEPT;
143 } 106 }
144 107
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 18c3d4c9ff51..d6e503395684 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -5,6 +5,7 @@
5 */ 5 */
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/netfilter_ipv4/ip_tables.h> 7#include <linux/netfilter_ipv4/ip_tables.h>
8#include <net/ip.h>
8 9
9#define RAW_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT)) 10#define RAW_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT))
10 11
@@ -21,62 +22,18 @@ static struct
21 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error), 22 .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
22 .hook_entry = { 23 .hook_entry = {
23 [NF_IP_PRE_ROUTING] = 0, 24 [NF_IP_PRE_ROUTING] = 0,
24 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, 25 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard)
26 },
25 .underflow = { 27 .underflow = {
26 [NF_IP_PRE_ROUTING] = 0, 28 [NF_IP_PRE_ROUTING] = 0,
27 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) }, 29 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard)
30 },
28 }, 31 },
29 .entries = { 32 .entries = {
30 /* PRE_ROUTING */ 33 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
31 { 34 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
32 .entry = {
33 .target_offset = sizeof(struct ipt_entry),
34 .next_offset = sizeof(struct ipt_standard),
35 },
36 .target = {
37 .target = {
38 .u = {
39 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
40 },
41 },
42 .verdict = -NF_ACCEPT - 1,
43 },
44 },
45
46 /* LOCAL_OUT */
47 {
48 .entry = {
49 .target_offset = sizeof(struct ipt_entry),
50 .next_offset = sizeof(struct ipt_standard),
51 },
52 .target = {
53 .target = {
54 .u = {
55 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
56 },
57 },
58 .verdict = -NF_ACCEPT - 1,
59 },
60 },
61 }, 35 },
62 /* ERROR */ 36 .term = IPT_ERROR_INIT, /* ERROR */
63 .term = {
64 .entry = {
65 .target_offset = sizeof(struct ipt_entry),
66 .next_offset = sizeof(struct ipt_error),
67 },
68 .target = {
69 .target = {
70 .u = {
71 .user = {
72 .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
73 .name = IPT_ERROR_TARGET,
74 },
75 },
76 },
77 .errorname = "ERROR",
78 },
79 }
80}; 37};
81 38
82static struct xt_table packet_raw = { 39static struct xt_table packet_raw = {
@@ -98,6 +55,24 @@ ipt_hook(unsigned int hook,
98 return ipt_do_table(pskb, hook, in, out, &packet_raw); 55 return ipt_do_table(pskb, hook, in, out, &packet_raw);
99} 56}
100 57
58static unsigned int
59ipt_local_hook(unsigned int hook,
60 struct sk_buff **pskb,
61 const struct net_device *in,
62 const struct net_device *out,
63 int (*okfn)(struct sk_buff *))
64{
65 /* root is playing with raw sockets. */
66 if ((*pskb)->len < sizeof(struct iphdr) ||
67 ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
68 if (net_ratelimit())
69 printk("iptable_raw: ignoring short SOCK_RAW"
70 "packet.\n");
71 return NF_ACCEPT;
72 }
73 return ipt_do_table(pskb, hook, in, out, &packet_raw);
74}
75
101/* 'raw' is the very first table. */ 76/* 'raw' is the very first table. */
102static struct nf_hook_ops ipt_ops[] = { 77static struct nf_hook_ops ipt_ops[] = {
103 { 78 {
@@ -108,7 +83,7 @@ static struct nf_hook_ops ipt_ops[] = {
108 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
109 }, 84 },
110 { 85 {
111 .hook = ipt_hook, 86 .hook = ipt_local_hook,
112 .pf = PF_INET, 87 .pf = PF_INET,
113 .hooknum = NF_IP_LOCAL_OUT, 88 .hooknum = NF_IP_LOCAL_OUT,
114 .priority = NF_IP_PRI_RAW, 89 .priority = NF_IP_PRI_RAW,
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 2534f718ab92..6740736c5e79 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -46,77 +46,20 @@ static struct
46 .hook_entry = { 46 .hook_entry = {
47 [NF_IP_PRE_ROUTING] = 0, 47 [NF_IP_PRE_ROUTING] = 0,
48 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), 48 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
49 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, 49 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
50 },
50 .underflow = { 51 .underflow = {
51 [NF_IP_PRE_ROUTING] = 0, 52 [NF_IP_PRE_ROUTING] = 0,
52 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard), 53 [NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
53 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 }, 54 [NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
55 },
54 }, 56 },
55 .entries = { 57 .entries = {
56 /* PRE_ROUTING */ 58 IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
57 { 59 IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
58 .entry = { 60 IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
59 .target_offset = sizeof(struct ipt_entry),
60 .next_offset = sizeof(struct ipt_standard),
61 },
62 .target = {
63 .target = {
64 .u = {
65 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
66 },
67 },
68 .verdict = -NF_ACCEPT - 1,
69 },
70 },
71 /* POST_ROUTING */
72 {
73 .entry = {
74 .target_offset = sizeof(struct ipt_entry),
75 .next_offset = sizeof(struct ipt_standard),
76 },
77 .target = {
78 .target = {
79 .u = {
80 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
81 },
82 },
83 .verdict = -NF_ACCEPT - 1,
84 },
85 },
86 /* LOCAL_OUT */
87 {
88 .entry = {
89 .target_offset = sizeof(struct ipt_entry),
90 .next_offset = sizeof(struct ipt_standard),
91 },
92 .target = {
93 .target = {
94 .u = {
95 .target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
96 },
97 },
98 .verdict = -NF_ACCEPT - 1,
99 },
100 },
101 }, 61 },
102 /* ERROR */ 62 .term = IPT_ERROR_INIT, /* ERROR */
103 .term = {
104 .entry = {
105 .target_offset = sizeof(struct ipt_entry),
106 .next_offset = sizeof(struct ipt_error),
107 },
108 .target = {
109 .target = {
110 .u = {
111 .user = {
112 .target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
113 .name = IPT_ERROR_TARGET,
114 },
115 },
116 },
117 .errorname = "ERROR",
118 },
119 }
120}; 63};
121 64
122static struct xt_table nat_table = { 65static struct xt_table nat_table = {
@@ -230,9 +173,7 @@ static int ipt_dnat_checkentry(const char *tablename,
230} 173}
231 174
232inline unsigned int 175inline unsigned int
233alloc_null_binding(struct nf_conn *ct, 176alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
234 struct nf_nat_info *info,
235 unsigned int hooknum)
236{ 177{
237 /* Force range to this IP; let proto decide mapping for 178 /* Force range to this IP; let proto decide mapping for
238 per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). 179 per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
@@ -251,9 +192,7 @@ alloc_null_binding(struct nf_conn *ct,
251} 192}
252 193
253unsigned int 194unsigned int
254alloc_null_binding_confirmed(struct nf_conn *ct, 195alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
255 struct nf_nat_info *info,
256 unsigned int hooknum)
257{ 196{
258 __be32 ip 197 __be32 ip
259 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC 198 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
@@ -275,8 +214,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
275 unsigned int hooknum, 214 unsigned int hooknum,
276 const struct net_device *in, 215 const struct net_device *in,
277 const struct net_device *out, 216 const struct net_device *out,
278 struct nf_conn *ct, 217 struct nf_conn *ct)
279 struct nf_nat_info *info)
280{ 218{
281 int ret; 219 int ret;
282 220
@@ -285,7 +223,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
285 if (ret == NF_ACCEPT) { 223 if (ret == NF_ACCEPT) {
286 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) 224 if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
287 /* NUL mapping */ 225 /* NUL mapping */
288 ret = alloc_null_binding(ct, info, hooknum); 226 ret = alloc_null_binding(ct, hooknum);
289 } 227 }
290 return ret; 228 return ret;
291} 229}
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 64bbed2ba780..55dac36dbc85 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -80,7 +80,6 @@ nf_nat_fn(unsigned int hooknum,
80 struct nf_conn *ct; 80 struct nf_conn *ct;
81 enum ip_conntrack_info ctinfo; 81 enum ip_conntrack_info ctinfo;
82 struct nf_conn_nat *nat; 82 struct nf_conn_nat *nat;
83 struct nf_nat_info *info;
84 /* maniptype == SRC for postrouting. */ 83 /* maniptype == SRC for postrouting. */
85 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum); 84 enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
86 85
@@ -129,7 +128,6 @@ nf_nat_fn(unsigned int hooknum,
129 } 128 }
130 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ 129 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
131 case IP_CT_NEW: 130 case IP_CT_NEW:
132 info = &nat->info;
133 131
134 /* Seen it before? This can happen for loopback, retrans, 132 /* Seen it before? This can happen for loopback, retrans,
135 or local packets.. */ 133 or local packets.. */
@@ -138,14 +136,13 @@ nf_nat_fn(unsigned int hooknum,
138 136
139 if (unlikely(nf_ct_is_confirmed(ct))) 137 if (unlikely(nf_ct_is_confirmed(ct)))
140 /* NAT module was loaded late */ 138 /* NAT module was loaded late */
141 ret = alloc_null_binding_confirmed(ct, info, 139 ret = alloc_null_binding_confirmed(ct, hooknum);
142 hooknum);
143 else if (hooknum == NF_IP_LOCAL_IN) 140 else if (hooknum == NF_IP_LOCAL_IN)
144 /* LOCAL_IN hook doesn't have a chain! */ 141 /* LOCAL_IN hook doesn't have a chain! */
145 ret = alloc_null_binding(ct, info, hooknum); 142 ret = alloc_null_binding(ct, hooknum);
146 else 143 else
147 ret = nf_nat_rule_find(pskb, hooknum, in, out, 144 ret = nf_nat_rule_find(pskb, hooknum, in, out,
148 ct, info); 145 ct);
149 146
150 if (ret != NF_ACCEPT) { 147 if (ret != NF_ACCEPT) {
151 return ret; 148 return ret;
@@ -160,10 +157,8 @@ nf_nat_fn(unsigned int hooknum,
160 /* ESTABLISHED */ 157 /* ESTABLISHED */
161 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 158 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
162 ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); 159 ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
163 info = &nat->info;
164 } 160 }
165 161
166 NF_CT_ASSERT(info);
167 return nf_nat_packet(ct, ctinfo, hooknum, pskb); 162 return nf_nat_packet(ct, ctinfo, hooknum, pskb);
168} 163}
169 164
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 66026df1cc76..4c7e95fa090d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -118,15 +118,15 @@ static int udp_port_rover;
118 * Note about this hash function : 118 * Note about this hash function :
119 * Typical use is probably daddr = 0, only dport is going to vary hash 119 * Typical use is probably daddr = 0, only dport is going to vary hash
120 */ 120 */
121static inline unsigned int hash_port_and_addr(__u16 port, __be32 addr) 121static inline unsigned int udp_hash_port(__u16 port)
122{ 122{
123 addr ^= addr >> 16; 123 return port;
124 addr ^= addr >> 8;
125 return port ^ addr;
126} 124}
127 125
128static inline int __udp_lib_port_inuse(unsigned int hash, int port, 126static inline int __udp_lib_port_inuse(unsigned int hash, int port,
129 __be32 daddr, struct hlist_head udptable[]) 127 const struct sock *this_sk,
128 struct hlist_head udptable[],
129 const struct udp_get_port_ops *ops)
130{ 130{
131 struct sock *sk; 131 struct sock *sk;
132 struct hlist_node *node; 132 struct hlist_node *node;
@@ -138,7 +138,10 @@ static inline int __udp_lib_port_inuse(unsigned int hash, int port,
138 inet = inet_sk(sk); 138 inet = inet_sk(sk);
139 if (inet->num != port) 139 if (inet->num != port)
140 continue; 140 continue;
141 if (inet->rcv_saddr == daddr) 141 if (this_sk) {
142 if (ops->saddr_cmp(sk, this_sk))
143 return 1;
144 } else if (ops->saddr_any(sk))
142 return 1; 145 return 1;
143 } 146 }
144 return 0; 147 return 0;
@@ -151,12 +154,11 @@ static inline int __udp_lib_port_inuse(unsigned int hash, int port,
151 * @snum: port number to look up 154 * @snum: port number to look up
152 * @udptable: hash list table, must be of UDP_HTABLE_SIZE 155 * @udptable: hash list table, must be of UDP_HTABLE_SIZE
153 * @port_rover: pointer to record of last unallocated port 156 * @port_rover: pointer to record of last unallocated port
154 * @saddr_comp: AF-dependent comparison of bound local IP addresses 157 * @ops: AF-dependent address operations
155 */ 158 */
156int __udp_lib_get_port(struct sock *sk, unsigned short snum, 159int __udp_lib_get_port(struct sock *sk, unsigned short snum,
157 struct hlist_head udptable[], int *port_rover, 160 struct hlist_head udptable[], int *port_rover,
158 int (*saddr_comp)(const struct sock *sk1, 161 const struct udp_get_port_ops *ops)
159 const struct sock *sk2 ) )
160{ 162{
161 struct hlist_node *node; 163 struct hlist_node *node;
162 struct hlist_head *head; 164 struct hlist_head *head;
@@ -176,8 +178,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
176 for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { 178 for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
177 int size; 179 int size;
178 180
179 hash = hash_port_and_addr(result, 181 hash = ops->hash_port_and_rcv_saddr(result, sk);
180 inet_sk(sk)->rcv_saddr);
181 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)]; 182 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
182 if (hlist_empty(head)) { 183 if (hlist_empty(head)) {
183 if (result > sysctl_local_port_range[1]) 184 if (result > sysctl_local_port_range[1])
@@ -203,17 +204,16 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
203 result = sysctl_local_port_range[0] 204 result = sysctl_local_port_range[0]
204 + ((result - sysctl_local_port_range[0]) & 205 + ((result - sysctl_local_port_range[0]) &
205 (UDP_HTABLE_SIZE - 1)); 206 (UDP_HTABLE_SIZE - 1));
206 hash = hash_port_and_addr(result, 0); 207 hash = udp_hash_port(result);
207 if (__udp_lib_port_inuse(hash, result, 208 if (__udp_lib_port_inuse(hash, result,
208 0, udptable)) 209 NULL, udptable, ops))
209 continue; 210 continue;
210 if (!inet_sk(sk)->rcv_saddr) 211 if (ops->saddr_any(sk))
211 break; 212 break;
212 213
213 hash = hash_port_and_addr(result, 214 hash = ops->hash_port_and_rcv_saddr(result, sk);
214 inet_sk(sk)->rcv_saddr);
215 if (! __udp_lib_port_inuse(hash, result, 215 if (! __udp_lib_port_inuse(hash, result,
216 inet_sk(sk)->rcv_saddr, udptable)) 216 sk, udptable, ops))
217 break; 217 break;
218 } 218 }
219 if (i >= (1 << 16) / UDP_HTABLE_SIZE) 219 if (i >= (1 << 16) / UDP_HTABLE_SIZE)
@@ -221,7 +221,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
221gotit: 221gotit:
222 *port_rover = snum = result; 222 *port_rover = snum = result;
223 } else { 223 } else {
224 hash = hash_port_and_addr(snum, 0); 224 hash = udp_hash_port(snum);
225 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)]; 225 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
226 226
227 sk_for_each(sk2, node, head) 227 sk_for_each(sk2, node, head)
@@ -231,12 +231,11 @@ gotit:
231 (!sk2->sk_reuse || !sk->sk_reuse) && 231 (!sk2->sk_reuse || !sk->sk_reuse) &&
232 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || 232 (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
233 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && 233 sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
234 (*saddr_comp)(sk, sk2)) 234 ops->saddr_cmp(sk, sk2))
235 goto fail; 235 goto fail;
236 236
237 if (inet_sk(sk)->rcv_saddr) { 237 if (!ops->saddr_any(sk)) {
238 hash = hash_port_and_addr(snum, 238 hash = ops->hash_port_and_rcv_saddr(snum, sk);
239 inet_sk(sk)->rcv_saddr);
240 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)]; 239 head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
241 240
242 sk_for_each(sk2, node, head) 241 sk_for_each(sk2, node, head)
@@ -248,7 +247,7 @@ gotit:
248 !sk->sk_bound_dev_if || 247 !sk->sk_bound_dev_if ||
249 sk2->sk_bound_dev_if == 248 sk2->sk_bound_dev_if ==
250 sk->sk_bound_dev_if) && 249 sk->sk_bound_dev_if) &&
251 (*saddr_comp)(sk, sk2)) 250 ops->saddr_cmp(sk, sk2))
252 goto fail; 251 goto fail;
253 } 252 }
254 } 253 }
@@ -266,12 +265,12 @@ fail:
266} 265}
267 266
268int udp_get_port(struct sock *sk, unsigned short snum, 267int udp_get_port(struct sock *sk, unsigned short snum,
269 int (*scmp)(const struct sock *, const struct sock *)) 268 const struct udp_get_port_ops *ops)
270{ 269{
271 return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp); 270 return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, ops);
272} 271}
273 272
274int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) 273static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
275{ 274{
276 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); 275 struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
277 276
@@ -280,9 +279,33 @@ int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
280 inet1->rcv_saddr == inet2->rcv_saddr )); 279 inet1->rcv_saddr == inet2->rcv_saddr ));
281} 280}
282 281
282static int ipv4_rcv_saddr_any(const struct sock *sk)
283{
284 return !inet_sk(sk)->rcv_saddr;
285}
286
287static inline unsigned int ipv4_hash_port_and_addr(__u16 port, __be32 addr)
288{
289 addr ^= addr >> 16;
290 addr ^= addr >> 8;
291 return port ^ addr;
292}
293
294static unsigned int ipv4_hash_port_and_rcv_saddr(__u16 port,
295 const struct sock *sk)
296{
297 return ipv4_hash_port_and_addr(port, inet_sk(sk)->rcv_saddr);
298}
299
300const struct udp_get_port_ops udp_ipv4_ops = {
301 .saddr_cmp = ipv4_rcv_saddr_equal,
302 .saddr_any = ipv4_rcv_saddr_any,
303 .hash_port_and_rcv_saddr = ipv4_hash_port_and_rcv_saddr,
304};
305
283static inline int udp_v4_get_port(struct sock *sk, unsigned short snum) 306static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
284{ 307{
285 return udp_get_port(sk, snum, ipv4_rcv_saddr_equal); 308 return udp_get_port(sk, snum, &udp_ipv4_ops);
286} 309}
287 310
288/* UDP is nearly always wildcards out the wazoo, it makes no sense to try 311/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
@@ -297,8 +320,8 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
297 unsigned int hash, hashwild; 320 unsigned int hash, hashwild;
298 int score, best = -1, hport = ntohs(dport); 321 int score, best = -1, hport = ntohs(dport);
299 322
300 hash = hash_port_and_addr(hport, daddr); 323 hash = ipv4_hash_port_and_addr(hport, daddr);
301 hashwild = hash_port_and_addr(hport, 0); 324 hashwild = udp_hash_port(hport);
302 325
303 read_lock(&udp_hash_lock); 326 read_lock(&udp_hash_lock);
304 327
@@ -1198,8 +1221,8 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
1198 struct sock *sk, *skw, *sknext; 1221 struct sock *sk, *skw, *sknext;
1199 int dif; 1222 int dif;
1200 int hport = ntohs(uh->dest); 1223 int hport = ntohs(uh->dest);
1201 unsigned int hash = hash_port_and_addr(hport, daddr); 1224 unsigned int hash = ipv4_hash_port_and_addr(hport, daddr);
1202 unsigned int hashwild = hash_port_and_addr(hport, 0); 1225 unsigned int hashwild = udp_hash_port(hport);
1203 1226
1204 dif = skb->dev->ifindex; 1227 dif = skb->dev->ifindex;
1205 1228
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 820a477cfaa6..06d94195e644 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -5,14 +5,14 @@
5#include <net/protocol.h> 5#include <net/protocol.h>
6#include <net/inet_common.h> 6#include <net/inet_common.h>
7 7
8extern const struct udp_get_port_ops udp_ipv4_ops;
9
8extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int ); 10extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int );
9extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []); 11extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);
10 12
11extern int __udp_lib_get_port(struct sock *sk, unsigned short snum, 13extern int __udp_lib_get_port(struct sock *sk, unsigned short snum,
12 struct hlist_head udptable[], int *port_rover, 14 struct hlist_head udptable[], int *port_rover,
13 int (*)(const struct sock*,const struct sock*)); 15 const struct udp_get_port_ops *ops);
14extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
15
16 16
17extern int udp_setsockopt(struct sock *sk, int level, int optname, 17extern int udp_setsockopt(struct sock *sk, int level, int optname,
18 char __user *optval, int optlen); 18 char __user *optval, int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index f34fd686a8f1..3653b32dce2d 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -19,14 +19,15 @@ struct hlist_head udplite_hash[UDP_HTABLE_SIZE];
19static int udplite_port_rover; 19static int udplite_port_rover;
20 20
21int udplite_get_port(struct sock *sk, unsigned short p, 21int udplite_get_port(struct sock *sk, unsigned short p,
22 int (*c)(const struct sock *, const struct sock *)) 22 const struct udp_get_port_ops *ops)
23{ 23{
24 return __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c); 24 return __udp_lib_get_port(sk, p, udplite_hash,
25 &udplite_port_rover, ops);
25} 26}
26 27
27static int udplite_v4_get_port(struct sock *sk, unsigned short snum) 28static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
28{ 29{
29 return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal); 30 return udplite_get_port(sk, snum, &udp_ipv4_ops);
30} 31}
31 32
32static int udplite_rcv(struct sk_buff *skb) 33static int udplite_rcv(struct sk_buff *skb)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d02685c6bc69..c7ea248fae2e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4204,6 +4204,10 @@ int __init addrconf_init(void)
4204 return err; 4204 return err;
4205 4205
4206 ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev); 4206 ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4207#ifdef CONFIG_IPV6_MULTIPLE_TABLES
4208 ip6_prohibit_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4209 ip6_blk_hole_entry.rt6i_idev = in6_dev_get(&loopback_dev);
4210#endif
4207 4211
4208 register_netdevice_notifier(&ipv6_dev_notf); 4212 register_netdevice_notifier(&ipv6_dev_notf);
4209 4213
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6d8e4ac7bdad..14be0b9b77a5 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -660,6 +660,14 @@ EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
660 Hop-by-hop options. 660 Hop-by-hop options.
661 **********************************/ 661 **********************************/
662 662
663/*
664 * Note: we cannot rely on skb->dst before we assign it in ip6_route_input().
665 */
666static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
667{
668 return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev);
669}
670
663/* Router Alert as of RFC 2711 */ 671/* Router Alert as of RFC 2711 */
664 672
665static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) 673static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
@@ -688,25 +696,25 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
688 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { 696 if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
689 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 697 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
690 nh[optoff+1]); 698 nh[optoff+1]);
691 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 699 IP6_INC_STATS_BH(ipv6_skb_idev(skb),
692 IPSTATS_MIB_INHDRERRORS); 700 IPSTATS_MIB_INHDRERRORS);
693 goto drop; 701 goto drop;
694 } 702 }
695 703
696 pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); 704 pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
697 if (pkt_len <= IPV6_MAXPLEN) { 705 if (pkt_len <= IPV6_MAXPLEN) {
698 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); 706 IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
699 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); 707 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
700 return 0; 708 return 0;
701 } 709 }
702 if (ipv6_hdr(skb)->payload_len) { 710 if (ipv6_hdr(skb)->payload_len) {
703 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); 711 IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
704 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff); 712 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
705 return 0; 713 return 0;
706 } 714 }
707 715
708 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) { 716 if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
709 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS); 717 IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS);
710 goto drop; 718 goto drop;
711 } 719 }
712 720
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index f508171bab73..4704b5fc3085 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -463,10 +463,17 @@ int ip6_forward(struct sk_buff *skb)
463 */ 463 */
464 if (xrlim_allow(dst, 1*HZ)) 464 if (xrlim_allow(dst, 1*HZ))
465 ndisc_send_redirect(skb, n, target); 465 ndisc_send_redirect(skb, n, target);
466 } else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK 466 } else {
467 |IPV6_ADDR_LINKLOCAL)) { 467 int addrtype = ipv6_addr_type(&hdr->saddr);
468
468 /* This check is security critical. */ 469 /* This check is security critical. */
469 goto error; 470 if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
471 goto error;
472 if (addrtype & IPV6_ADDR_LINKLOCAL) {
473 icmpv6_send(skb, ICMPV6_DEST_UNREACH,
474 ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
475 goto error;
476 }
470 } 477 }
471 478
472 if (skb->len > dst_mtu(dst)) { 479 if (skb->len > dst_mtu(dst)) {
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 76f0cf66f95c..7e32e2aaf7f7 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -24,53 +24,29 @@ static struct
24 struct ip6t_replace repl; 24 struct ip6t_replace repl;
25 struct ip6t_standard entries[3]; 25 struct ip6t_standard entries[3];
26 struct ip6t_error term; 26 struct ip6t_error term;
27} initial_table __initdata 27} initial_table __initdata = {
28= { { "filter", FILTER_VALID_HOOKS, 4, 28 .repl = {
29 sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error), 29 .name = "filter",
30 { [NF_IP6_LOCAL_IN] = 0, 30 .valid_hooks = FILTER_VALID_HOOKS,
31 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), 31 .num_entries = 4,
32 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 }, 32 .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
33 { [NF_IP6_LOCAL_IN] = 0, 33 .hook_entry = {
34 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard), 34 [NF_IP6_LOCAL_IN] = 0,
35 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 }, 35 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
36 0, NULL, { } }, 36 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
37 { 37 },
38 /* LOCAL_IN */ 38 .underflow = {
39 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 39 [NF_IP6_LOCAL_IN] = 0,
40 0, 40 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
41 sizeof(struct ip6t_entry), 41 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
42 sizeof(struct ip6t_standard), 42 },
43 0, { 0, 0 }, { } }, 43 },
44 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, 44 .entries = {
45 -NF_ACCEPT - 1 } }, 45 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
46 /* FORWARD */ 46 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
47 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 47 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
48 0, 48 },
49 sizeof(struct ip6t_entry), 49 .term = IP6T_ERROR_INIT, /* ERROR */
50 sizeof(struct ip6t_standard),
51 0, { 0, 0 }, { } },
52 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
53 -NF_ACCEPT - 1 } },
54 /* LOCAL_OUT */
55 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
56 0,
57 sizeof(struct ip6t_entry),
58 sizeof(struct ip6t_standard),
59 0, { 0, 0 }, { } },
60 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
61 -NF_ACCEPT - 1 } }
62 },
63 /* ERROR */
64 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
65 0,
66 sizeof(struct ip6t_entry),
67 sizeof(struct ip6t_error),
68 0, { 0, 0 }, { } },
69 { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
70 { } },
71 "ERROR"
72 }
73 }
74}; 50};
75 51
76static struct xt_table packet_filter = { 52static struct xt_table packet_filter = {
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a9f10e32c163..f2d26495f413 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -32,73 +32,35 @@ static struct
32 struct ip6t_replace repl; 32 struct ip6t_replace repl;
33 struct ip6t_standard entries[5]; 33 struct ip6t_standard entries[5];
34 struct ip6t_error term; 34 struct ip6t_error term;
35} initial_table __initdata 35} initial_table __initdata = {
36= { { "mangle", MANGLE_VALID_HOOKS, 6, 36 .repl = {
37 sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error), 37 .name = "mangle",
38 { [NF_IP6_PRE_ROUTING] = 0, 38 .valid_hooks = MANGLE_VALID_HOOKS,
39 [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), 39 .num_entries = 6,
40 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, 40 .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
41 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, 41 .hook_entry = {
42 [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4}, 42 [NF_IP6_PRE_ROUTING] = 0,
43 { [NF_IP6_PRE_ROUTING] = 0, 43 [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
44 [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard), 44 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
45 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2, 45 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
46 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3, 46 [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
47 [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4}, 47 },
48 0, NULL, { } }, 48 .underflow = {
49 { 49 [NF_IP6_PRE_ROUTING] = 0,
50 /* PRE_ROUTING */ 50 [NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
51 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 51 [NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
52 0, 52 [NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
53 sizeof(struct ip6t_entry), 53 [NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
54 sizeof(struct ip6t_standard), 54 },
55 0, { 0, 0 }, { } }, 55 },
56 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } }, 56 .entries = {
57 -NF_ACCEPT - 1 } }, 57 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
58 /* LOCAL_IN */ 58 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
59 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 }, 59 IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
60 0, 60 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
61 sizeof(struct ip6t_entry), 61 IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
62 sizeof(struct ip6t_standard), 62 },
63 0, { 0, 0 }, { } }, 63 .term = IP6T_ERROR_INIT, /* ERROR */
64 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
65 -NF_ACCEPT - 1 } },
66 /* FORWARD */
67 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
68 0,
69 sizeof(struct ip6t_entry),
70 sizeof(struct ip6t_standard),
71 0, { 0, 0 }, { } },
72 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
73 -NF_ACCEPT - 1 } },
74 /* LOCAL_OUT */
75 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
76 0,
77 sizeof(struct ip6t_entry),
78 sizeof(struct ip6t_standard),
79 0, { 0, 0 }, { } },
80 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
81 -NF_ACCEPT - 1 } },
82 /* POST_ROUTING */
83 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
84 0,
85 sizeof(struct ip6t_entry),
86 sizeof(struct ip6t_standard),
87 0, { 0, 0 }, { } },
88 { { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
89 -NF_ACCEPT - 1 } }
90 },
91 /* ERROR */
92 { { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
93 0,
94 sizeof(struct ip6t_entry),
95 sizeof(struct ip6t_error),
96 0, { 0, 0 }, { } },
97 { { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
98 { } },
99 "ERROR"
100 }
101 }
102}; 64};
103 65
104static struct xt_table packet_mangler = { 66static struct xt_table packet_mangler = {
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index a3eb5b8ce18d..0acda45d455d 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -35,56 +35,10 @@ static struct
35 }, 35 },
36 }, 36 },
37 .entries = { 37 .entries = {
38 /* PRE_ROUTING */ 38 IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
39 { 39 IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
40 .entry = {
41 .target_offset = sizeof(struct ip6t_entry),
42 .next_offset = sizeof(struct ip6t_standard),
43 },
44 .target = {
45 .target = {
46 .u = {
47 .target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)),
48 },
49 },
50 .verdict = -NF_ACCEPT - 1,
51 },
52 },
53
54 /* LOCAL_OUT */
55 {
56 .entry = {
57 .target_offset = sizeof(struct ip6t_entry),
58 .next_offset = sizeof(struct ip6t_standard),
59 },
60 .target = {
61 .target = {
62 .u = {
63 .target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)),
64 },
65 },
66 .verdict = -NF_ACCEPT - 1,
67 },
68 },
69 }, 40 },
70 /* ERROR */ 41 .term = IP6T_ERROR_INIT, /* ERROR */
71 .term = {
72 .entry = {
73 .target_offset = sizeof(struct ip6t_entry),
74 .next_offset = sizeof(struct ip6t_error),
75 },
76 .target = {
77 .target = {
78 .u = {
79 .user = {
80 .target_size = IP6T_ALIGN(sizeof(struct ip6t_error_target)),
81 .name = IP6T_ERROR_TARGET,
82 },
83 },
84 },
85 .errorname = "ERROR",
86 },
87 }
88}; 42};
89 43
90static struct xt_table packet_raw = { 44static struct xt_table packet_raw = {
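Both tables above now build their preset rules from IP6T_STANDARD_INIT()/IP6T_ERROR_INIT() instead of open-coding every field. Judging from the designated initializers that ip6table_raw.c drops in this hunk, each IP6T_STANDARD_INIT(NF_ACCEPT) slot has to expand to roughly the sketch below; the actual macro definitions are added elsewhere in this series and may differ in detail.

	/* Rough expansion of one IP6T_STANDARD_INIT(NF_ACCEPT) slot, mirroring
	 * the open-coded initializer removed from ip6table_raw.c above. */
	{
		.entry = {
			.target_offset	= sizeof(struct ip6t_entry),
			.next_offset	= sizeof(struct ip6t_standard),
		},
		.target = {
			.target = {
				.u = {
					.target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)),
				},
			},
			.verdict = -NF_ACCEPT - 1,
		},
	},

IP6T_ERROR_INIT plays the same role for the terminating error rule, matching the removed .term initializer.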
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b083c09e3d2d..a7ae59c954d5 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -52,9 +52,28 @@
52 52
53DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly; 53DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
54 54
55static int ipv6_rcv_saddr_any(const struct sock *sk)
56{
57 struct ipv6_pinfo *np = inet6_sk(sk);
58
59 return ipv6_addr_any(&np->rcv_saddr);
60}
61
62static unsigned int ipv6_hash_port_and_rcv_saddr(__u16 port,
63 const struct sock *sk)
64{
65 return port;
66}
67
68const struct udp_get_port_ops udp_ipv6_ops = {
69 .saddr_cmp = ipv6_rcv_saddr_equal,
70 .saddr_any = ipv6_rcv_saddr_any,
71 .hash_port_and_rcv_saddr = ipv6_hash_port_and_rcv_saddr,
72};
73
55static inline int udp_v6_get_port(struct sock *sk, unsigned short snum) 74static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
56{ 75{
57 return udp_get_port(sk, snum, ipv6_rcv_saddr_equal); 76 return udp_get_port(sk, snum, &udp_ipv6_ops);
58} 77}
59 78
60static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport, 79static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
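udp_get_port() (and udplite_get_port() below) now take an ops table instead of a bare saddr-comparison callback, so the address-family-specific comparison, wildcard test and port hash travel together. From the udp_ipv6_ops initializer above, the structure is presumably shaped like the sketch below; the real declaration added by this series lives in the UDP headers and the exact member types are an assumption here.

	/* Shape implied by the udp_ipv6_ops initializer above; member types are
	 * inferred from ipv6_rcv_saddr_any(), ipv6_hash_port_and_rcv_saddr()
	 * and the existing ipv6_rcv_saddr_equal(), and may not match exactly. */
	struct udp_get_port_ops {
		int		(*saddr_cmp)(const struct sock *sk1,
					     const struct sock *sk2);
		int		(*saddr_any)(const struct sock *sk);
		unsigned int	(*hash_port_and_rcv_saddr)(__u16 port,
							   const struct sock *sk);
	};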
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 6e252f318f7c..36b0c11a28a3 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -6,6 +6,8 @@
6#include <net/addrconf.h> 6#include <net/addrconf.h>
7#include <net/inet_common.h> 7#include <net/inet_common.h>
8 8
9extern const struct udp_get_port_ops udp_ipv6_ops;
10
9extern int __udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int ); 11extern int __udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int );
10extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, 12extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
11 int , int , int , __be32 , struct hlist_head []); 13 int , int , int , __be32 , struct hlist_head []);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index f54016a55004..c40a51362f89 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -37,7 +37,7 @@ static struct inet6_protocol udplitev6_protocol = {
37 37
38static int udplite_v6_get_port(struct sock *sk, unsigned short snum) 38static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
39{ 39{
40 return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal); 40 return udplite_get_port(sk, snum, &udp_ipv6_ops);
41} 41}
42 42
43struct proto udplitev6_prot = { 43struct proto udplitev6_prot = {
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 822917debeff..3e07e9d6fa42 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -17,6 +17,7 @@
17 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE, 17 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
18 * SSID) 18 * SSID)
19 */ 19 */
20#include <linux/delay.h>
20#include <linux/if_ether.h> 21#include <linux/if_ether.h>
21#include <linux/skbuff.h> 22#include <linux/skbuff.h>
22#include <linux/netdevice.h> 23#include <linux/netdevice.h>
@@ -27,7 +28,6 @@
27#include <linux/rtnetlink.h> 28#include <linux/rtnetlink.h>
28#include <net/iw_handler.h> 29#include <net/iw_handler.h>
29#include <asm/types.h> 30#include <asm/types.h>
30#include <asm/delay.h>
31 31
32#include <net/mac80211.h> 32#include <net/mac80211.h>
33#include "ieee80211_i.h" 33#include "ieee80211_i.h"
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e132c8ae8784..e8b5c2d7db62 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -299,7 +299,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
299{ 299{
300 struct nf_conn *ct = (struct nf_conn *)nfct; 300 struct nf_conn *ct = (struct nf_conn *)nfct;
301 struct nf_conn_help *help = nfct_help(ct); 301 struct nf_conn_help *help = nfct_help(ct);
302 struct nf_conntrack_l3proto *l3proto;
303 struct nf_conntrack_l4proto *l4proto; 302 struct nf_conntrack_l4proto *l4proto;
304 typeof(nf_conntrack_destroyed) destroyed; 303 typeof(nf_conntrack_destroyed) destroyed;
305 304
@@ -317,10 +316,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
317 * destroy_conntrack() MUST NOT be called with a write lock 316 * destroy_conntrack() MUST NOT be called with a write lock
318 * to nf_conntrack_lock!!! -HW */ 317 * to nf_conntrack_lock!!! -HW */
319 rcu_read_lock(); 318 rcu_read_lock();
320 l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
321 if (l3proto && l3proto->destroy)
322 l3proto->destroy(ct);
323
324 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num, 319 l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
325 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum); 320 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
326 if (l4proto && l4proto->destroy) 321 if (l4proto && l4proto->destroy)
@@ -893,8 +888,13 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
893 NF_CT_DUMP_TUPLE(newreply); 888 NF_CT_DUMP_TUPLE(newreply);
894 889
895 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply; 890 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
896 if (!ct->master && help && help->expecting == 0) 891 if (!ct->master && help && help->expecting == 0) {
897 help->helper = __nf_ct_helper_find(newreply); 892 struct nf_conntrack_helper *helper;
893 helper = __nf_ct_helper_find(newreply);
894 if (helper)
895 memset(&help->help, 0, sizeof(help->help));
896 help->helper = helper;
897 }
898 write_unlock_bh(&nf_conntrack_lock); 898 write_unlock_bh(&nf_conntrack_lock);
899} 899}
900EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply); 900EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
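This hunk and the ctnetlink change below both zero help->help before installing a different helper. That member is the per-helper scratch area inside the conntrack helper extension; as a rough sketch from the headers of this era (treat the exact layout as an assumption), what gets wiped is:

	/* Rough shape of the helper extension; shown only to make clear what
	 * the memset() above is clearing. */
	struct nf_conn_help {
		struct nf_conntrack_helper *helper;
		union nf_conntrack_help help;	/* per-helper private state (FTP, PPTP, ...) */
		unsigned int expecting;		/* outstanding expectations */
	};

Because help is a union reused by whichever helper owns the connection, state left behind by a previous helper would otherwise be misread by the new one.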
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index aa1a97ee514b..d6d39e241327 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -830,11 +830,6 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
830 char *helpname; 830 char *helpname;
831 int err; 831 int err;
832 832
833 if (!help) {
834 /* FIXME: we need to reallocate and rehash */
835 return -EBUSY;
836 }
837
838 /* don't change helper of sibling connections */ 833 /* don't change helper of sibling connections */
839 if (ct->master) 834 if (ct->master)
840 return -EINVAL; 835 return -EINVAL;
@@ -843,25 +838,34 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
843 if (err < 0) 838 if (err < 0)
844 return err; 839 return err;
845 840
846 helper = __nf_conntrack_helper_find_byname(helpname); 841 if (!strcmp(helpname, "")) {
847 if (!helper) { 842 if (help && help->helper) {
848 if (!strcmp(helpname, ""))
849 helper = NULL;
850 else
851 return -EINVAL;
852 }
853
854 if (help->helper) {
855 if (!helper) {
856 /* we had a helper before ... */ 843 /* we had a helper before ... */
857 nf_ct_remove_expectations(ct); 844 nf_ct_remove_expectations(ct);
858 help->helper = NULL; 845 help->helper = NULL;
859 } else {
860 /* need to zero data of old helper */
861 memset(&help->help, 0, sizeof(help->help));
862 } 846 }
847
848 return 0;
863 } 849 }
864 850
851 if (!help) {
852 /* FIXME: we need to reallocate and rehash */
853 return -EBUSY;
854 }
855
856 helper = __nf_conntrack_helper_find_byname(helpname);
857 if (helper == NULL)
858 return -EINVAL;
859
860 if (help->helper == helper)
861 return 0;
862
863 if (help->helper)
864 /* we had a helper before ... */
865 nf_ct_remove_expectations(ct);
866
867 /* need to zero data of old helper */
868 memset(&help->help, 0, sizeof(help->help));
865 help->helper = helper; 869 help->helper = helper;
866 870
867 return 0; 871 return 0;
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index f4ea8fe07a53..189ded5f378b 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -134,12 +134,66 @@ static void destroy(const struct xt_match *match, void *matchinfo)
134 nf_ct_l3proto_module_put(match->family); 134 nf_ct_l3proto_module_put(match->family);
135} 135}
136 136
137#ifdef CONFIG_COMPAT
138struct compat_xt_conntrack_info
139{
140 compat_uint_t statemask;
141 compat_uint_t statusmask;
142 struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
143 struct in_addr sipmsk[IP_CT_DIR_MAX];
144 struct in_addr dipmsk[IP_CT_DIR_MAX];
145 compat_ulong_t expires_min;
146 compat_ulong_t expires_max;
147 u_int8_t flags;
148 u_int8_t invflags;
149};
150
151static void compat_from_user(void *dst, void *src)
152{
153 struct compat_xt_conntrack_info *cm = src;
154 struct xt_conntrack_info m = {
155 .statemask = cm->statemask,
156 .statusmask = cm->statusmask,
157 .expires_min = cm->expires_min,
158 .expires_max = cm->expires_max,
159 .flags = cm->flags,
160 .invflags = cm->invflags,
161 };
162 memcpy(m.tuple, cm->tuple, sizeof(m.tuple));
163 memcpy(m.sipmsk, cm->sipmsk, sizeof(m.sipmsk));
164 memcpy(m.dipmsk, cm->dipmsk, sizeof(m.dipmsk));
165 memcpy(dst, &m, sizeof(m));
166}
167
168static int compat_to_user(void __user *dst, void *src)
169{
170 struct xt_conntrack_info *m = src;
171 struct compat_xt_conntrack_info cm = {
172 .statemask = m->statemask,
173 .statusmask = m->statusmask,
174 .expires_min = m->expires_min,
175 .expires_max = m->expires_max,
176 .flags = m->flags,
177 .invflags = m->invflags,
178 };
179 memcpy(cm.tuple, m->tuple, sizeof(cm.tuple));
180 memcpy(cm.sipmsk, m->sipmsk, sizeof(cm.sipmsk));
181 memcpy(cm.dipmsk, m->dipmsk, sizeof(cm.dipmsk));
182 return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
183}
184#endif
185
137static struct xt_match conntrack_match = { 186static struct xt_match conntrack_match = {
138 .name = "conntrack", 187 .name = "conntrack",
139 .match = match, 188 .match = match,
140 .checkentry = checkentry, 189 .checkentry = checkentry,
141 .destroy = destroy, 190 .destroy = destroy,
142 .matchsize = sizeof(struct xt_conntrack_info), 191 .matchsize = sizeof(struct xt_conntrack_info),
192#ifdef CONFIG_COMPAT
193 .compatsize = sizeof(struct compat_xt_conntrack_info),
194 .compat_from_user = compat_from_user,
195 .compat_to_user = compat_to_user,
196#endif
143 .family = AF_INET, 197 .family = AF_INET,
144 .me = THIS_MODULE, 198 .me = THIS_MODULE,
145}; 199};
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3385ee592541..f28bb2dc58d0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -71,12 +71,9 @@ void qdisc_unlock_tree(struct net_device *dev)
71 71
72 72
73/* Kick device. 73/* Kick device.
74 Note, that this procedure can be called by a watchdog timer, so that
75 we do not check dev->tbusy flag here.
76 74
77 Returns: 0 - queue is empty. 75 Returns: 0 - queue is empty or throttled.
78 >0 - queue is not empty, but throttled. 76 >0 - queue is not empty.
79 <0 - queue is not empty. Device is throttled, if dev->tbusy != 0.
80 77
81 NOTE: Called under dev->queue_lock with locally disabled BH. 78 NOTE: Called under dev->queue_lock with locally disabled BH.
82*/ 79*/
@@ -115,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
115 kfree_skb(skb); 112 kfree_skb(skb);
116 if (net_ratelimit()) 113 if (net_ratelimit())
117 printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); 114 printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
118 return -1; 115 goto out;
119 } 116 }
120 __get_cpu_var(netdev_rx_stat).cpu_collision++; 117 __get_cpu_var(netdev_rx_stat).cpu_collision++;
121 goto requeue; 118 goto requeue;
@@ -135,10 +132,12 @@ static inline int qdisc_restart(struct net_device *dev)
135 netif_tx_unlock(dev); 132 netif_tx_unlock(dev);
136 } 133 }
137 spin_lock(&dev->queue_lock); 134 spin_lock(&dev->queue_lock);
138 return -1; 135 q = dev->qdisc;
136 goto out;
139 } 137 }
140 if (ret == NETDEV_TX_LOCKED && nolock) { 138 if (ret == NETDEV_TX_LOCKED && nolock) {
141 spin_lock(&dev->queue_lock); 139 spin_lock(&dev->queue_lock);
140 q = dev->qdisc;
142 goto collision; 141 goto collision;
143 } 142 }
144 } 143 }
@@ -163,26 +162,28 @@ static inline int qdisc_restart(struct net_device *dev)
163 */ 162 */
164 163
165requeue: 164requeue:
166 if (skb->next) 165 if (unlikely(q == &noop_qdisc))
166 kfree_skb(skb);
167 else if (skb->next)
167 dev->gso_skb = skb; 168 dev->gso_skb = skb;
168 else 169 else
169 q->ops->requeue(skb, q); 170 q->ops->requeue(skb, q);
170 netif_schedule(dev); 171 netif_schedule(dev);
171 return 1; 172 return 0;
172 } 173 }
174
175out:
173 BUG_ON((int) q->q.qlen < 0); 176 BUG_ON((int) q->q.qlen < 0);
174 return q->q.qlen; 177 return q->q.qlen;
175} 178}
176 179
177void __qdisc_run(struct net_device *dev) 180void __qdisc_run(struct net_device *dev)
178{ 181{
179 if (unlikely(dev->qdisc == &noop_qdisc)) 182 do {
180 goto out; 183 if (!qdisc_restart(dev))
181 184 break;
182 while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev)) 185 } while (!netif_queue_stopped(dev));
183 /* NOTHING */;
184 186
185out:
186 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); 187 clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
187} 188}
188 189
@@ -544,6 +545,7 @@ void dev_activate(struct net_device *dev)
544void dev_deactivate(struct net_device *dev) 545void dev_deactivate(struct net_device *dev)
545{ 546{
546 struct Qdisc *qdisc; 547 struct Qdisc *qdisc;
548 struct sk_buff *skb;
547 549
548 spin_lock_bh(&dev->queue_lock); 550 spin_lock_bh(&dev->queue_lock);
549 qdisc = dev->qdisc; 551 qdisc = dev->qdisc;
@@ -551,8 +553,12 @@ void dev_deactivate(struct net_device *dev)
551 553
552 qdisc_reset(qdisc); 554 qdisc_reset(qdisc);
553 555
556 skb = dev->gso_skb;
557 dev->gso_skb = NULL;
554 spin_unlock_bh(&dev->queue_lock); 558 spin_unlock_bh(&dev->queue_lock);
555 559
560 kfree_skb(skb);
561
556 dev_watchdog_down(dev); 562 dev_watchdog_down(dev);
557 563
558 /* Wait for outstanding dev_queue_xmit calls. */ 564 /* Wait for outstanding dev_queue_xmit calls. */
@@ -561,11 +567,6 @@ void dev_deactivate(struct net_device *dev)
561 /* Wait for outstanding qdisc_run calls. */ 567 /* Wait for outstanding qdisc_run calls. */
562 while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state)) 568 while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
563 yield(); 569 yield();
564
565 if (dev->gso_skb) {
566 kfree_skb(dev->gso_skb);
567 dev->gso_skb = NULL;
568 }
569} 570}
570 571
571void dev_init_scheduler(struct net_device *dev) 572void dev_init_scheduler(struct net_device *dev)
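With qdisc_restart() now returning the queue length (0 covers both the empty and the requeue/throttled cases), __qdisc_run() above reduces to a plain loop. For context, its only caller is roughly the inline below, which sets the __LINK_STATE_QDISC_RUNNING bit that __qdisc_run() clears when it stops; this is sketched from the contemporaneous pkt_sched.h, so the exact form is an assumption.

	/* Sketch of the caller: take the RUNNING bit, drain the queue, and let
	 * __qdisc_run() clear the bit itself once qdisc_restart() returns 0. */
	static inline void qdisc_run(struct net_device *dev)
	{
		if (!netif_queue_stopped(dev) &&
		    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			__qdisc_run(dev);
	}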
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index d24914db7861..f05ad9a30b4c 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -94,14 +94,13 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
94 struct net_device *dev = sch->dev; 94 struct net_device *dev = sch->dev;
95 struct teql_sched_data *q = qdisc_priv(sch); 95 struct teql_sched_data *q = qdisc_priv(sch);
96 96
97 __skb_queue_tail(&q->q, skb); 97 if (q->q.qlen < dev->tx_queue_len) {
98 if (q->q.qlen <= dev->tx_queue_len) { 98 __skb_queue_tail(&q->q, skb);
99 sch->bstats.bytes += skb->len; 99 sch->bstats.bytes += skb->len;
100 sch->bstats.packets++; 100 sch->bstats.packets++;
101 return 0; 101 return 0;
102 } 102 }
103 103
104 __skb_unlink(skb, &q->q);
105 kfree_skb(skb); 104 kfree_skb(skb);
106 sch->qstats.drops++; 105 sch->qstats.drops++;
107 return NET_XMIT_DROP; 106 return NET_XMIT_DROP;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 83a76ba9d7b3..4dcdabf56473 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4164,6 +4164,7 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4164 rwlock_t *addr_lock; 4164 rwlock_t *addr_lock;
4165 int err = 0; 4165 int err = 0;
4166 void *addrs; 4166 void *addrs;
4167 void *buf;
4167 int bytes_copied = 0; 4168 int bytes_copied = 0;
4168 4169
4169 if (len != sizeof(struct sctp_getaddrs_old)) 4170 if (len != sizeof(struct sctp_getaddrs_old))
@@ -4217,13 +4218,14 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
4217 } 4218 }
4218 } 4219 }
4219 4220
4221 buf = addrs;
4220 list_for_each(pos, &bp->address_list) { 4222 list_for_each(pos, &bp->address_list) {
4221 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 4223 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4222 memcpy(&temp, &addr->a, sizeof(temp)); 4224 memcpy(&temp, &addr->a, sizeof(temp));
4223 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); 4225 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4224 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; 4226 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4225 memcpy(addrs, &temp, addrlen); 4227 memcpy(buf, &temp, addrlen);
4226 to += addrlen; 4228 buf += addrlen;
4227 bytes_copied += addrlen; 4229 bytes_copied += addrlen;
4228 cnt ++; 4230 cnt ++;
4229 if (cnt >= getaddrs.addr_num) break; 4231 if (cnt >= getaddrs.addr_num) break;
@@ -4266,6 +4268,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4266 size_t space_left; 4268 size_t space_left;
4267 int bytes_copied = 0; 4269 int bytes_copied = 0;
4268 void *addrs; 4270 void *addrs;
4271 void *buf;
4269 4272
4270 if (len <= sizeof(struct sctp_getaddrs)) 4273 if (len <= sizeof(struct sctp_getaddrs))
4271 return -EINVAL; 4274 return -EINVAL;
@@ -4316,6 +4319,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4316 } 4319 }
4317 } 4320 }
4318 4321
4322 buf = addrs;
4319 list_for_each(pos, &bp->address_list) { 4323 list_for_each(pos, &bp->address_list) {
4320 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 4324 addr = list_entry(pos, struct sctp_sockaddr_entry, list);
4321 memcpy(&temp, &addr->a, sizeof(temp)); 4325 memcpy(&temp, &addr->a, sizeof(temp));
@@ -4325,8 +4329,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4325 err = -ENOMEM; /*fixme: right error?*/ 4329 err = -ENOMEM; /*fixme: right error?*/
4326 goto error; 4330 goto error;
4327 } 4331 }
4328 memcpy(addrs, &temp, addrlen); 4332 memcpy(buf, &temp, addrlen);
4329 to += addrlen; 4333 buf += addrlen;
4330 bytes_copied += addrlen; 4334 bytes_copied += addrlen;
4331 cnt ++; 4335 cnt ++;
4332 space_left -= addrlen; 4336 space_left -= addrlen;
@@ -5227,7 +5231,12 @@ int sctp_inet_listen(struct socket *sock, int backlog)
5227 /* Allocate HMAC for generating cookie. */ 5231 /* Allocate HMAC for generating cookie. */
5228 if (sctp_hmac_alg) { 5232 if (sctp_hmac_alg) {
5229 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC); 5233 tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
5230 if (!tfm) { 5234 if (IS_ERR(tfm)) {
5235 if (net_ratelimit()) {
5236 printk(KERN_INFO
5237 "SCTP: failed to load transform for %s: %ld\n",
5238 sctp_hmac_alg, PTR_ERR(tfm));
5239 }
5231 err = -ENOSYS; 5240 err = -ENOSYS;
5232 goto out; 5241 goto out;
5233 } 5242 }
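The sctp_inet_listen() fix above matters because crypto_alloc_hash() signals failure with an ERR_PTR()-encoded errno rather than NULL, so the old NULL test could never fire and the bogus pointer would be used later. A minimal sketch of the <linux/err.h> convention the new check relies on:

	/* crypto_alloc_hash() returns either a valid handle or an error pointer;
	 * never dereference it before testing with IS_ERR(). */
	struct crypto_hash *tfm;

	tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		long err = PTR_ERR(tfm);	/* negative errno, e.g. -ENOENT */
		/* report err and bail out; tfm is not a usable pointer here */
	}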
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 661ea2dd78ba..bfecb353ab3d 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -141,11 +141,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
141 * an ABORT, so we need to include it in the sac_info. 141 * an ABORT, so we need to include it in the sac_info.
142 */ 142 */
143 if (chunk) { 143 if (chunk) {
144 /* sctp_inqu_pop() has allready pulled off the chunk
145 * header. We need to put it back temporarily
146 */
147 skb_push(chunk->skb, sizeof(sctp_chunkhdr_t));
148
149 /* Copy the chunk data to a new skb and reserve enough 144 /* Copy the chunk data to a new skb and reserve enough
150 * head room to use as notification. 145 * head room to use as notification.
151 */ 146 */
@@ -155,9 +150,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
155 if (!skb) 150 if (!skb)
156 goto fail; 151 goto fail;
157 152
158 /* put back the chunk header now that we have a copy */
159 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
160
161 /* Embed the event fields inside the cloned skb. */ 153 /* Embed the event fields inside the cloned skb. */
162 event = sctp_skb2event(skb); 154 event = sctp_skb2event(skb);
163 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 155 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
@@ -168,7 +160,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
168 160
169 /* Trim the buffer to the right length. */ 161 /* Trim the buffer to the right length. */
170 skb_trim(skb, sizeof(struct sctp_assoc_change) + 162 skb_trim(skb, sizeof(struct sctp_assoc_change) +
171 ntohs(chunk->chunk_hdr->length)); 163 ntohs(chunk->chunk_hdr->length) -
164 sizeof(sctp_chunkhdr_t));
172 } else { 165 } else {
173 event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), 166 event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
174 MSG_NOTIFICATION, gfp); 167 MSG_NOTIFICATION, gfp);