author		Ingo Molnar <mingo@elte.hu>	2009-06-01 15:06:21 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-01 15:06:21 -0400
commit		3d58f48ba05caed9118bce62b3047f8683438835 (patch)
tree		94c911034f0e14ded73d3e9e6e9f8e22b6cad822 /net
parent		abfe0af9813153bae8c85d9bac966bafcb8ddab1 (diff)
parent		d9244b5d2fbfe9fa540024b410047af13ceec90f (diff)
Merge branch 'linus' into irq/numa

Conflicts:
	arch/mips/sibyte/bcm1480/irq.c
	arch/mips/sibyte/sb1250/irq.c

Merge reason: we gathered a few conflicts plus update to latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig                               |   6
-rw-r--r--  net/bluetooth/hci_conn.c                  |   6
-rw-r--r--  net/bluetooth/hci_event.c                 |   2
-rw-r--r--  net/bluetooth/hci_sysfs.c                 |  10
-rw-r--r--  net/bridge/br_input.c                     |   5
-rw-r--r--  net/bridge/br_stp.c                       |   3
-rw-r--r--  net/core/gen_estimator.c                  |  13
-rw-r--r--  net/core/netpoll.c                        |   8
-rw-r--r--  net/core/pktgen.c                         |   2
-rw-r--r--  net/core/skbuff.c                         |   4
-rw-r--r--  net/ipv4/Kconfig                          |   4
-rw-r--r--  net/ipv4/fib_trie.c                       |   6
-rw-r--r--  net/ipv4/ipconfig.c                       |  12
-rw-r--r--  net/ipv4/route.c                          |  60
-rw-r--r--  net/ipv4/tcp.c                            |   5
-rw-r--r--  net/ipv4/tcp_vegas.c                      |  11
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c      |   6
-rw-r--r--  net/ipv6/route.c                          |   3
-rw-r--r--  net/mac80211/rc80211_minstrel.c           |   4
-rw-r--r--  net/mac80211/rc80211_pid_algo.c           |  73
-rw-r--r--  net/mac80211/tx.c                         |   2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c           |   9
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c           |   4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c      |  48
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c   |   4
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c    |  18
-rw-r--r--  net/netfilter/nfnetlink_log.c             |   6
-rw-r--r--  net/netfilter/xt_cluster.c                |   8
-rw-r--r--  net/netfilter/xt_hashlimit.c              |   2
-rw-r--r--  net/rxrpc/ar-connection.c                 |  12
-rw-r--r--  net/sched/cls_cgroup.c                    |  22
-rw-r--r--  net/sched/sch_fifo.c                      |   2
-rw-r--r--  net/sched/sch_teql.c                      |   5
-rw-r--r--  net/sunrpc/svcsock.c                      |  35
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   |   2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     |  15
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |  13
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c               |   3
-rw-r--r--  net/wimax/op-msg.c                        |  11
-rw-r--r--  net/wimax/stack.c                         |  17
-rw-r--r--  net/wireless/reg.c                        |  24
-rw-r--r--  net/wireless/scan.c                       |   1
-rw-r--r--  net/wireless/wext.c                       |   7
43 files changed, 319 insertions(+), 194 deletions(-)
diff --git a/net/Kconfig b/net/Kconfig
index ce77db4fcec8..c19f549c8e74 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -119,12 +119,6 @@ menuconfig NETFILTER
           <file:Documentation/Changes> under "iptables" for the location of
           these packages.
 
-          Make sure to say N to "Fast switching" below if you intend to say Y
-          here, as Fast switching currently bypasses netfilter.
-
-          Chances are that you should say Y here if you compile a kernel which
-          will run as a router and N for regular hosts. If unsure, say N.
-
 if NETFILTER
 
 config NETFILTER_DEBUG
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 61309b26f271..fa47d5d84f5c 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -171,10 +171,8 @@ static void hci_conn_timeout(unsigned long arg)
         switch (conn->state) {
         case BT_CONNECT:
         case BT_CONNECT2:
-                if (conn->type == ACL_LINK)
+                if (conn->type == ACL_LINK && conn->out)
                         hci_acl_connect_cancel(conn);
-                else
-                        hci_acl_disconn(conn, 0x13);
                 break;
         case BT_CONFIG:
         case BT_CONNECTED:
@@ -292,6 +290,8 @@ int hci_conn_del(struct hci_conn *conn)
 
         hci_conn_del_sysfs(conn);
 
+        hci_dev_put(hdev);
+
         return 0;
 }
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4e7cb88e5da9..184ba0a88ec0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1493,7 +1493,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
         hci_dev_lock(hdev);
 
         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-        if (conn) {
+        if (conn && conn->state == BT_CONNECTED) {
                 hci_conn_hold(conn);
                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
                 hci_conn_put(conn);
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 582d8877078c..4cc3624bd22d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -88,14 +88,19 @@ static struct device_type bt_link = {
 static void add_conn(struct work_struct *work)
 {
         struct hci_conn *conn = container_of(work, struct hci_conn, work_add);
+        struct hci_dev *hdev = conn->hdev;
 
         /* ensure previous del is complete */
         flush_work(&conn->work_del);
 
+        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
         if (device_add(&conn->dev) < 0) {
                 BT_ERR("Failed to register connection device");
                 return;
         }
+
+        hci_dev_hold(hdev);
 }
 
 /*
@@ -131,6 +136,7 @@ static void del_conn(struct work_struct *work)
 
         device_del(&conn->dev);
         put_device(&conn->dev);
+
         hci_dev_put(hdev);
 }
 
@@ -154,12 +160,8 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
 
 void hci_conn_add_sysfs(struct hci_conn *conn)
 {
-        struct hci_dev *hdev = conn->hdev;
-
         BT_DBG("conn %p", conn);
 
-        dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
-
         queue_work(bt_workq, &conn->work_add);
 }
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 30b88777c3df..5ee1a3682bf2 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -134,6 +134,10 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
         if (skb->protocol == htons(ETH_P_PAUSE))
                 goto drop;
 
+        /* If STP is turned off, then forward */
+        if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
+                goto forward;
+
         if (NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
                     NULL, br_handle_local_finish))
                 return NULL;        /* frame consumed by filter */
@@ -141,6 +145,7 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
                 return skb;        /* continue processing */
         }
 
+forward:
         switch (p->state) {
         case BR_STATE_FORWARDING:
                 rhook = rcu_dereference(br_should_route_hook);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6e63ec3f1fcf..0660515f3992 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -297,6 +297,9 @@ void br_topology_change_detection(struct net_bridge *br)
 {
         int isroot = br_is_root_bridge(br);
 
+        if (br->stp_enabled != BR_KERNEL_STP)
+                return;
+
         pr_info("%s: topology change detected, %s\n", br->dev->name,
                 isroot ? "propagating" : "sending tcn bpdu");
 
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9cc9f95b109e..6d62d4618cfc 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,9 +66,9 @@
 
    NOTES.
 
-   * The stored value for avbps is scaled by 2^5, so that maximal
-     rate is ~1Gbit, avpps is scaled by 2^10.
-
+   * avbps is scaled by 2^5, avpps is scaled by 2^10.
+   * both values are reported as 32 bit unsigned values. bps can
+     overflow for fast links : max speed being 34360Mbit/sec
    * Minimal interval is HZ/4=250msec (it is the greatest common divisor
      for HZ=100 and HZ=1024 8)), maximal interval
      is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
@@ -86,9 +86,9 @@ struct gen_estimator
         spinlock_t        *stats_lock;
         int               ewma_log;
         u64               last_bytes;
+        u64               avbps;
         u32               last_packets;
         u32               avpps;
-        u32               avbps;
         struct rcu_head   e_rcu;
         struct rb_node    node;
 };
@@ -115,6 +115,7 @@ static void est_timer(unsigned long arg)
         rcu_read_lock();
         list_for_each_entry_rcu(e, &elist[idx].list, list) {
                 u64 nbytes;
+                u64 brate;
                 u32 npackets;
                 u32 rate;
 
@@ -125,9 +126,9 @@ static void est_timer(unsigned long arg)
 
                 nbytes = e->bstats->bytes;
                 npackets = e->bstats->packets;
-                rate = (nbytes - e->last_bytes)<<(7 - idx);
+                brate = (nbytes - e->last_bytes)<<(7 - idx);
                 e->last_bytes = nbytes;
-                e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
+                e->avbps += ((s64)(brate - e->avbps)) >> e->ewma_log;
                 e->rate_est->bps = (e->avbps+0xF)>>5;
 
                 rate = (npackets - e->last_packets)<<(12 - idx);
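
The estimator change is easier to follow outside the kernel. Below is a minimal
userspace sketch (hypothetical names, not kernel code) of the same EWMA step,
assuming the fixed 250 ms interval (idx 0) and ewma_log = 5; it shows why avbps
is widened to u64 and why the signed shift in the patched line matters:

        #include <stdint.h>
        #include <stdio.h>

        struct est {
                uint64_t last_bytes;
                uint64_t avbps;         /* byte rate, fixed-point scaled by 2^5 */
                int ewma_log;
        };

        static void est_step(struct est *e, uint64_t nbytes)
        {
                /* bytes per 250 ms interval: <<2 for the 4 intervals/sec,
                 * <<5 for the fixed-point scale; the kernel folds both
                 * into <<(7 - idx) */
                uint64_t brate = (nbytes - e->last_bytes) << 7;

                e->last_bytes = nbytes;
                /* the (s64) cast from the patch: a signed shift, so the
                 * estimate can move down as well as up */
                e->avbps += (int64_t)(brate - e->avbps) >> e->ewma_log;
        }

        int main(void)
        {
                struct est e = { .ewma_log = 5 };
                uint64_t total = 0;

                for (int i = 0; i < 256; i++) {
                        total += 125000000 / 4;   /* ~1 Gbit/s of traffic */
                        est_step(&e, total);
                }
                /* converges toward 125000000 bytes/sec; a u32 avbps would
                 * saturate near ~1 Gbit/s, which is why the field becomes
                 * u64 in the hunk above */
                printf("estimated byte rate: %llu\n",
                       (unsigned long long)((e.avbps + 0xF) >> 5));
                return 0;
        }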
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b5873bdff612..64f51eec6576 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -175,9 +175,13 @@ static void service_arp_queue(struct netpoll_info *npi)
 void netpoll_poll(struct netpoll *np)
 {
         struct net_device *dev = np->dev;
-        const struct net_device_ops *ops = dev->netdev_ops;
+        const struct net_device_ops *ops;
+
+        if (!dev || !netif_running(dev))
+                return;
 
-        if (!dev || !netif_running(dev) || !ops->ndo_poll_controller)
+        ops = dev->netdev_ops;
+        if (!ops->ndo_poll_controller)
                 return;
 
         /* Process pending work on NIC */
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 3779c1438c11..0666a827bc62 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2447,7 +2447,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
         if (pkt_dev->cflows) {
                 /* let go of the SAs if we have them */
                 int i = 0;
-                for (; i < pkt_dev->nflows; i++){
+                for (; i < pkt_dev->cflows; i++) {
                         struct xfrm_state *x = pkt_dev->flows[i].x;
                         if (x) {
                                 xfrm_state_put(x);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f091a5a845c1..e505b5392e1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
         shinfo->gso_segs = 0;
         shinfo->gso_type = 0;
         shinfo->ip6_frag_id = 0;
+        shinfo->tx_flags.flags = 0;
         shinfo->frag_list = NULL;
+        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 
         memset(skb, 0, offsetof(struct sk_buff, tail));
         skb->data = skb->head + NET_SKB_PAD;
@@ -2286,7 +2288,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
         block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
 
-        if (abs_offset < block_limit) {
+        if (abs_offset < block_limit && !st->frag_data) {
                 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
                 return block_limit - abs_offset;
         }
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index b2cf91e4ccaa..5b919f7b45db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -407,8 +407,8 @@ config INET_XFRM_MODE_BEET
           If unsure, say Y.
 
 config INET_LRO
-        tristate "Large Receive Offload (ipv4/tcp)"
-
+        bool "Large Receive Offload (ipv4/tcp)"
+        default y
         ---help---
           Support for Large Receive Offload (ipv4/tcp).
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ec0ae490f0b6..33c7c85dfe40 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -986,9 +986,12 @@ fib_find_node(struct trie *t, u32 key)
 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
 {
         int wasfull;
-        t_key cindex, key = tn->key;
+        t_key cindex, key;
         struct tnode *tp;
 
+        preempt_disable();
+        key = tn->key;
+
         while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
                 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
                 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
@@ -1007,6 +1010,7 @@ static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
         if (IS_TNODE(tn))
                 tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
+        preempt_enable();
         return (struct node *)tn;
 }
 
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 90d22ae0a419..88bf051d0cbb 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -139,6 +139,8 @@ __be32 ic_servaddr = NONE; /* Boot server IP address */
 __be32 root_server_addr = NONE;        /* Address of NFS server */
 u8 root_server_path[256] = { 0, };     /* Path to mount as root */
 
+u32 ic_dev_xid;                /* Device under configuration */
+
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -932,6 +934,13 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
                 goto drop_unlock;
         }
 
+        /* Is it a reply for the device we are configuring? */
+        if (b->xid != ic_dev_xid) {
+                if (net_ratelimit())
+                        printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n");
+                goto drop_unlock;
+        }
+
         /* Parse extensions */
         if (ext_len >= 4 &&
             !memcmp(b->exten, ic_bootp_cookie, 4)) { /* Check magic cookie */
@@ -1115,6 +1124,9 @@ static int __init ic_dynamic(void)
         get_random_bytes(&timeout, sizeof(timeout));
         timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM);
         for (;;) {
+                /* Track the device we are configuring */
+                ic_dev_xid = d->xid;
+
 #ifdef IPCONFIG_BOOTP
                 if (do_bootp && (d->able & IC_BOOTP))
                         ic_bootp_send_if(d, jiffies - start_jiffies);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c4c60e9f068a..28205e5bfa9b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -784,8 +784,8 @@ static void rt_check_expire(void)
 {
         static unsigned int rover;
         unsigned int i = rover, goal;
-        struct rtable *rth, **rthp;
-        unsigned long length = 0, samples = 0;
+        struct rtable *rth, *aux, **rthp;
+        unsigned long samples = 0;
         unsigned long sum = 0, sum2 = 0;
         u64 mult;
 
@@ -795,9 +795,9 @@ static void rt_check_expire(void)
         goal = (unsigned int)mult;
         if (goal > rt_hash_mask)
                 goal = rt_hash_mask + 1;
-        length = 0;
         for (; goal > 0; goal--) {
                 unsigned long tmo = ip_rt_gc_timeout;
+                unsigned long length;
 
                 i = (i + 1) & rt_hash_mask;
                 rthp = &rt_hash_table[i].chain;
@@ -809,8 +809,10 @@ static void rt_check_expire(void)
 
                 if (*rthp == NULL)
                         continue;
+                length = 0;
                 spin_lock_bh(rt_hash_lock_addr(i));
                 while ((rth = *rthp) != NULL) {
+                        prefetch(rth->u.dst.rt_next);
                         if (rt_is_expired(rth)) {
                                 *rthp = rth->u.dst.rt_next;
                                 rt_free(rth);
@@ -819,33 +821,30 @@ static void rt_check_expire(void)
                         if (rth->u.dst.expires) {
                                 /* Entry is expired even if it is in use */
                                 if (time_before_eq(jiffies, rth->u.dst.expires)) {
+nofree:
                                         tmo >>= 1;
                                         rthp = &rth->u.dst.rt_next;
                                         /*
-                                         * Only bump our length if the hash
-                                         * inputs on entries n and n+1 are not
-                                         * the same, we only count entries on
+                                         * We only count entries on
                                          * a chain with equal hash inputs once
                                          * so that entries for different QOS
                                          * levels, and other non-hash input
                                          * attributes don't unfairly skew
                                          * the length computation
                                          */
-                                        if ((*rthp == NULL) ||
-                                            !compare_hash_inputs(&(*rthp)->fl,
-                                                                 &rth->fl))
-                                                length += ONE;
+                                        for (aux = rt_hash_table[i].chain;;) {
+                                                if (aux == rth) {
+                                                        length += ONE;
+                                                        break;
+                                                }
+                                                if (compare_hash_inputs(&aux->fl, &rth->fl))
+                                                        break;
+                                                aux = aux->u.dst.rt_next;
+                                        }
                                         continue;
                                 }
-                        } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
-                                tmo >>= 1;
-                                rthp = &rth->u.dst.rt_next;
-                                if ((*rthp == NULL) ||
-                                    !compare_hash_inputs(&(*rthp)->fl,
-                                                         &rth->fl))
-                                        length += ONE;
-                                continue;
-                        }
+                        } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                                goto nofree;
 
                         /* Cleanup aged off entries. */
                         *rthp = rth->u.dst.rt_next;
@@ -1068,7 +1067,6 @@ out: return 0;
 static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 {
         struct rtable *rth, **rthp;
-        struct rtable *rthi;
         unsigned long now;
         struct rtable *cand, **candp;
         u32 min_score;
@@ -1088,7 +1086,6 @@ restart:
         }
 
         rthp = &rt_hash_table[hash].chain;
-        rthi = NULL;
 
         spin_lock_bh(rt_hash_lock_addr(hash));
         while ((rth = *rthp) != NULL) {
@@ -1134,17 +1131,6 @@ restart:
                 chain_length++;
 
                 rthp = &rth->u.dst.rt_next;
-
-                /*
-                 * check to see if the next entry in the chain
-                 * contains the same hash input values as rt.  If it does
-                 * This is where we will insert into the list, instead of
-                 * at the head.  This groups entries that differ by aspects not
-                 * relvant to the hash function together, which we use to adjust
-                 * our chain length
-                 */
-                if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
-                        rthi = rth;
         }
 
         if (cand) {
@@ -1205,10 +1191,7 @@ restart:
                 }
         }
 
-        if (rthi)
-                rt->u.dst.rt_next = rthi->u.dst.rt_next;
-        else
-                rt->u.dst.rt_next = rt_hash_table[hash].chain;
+        rt->u.dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
         if (rt->u.dst.rt_next) {
@@ -1224,10 +1207,7 @@ restart:
          * previous writes to rt are comitted to memory
          * before making rt visible to other CPUS.
          */
-        if (rthi)
-                rcu_assign_pointer(rthi->u.dst.rt_next, rt);
-        else
-                rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+        rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
         spin_unlock_bh(rt_hash_lock_addr(hash));
         *rp = rt;
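
The new aux walk above replaces a neighbour-only comparison with a scan from
the chain head, so each group of entries sharing the same hash inputs is
counted exactly once regardless of ordering. The same idea on a plain linked
list, as a hypothetical standalone sketch:

        /* All names here are illustrative, not kernel API. */
        struct entry {
                int key;                /* stands in for the flow hash inputs */
                struct entry *next;
        };

        static unsigned long chain_length(struct entry *head)
        {
                unsigned long length = 0;

                for (struct entry *e = head; e; e = e->next) {
                        struct entry *aux = head;

                        /* walk from the head until we meet either ourselves
                         * (first entry with this key -> count it) or an
                         * earlier duplicate (-> don't count it again) */
                        for (;;) {
                                if (aux == e) {
                                        length++;
                                        break;
                                }
                                if (aux->key == e->key)
                                        break;
                                aux = aux->next;
                        }
                }
                return length;
        }

Like the kernel loop, this is O(n^2) per chain, which is acceptable because
the whole point of the computation is that chains are expected to stay short.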
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1d7f49c6f0ca..7a0f0b27bf1f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1321,6 +1321,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         struct task_struct *user_recv = NULL;
         int copied_early = 0;
         struct sk_buff *skb;
+        u32 urg_hole = 0;
 
         lock_sock(sk);
 
@@ -1532,7 +1533,8 @@ do_prequeue:
                         }
                 }
         }
-        if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
+        if ((flags & MSG_PEEK) &&
+            (peek_seq - copied - urg_hole != tp->copied_seq)) {
                 if (net_ratelimit())
                         printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
                                current->comm, task_pid_nr(current));
@@ -1553,6 +1555,7 @@ do_prequeue:
                 if (!urg_offset) {
                         if (!sock_flag(sk, SOCK_URGINLINE)) {
                                 ++*seq;
+                                urg_hole++;
                                 offset++;
                                 used--;
                                 if (!used)
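
A worked trace of the patched MSG_PEEK test, with hypothetical numbers:
skipping an urgent byte advances the peek cursor without copying anything,
and urg_hole is what lets the consistency check account for that instead of
firing a false "application bug" warning:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t copied_seq = 1000;        /* hypothetical start */
                uint32_t peek_seq   = copied_seq;
                uint32_t copied = 0, urg_hole = 0;

                /* peek 100 bytes, skipping 1 urgent byte not read inline */
                peek_seq += 101;        /* the ++*seq for the urgent byte */
                copied   += 100;
                urg_hole += 1;

                /* patched check: the peek cursor minus copied bytes minus
                 * skipped urgent bytes must land back on copied_seq */
                assert(peek_seq - copied - urg_hole == copied_seq);
                /* the old peek_seq != copied_seq test would have warned */
                return 0;
        }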
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a453aac91bd3..c6743eec9b7d 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 }
 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
 
+static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+{
+        return min(tp->snd_ssthresh, tp->snd_cwnd-1);
+}
+
 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
         struct tcp_sock *tp = tcp_sk(sk);
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                  */
                 diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                if (diff > gamma && tp->snd_ssthresh > 2 ) {
+                if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
                         /* Going too fast. Time to slow down
                          * and switch to congestion avoidance.
                          */
-                        tp->snd_ssthresh = 2;
 
                         /* Set cwnd to match the actual rate
                          * exactly:
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                          * utilization.
                          */
                         tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+                        tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
                 } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
                         /* Slow start.  */
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                  * we slow down.
                                  */
                                 tp->snd_cwnd--;
+                                tp->snd_ssthresh
+                                        = tcp_vegas_ssthresh(tp);
                         } else if (diff < alpha) {
                                 /* We don't have enough extra packets
                                  * in the network, so speed up.
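
The effect of the new helper, traced with hypothetical values in a standalone
sketch: clamping ssthresh to min(ssthresh, cwnd-1) ends slow start without
crushing ssthresh to the hard-coded 2 that the removed line imposed:

        #include <assert.h>

        /* mirrors tcp_vegas_ssthresh() above, plain unsigned ints */
        static unsigned int vegas_ssthresh(unsigned int ssthresh,
                                           unsigned int cwnd)
        {
                return ssthresh < cwnd - 1 ? ssthresh : cwnd - 1;
        }

        int main(void)
        {
                /* leaving slow start with cwnd = 20, ssthresh = 64:
                 * result 19, so cwnd (20) <= ssthresh is now false and
                 * the flow proceeds in congestion avoidance */
                assert(vegas_ssthresh(64, 20) == 19);
                /* an already conservative ssthresh is left alone */
                assert(vegas_ssthresh(10, 20) == 10);
                return 0;
        }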
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 14e6724d5672..91490ad9302c 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
                 struct ipv6_opt_hdr _hdr;
                 int hdrlen;
 
-                /* Is there enough space for the next ext header? */
-                if (len < (int)sizeof(struct ipv6_opt_hdr))
-                        return false;
                 /* No more exthdr -> evaluate */
                 if (nexthdr == NEXTHDR_NONE) {
                         temp |= MASK_NONE;
                         break;
                 }
+                /* Is there enough space for the next ext header? */
+                if (len < (int)sizeof(struct ipv6_opt_hdr))
+                        return false;
                 /* ESP -> evaluate */
                 if (nexthdr == NEXTHDR_ESP) {
                         temp |= MASK_ESP;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1394ddb6e35c..032a5ec391c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -137,6 +137,7 @@ static struct rt6_info ip6_null_entry_template = {
                 }
         },
         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
+        .rt6i_protocol  = RTPROT_KERNEL,
         .rt6i_metric    = ~(u32) 0,
         .rt6i_ref       = ATOMIC_INIT(1),
 };
@@ -159,6 +160,7 @@ static struct rt6_info ip6_prohibit_entry_template = {
                 }
         },
         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
+        .rt6i_protocol  = RTPROT_KERNEL,
         .rt6i_metric    = ~(u32) 0,
         .rt6i_ref       = ATOMIC_INIT(1),
 };
@@ -176,6 +178,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
                 }
         },
         .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
+        .rt6i_protocol  = RTPROT_KERNEL,
         .rt6i_metric    = ~(u32) 0,
         .rt6i_ref       = ATOMIC_INIT(1),
 };
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 3824990d340b..d9233ec50610 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -476,8 +476,8 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
                 return NULL;
 
         for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-                sband = hw->wiphy->bands[hw->conf.channel->band];
-                if (sband->n_bitrates > max_rates)
+                sband = hw->wiphy->bands[i];
+                if (sband && sband->n_bitrates > max_rates)
                         max_rates = sband->n_bitrates;
         }
 
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index b16801cde06f..8bef9a1262ff 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -317,13 +317,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
                                struct ieee80211_sta *sta, void *priv_sta)
 {
         struct rc_pid_sta_info *spinfo = priv_sta;
+        struct rc_pid_info *pinfo = priv;
+        struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
         struct sta_info *si;
+        int i, j, tmp;
+        bool s;
 
         /* TODO: This routine should consider using RSSI from previous packets
          * as we need to have IEEE 802.1X auth succeed immediately after assoc..
          * Until that method is implemented, we will use the lowest supported
          * rate as a workaround. */
 
+        /* Sort the rates. This is optimized for the most common case (i.e.
+         * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
+         * mapping too. */
+        for (i = 0; i < sband->n_bitrates; i++) {
+                rinfo[i].index = i;
+                rinfo[i].rev_index = i;
+                if (RC_PID_FAST_START)
+                        rinfo[i].diff = 0;
+                else
+                        rinfo[i].diff = i * pinfo->norm_offset;
+        }
+        for (i = 1; i < sband->n_bitrates; i++) {
+                s = 0;
+                for (j = 0; j < sband->n_bitrates - i; j++)
+                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
+                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
+                                tmp = rinfo[j].index;
+                                rinfo[j].index = rinfo[j + 1].index;
+                                rinfo[j + 1].index = tmp;
+                                rinfo[rinfo[j].index].rev_index = j;
+                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
+                                s = 1;
+                        }
+                if (!s)
+                        break;
+        }
+
         spinfo->txrate_idx = rate_lowest_index(sband, sta);
         /* HACK */
         si = container_of(sta, struct sta_info, sta);
@@ -336,21 +367,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
         struct rc_pid_info *pinfo;
         struct rc_pid_rateinfo *rinfo;
         struct ieee80211_supported_band *sband;
-        int i, j, tmp;
-        bool s;
+        int i, max_rates = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
         struct rc_pid_debugfs_entries *de;
 #endif
 
-        sband = hw->wiphy->bands[hw->conf.channel->band];
-
         pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
         if (!pinfo)
                 return NULL;
 
-        /* We can safely assume that sband won't change unless we get
-         * reinitialized. */
-        rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
+        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+                sband = hw->wiphy->bands[i];
+                if (sband && sband->n_bitrates > max_rates)
+                        max_rates = sband->n_bitrates;
+        }
+
+        rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
         if (!rinfo) {
                 kfree(pinfo);
                 return NULL;
@@ -368,33 +400,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
         pinfo->rinfo = rinfo;
         pinfo->oldrate = 0;
 
-        /* Sort the rates. This is optimized for the most common case (i.e.
-         * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
-         * mapping too. */
-        for (i = 0; i < sband->n_bitrates; i++) {
-                rinfo[i].index = i;
-                rinfo[i].rev_index = i;
-                if (RC_PID_FAST_START)
-                        rinfo[i].diff = 0;
-                else
-                        rinfo[i].diff = i * pinfo->norm_offset;
-        }
-        for (i = 1; i < sband->n_bitrates; i++) {
-                s = 0;
-                for (j = 0; j < sband->n_bitrates - i; j++)
-                        if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
-                                     sband->bitrates[rinfo[j + 1].index].bitrate)) {
-                                tmp = rinfo[j].index;
-                                rinfo[j].index = rinfo[j + 1].index;
-                                rinfo[j + 1].index = tmp;
-                                rinfo[rinfo[j].index].rev_index = j;
-                                rinfo[rinfo[j + 1].index].rev_index = j + 1;
-                                s = 1;
-                        }
-                if (!s)
-                        break;
-        }
-
 #ifdef CONFIG_MAC80211_DEBUGFS
         de = &pinfo->dentries;
         de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
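
The "bubble-sort with reversed mapping" that the patch moves into rate_init()
is compact but easy to misread. A hypothetical standalone version on plain
arrays (illustrative names, not the driver's structures):

        #include <stdbool.h>
        #include <stdio.h>

        #define N 4

        int main(void)
        {
                int bitrate[N] = { 60, 10, 20, 55 };  /* 100 kbit/s units */
                int index[N], rev_index[N];

                /* index[pos] = which rate sits at sorted position pos;
                 * rev_index[rate] = where rate ended up */
                for (int i = 0; i < N; i++)
                        index[i] = rev_index[i] = i;

                for (int i = 1; i < N; i++) {
                        bool swapped = false;
                        for (int j = 0; j < N - i; j++)
                                if (bitrate[index[j]] > bitrate[index[j + 1]]) {
                                        int tmp = index[j];
                                        index[j] = index[j + 1];
                                        index[j + 1] = tmp;
                                        /* keep the reverse map in step */
                                        rev_index[index[j]] = j;
                                        rev_index[index[j + 1]] = j + 1;
                                        swapped = true;
                                }
                        if (!swapped)   /* early exit: input almost sorted */
                                break;
                }

                for (int j = 0; j < N; j++)
                        printf("pos %d: rate %d (orig idx %d)\n",
                               j, bitrate[index[j]], index[j]);
                return 0;
        }

The early exit is why the kernel comment calls it optimized for almost-sorted
CCK+OFDM rate tables: for already-ordered input the sort is a single pass.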
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3fb04a86444d..63656266d567 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -772,7 +772,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
         hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
         /* internal error, why is TX_FRAGMENTED set? */
-        if (WARN_ON(skb->len <= frag_threshold))
+        if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
                 return TX_DROP;
 
         /*
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 60aba45023ff..77bfdfeb966e 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -260,7 +260,10 @@ struct ip_vs_conn *ip_vs_ct_in_get
         list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
                 if (cp->af == af &&
                     ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
-                    ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
+                    /* protocol should only be IPPROTO_IP if
+                     * d_addr is a fwmark */
+                    ip_vs_addr_equal(protocol == IPPROTO_IP ? AF_UNSPEC : af,
+                                     d_addr, &cp->vaddr) &&
                     s_port == cp->cport && d_port == cp->vport &&
                     cp->flags & IP_VS_CONN_F_TEMPLATE &&
                     protocol == cp->protocol) {
@@ -698,7 +701,9 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
         cp->cport          = cport;
         ip_vs_addr_copy(af, &cp->vaddr, vaddr);
         cp->vport          = vport;
-        ip_vs_addr_copy(af, &cp->daddr, daddr);
+        /* proto should only be IPPROTO_IP if d_addr is a fwmark */
+        ip_vs_addr_copy(proto == IPPROTO_IP ? AF_UNSPEC : af,
+                        &cp->daddr, daddr);
         cp->dport          = dport;
         cp->flags          = flags;
         spin_lock_init(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index cb3e031335eb..8dddb17a947a 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -278,7 +278,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
          */
         if (svc->fwmark) {
                 union nf_inet_addr fwmark = {
-                        .all = { 0, 0, 0, htonl(svc->fwmark) }
+                        .ip = htonl(svc->fwmark)
                 };
 
                 ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0,
@@ -306,7 +306,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
          */
         if (svc->fwmark) {
                 union nf_inet_addr fwmark = {
-                        .all = { 0, 0, 0, htonl(svc->fwmark) }
+                        .ip = htonl(svc->fwmark)
                 };
 
                 ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f13fc57e1ecb..c523f0b8cee5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
         return 0;
 }
 
-static inline void
-ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report)
-{
-        unsigned int events = 0;
-
-        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-                events |= IPCT_RELATED;
-        else
-                events |= IPCT_NEW;
-
-        nf_conntrack_event_report(IPCT_STATUS |
-                                  IPCT_HELPER |
-                                  IPCT_REFRESH |
-                                  IPCT_PROTOINFO |
-                                  IPCT_NATSEQADJ |
-                                  IPCT_MARK |
-                                  events,
-                                  ct,
-                                  pid,
-                                  report);
-}
-
 static struct nf_conn *
 ctnetlink_create_conntrack(struct nlattr *cda[],
                            struct nf_conntrack_tuple *otuple,
@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                 err = -ENOENT;
                 if (nlh->nlmsg_flags & NLM_F_CREATE) {
                         struct nf_conn *ct;
+                        enum ip_conntrack_events events;
 
                         ct = ctnetlink_create_conntrack(cda, &otuple,
                                                         &rtuple, u3);
@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
                         err = 0;
                         nf_conntrack_get(&ct->ct_general);
                         spin_unlock_bh(&nf_conntrack_lock);
-                        ctnetlink_event_report(ct,
-                                               NETLINK_CB(skb).pid,
-                                               nlmsg_report(nlh));
+                        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
+                                events = IPCT_RELATED;
+                        else
+                                events = IPCT_NEW;
+
+                        nf_conntrack_event_report(IPCT_STATUS |
+                                                  IPCT_HELPER |
+                                                  IPCT_PROTOINFO |
+                                                  IPCT_NATSEQADJ |
+                                                  IPCT_MARK | events,
+                                                  ct, NETLINK_CB(skb).pid,
+                                                  nlmsg_report(nlh));
                         nf_ct_put(ct);
                 } else
                         spin_unlock_bh(&nf_conntrack_lock);
@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
         if (err == 0) {
                 nf_conntrack_get(&ct->ct_general);
                 spin_unlock_bh(&nf_conntrack_lock);
-                ctnetlink_event_report(ct,
-                                       NETLINK_CB(skb).pid,
-                                       nlmsg_report(nlh));
+                nf_conntrack_event_report(IPCT_STATUS |
+                                          IPCT_HELPER |
+                                          IPCT_PROTOINFO |
+                                          IPCT_NATSEQADJ |
+                                          IPCT_MARK,
+                                          ct, NETLINK_CB(skb).pid,
+                                          nlmsg_report(nlh));
                 nf_ct_put(ct);
         } else
                 spin_unlock_bh(&nf_conntrack_lock);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 8e757dd53396..aee0d6bea309 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -22,6 +22,7 @@
 #include <linux/netfilter/nfnetlink_conntrack.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_log.h>
 
 static DEFINE_RWLOCK(dccp_lock);
@@ -553,6 +554,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
         ct->proto.dccp.state = new_state;
         write_unlock_bh(&dccp_lock);
 
+        if (new_state != old_state)
+                nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+
         dn = dccp_pernet(net);
         nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
 
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b5ccf2b4b2e7..97a6e93d742e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -634,6 +634,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
                         sender->td_end = end;
                 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
         }
+        if (tcph->ack) {
+                if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
+                        sender->td_maxack = ack;
+                        sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
+                } else if (after(ack, sender->td_maxack))
+                        sender->td_maxack = ack;
+        }
+
         /*
          * Update receiver data.
          */
@@ -919,6 +927,16 @@ static int tcp_packet(struct nf_conn *ct,
                         return -NF_ACCEPT;
                 case TCP_CONNTRACK_CLOSE:
                         if (index == TCP_RST_SET
+                            && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
+                            && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
+                                /* Invalid RST */
+                                write_unlock_bh(&tcp_lock);
+                                if (LOG_INVALID(net, IPPROTO_TCP))
+                                        nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+                                                  "nf_ct_tcp: invalid RST ");
+                                return -NF_ACCEPT;
+                        }
+                        if (index == TCP_RST_SET
                             && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
                                  && ct->proto.tcp.last_index == TCP_SYN_SET)
                                 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
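
The invalid-RST test leans on before()/after(), which compare 32-bit sequence
numbers modulo 2^32; the signed-difference trick keeps them correct across
wraparound. A standalone sketch mirroring the definitions in include/net/tcp.h:

        #include <stdint.h>
        #include <assert.h>

        static inline int before(uint32_t seq1, uint32_t seq2)
        {
                return (int32_t)(seq1 - seq2) < 0;
        }
        #define after(seq2, seq1)       before(seq1, seq2)

        int main(void)
        {
                /* 0xFFFFFFF0 is "before" 0x10: the space wrapped */
                assert(before(0xFFFFFFF0u, 0x10u));
                assert(after(0x10u, 0xFFFFFFF0u));
                /* so a RST whose seq falls before the highest ACK seen
                 * from the peer (td_maxack) is rejected as forged */
                return 0;
        }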
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fd326ac27ec8..66a6dd5c519a 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -581,6 +581,12 @@ nfulnl_log_packet(u_int8_t pf,
                 + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
                 + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
 
+        if (in && skb_mac_header_was_set(skb)) {
+                size += nla_total_size(skb->dev->hard_header_len)
+                        + nla_total_size(sizeof(u_int16_t)) /* hwtype */
+                        + nla_total_size(sizeof(u_int16_t)); /* hwlen */
+        }
+
         spin_lock_bh(&inst->lock);
 
         if (inst->flags & NFULNL_CFG_F_SEQ)
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 6c4847662b85..69a639f35403 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
 {
         struct xt_cluster_match_info *info = par->matchinfo;
 
-        if (info->node_mask >= (1 << info->total_nodes)) {
+        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
+                printk(KERN_ERR "xt_cluster: you have exceeded the maximum "
+                                "number of cluster nodes (%u > %u)\n",
+                                info->total_nodes, XT_CLUSTER_NODES_MAX);
+                return false;
+        }
+        if (info->node_mask >= (1ULL << info->total_nodes)) {
                 printk(KERN_ERR "xt_cluster: this node mask cannot be "
                                 "higher than the total number of nodes\n");
                 return false;
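
Two things happen in that hunk: total_nodes gets an explicit upper bound, and
the shift is widened to 1ULL. The widening matters because shifting a 32-bit
int by 32 or more is undefined behaviour, so an oversized total_nodes could
defeat the old mask check entirely. A minimal illustration:

        #include <stdio.h>

        int main(void)
        {
                unsigned int total_nodes = 32;

                /* (1 << 32) on a 32-bit int is undefined behaviour; many
                 * compilers wrap it to 1 or 0, so node_mask >= (1 << 32)
                 * rejects almost everything or nothing. The 64-bit shift
                 * is well defined up to 63: */
                printf("1ULL << %u = %llu\n",
                       total_nodes, 1ULL << total_nodes);
                return 0;
        }

The explicit XT_CLUSTER_NODES_MAX comparison still comes first, catching any
value the widened shift alone could not make safe.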
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index a5b5369c30f9..219dcdbe388c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -926,7 +926,7 @@ static int dl_seq_show(struct seq_file *s, void *v)
         if (!hlist_empty(&htable->hash[*bucket])) {
                 hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
                         if (dl_seq_real_show(ent, htable->family, s))
-                                return 1;
+                                return -1;
         }
         return 0;
 }
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 0f1218b8d289..67e38a056240 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -343,9 +343,9 @@ static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
         /* not yet present - create a candidate for a new connection
          * and then redo the check */
         conn = rxrpc_alloc_connection(gfp);
-        if (IS_ERR(conn)) {
-                _leave(" = %ld", PTR_ERR(conn));
-                return PTR_ERR(conn);
+        if (!conn) {
+                _leave(" = -ENOMEM");
+                return -ENOMEM;
         }
 
         conn->trans = trans;
@@ -508,9 +508,9 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
         /* not yet present - create a candidate for a new connection and then
          * redo the check */
         candidate = rxrpc_alloc_connection(gfp);
-        if (IS_ERR(candidate)) {
-                _leave(" = %ld", PTR_ERR(candidate));
-                return PTR_ERR(candidate);
+        if (!candidate) {
+                _leave(" = -ENOMEM");
+                return -ENOMEM;
         }
 
         candidate->trans = trans;
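
The bug fixed here is a mismatch of error-return conventions:
rxrpc_alloc_connection() returns NULL on allocation failure, but the callers
tested with IS_ERR(), which is false for NULL, so a failed allocation sailed
through as success. A sketch of the two conventions, with IS_ERR written out
roughly as in include/linux/err.h:

        #define MAX_ERRNO       4095

        /* true only for pointers in the top 4095 bytes of the address
         * space, i.e. values produced by ERR_PTR(-errno) */
        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }

        /* convention A: return NULL on failure        -> check with !ptr   */
        /* convention B: return ERR_PTR(-errno) values -> check with IS_ERR */

        int main(void)
        {
                void *failed_alloc = 0; /* what a NULL-convention allocator
                                         * returns on out-of-memory */
                return IS_ERR(failed_alloc);    /* 0: IS_ERR(NULL) is false */
        }

A function must be checked with the convention it actually uses; mixing them,
as the removed lines did, silently converts failure into success.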
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 91a3db4a76f8..cc29b44b1500 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -104,8 +104,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                                struct tcf_result *res)
 {
         struct cls_cgroup_head *head = tp->root;
-        struct cgroup_cls_state *cs;
-        int ret = 0;
+        u32 classid;
 
         /*
          * Due to the nature of the classifier it is required to ignore all
@@ -121,17 +120,18 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
                 return -1;
 
         rcu_read_lock();
-        cs = task_cls_state(current);
-        if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
-                res->classid = cs->classid;
-                res->class = 0;
-                ret = tcf_exts_exec(skb, &head->exts, res);
-        } else
-                ret = -1;
-
+        classid = task_cls_state(current)->classid;
         rcu_read_unlock();
 
-        return ret;
+        if (!classid)
+                return -1;
+
+        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
+                return -1;
+
+        res->classid = classid;
+        res->class = 0;
+        return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 92cfc9d7e3b9..69188e8358b4 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
                 u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
                 if (sch->ops == &bfifo_qdisc_ops)
-                        limit *= qdisc_dev(sch)->mtu;
+                        limit *= psched_mtu(qdisc_dev(sch));
 
                 q->limit = limit;
         } else {
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ec697cebb63b..3b6418297231 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -303,6 +303,8 @@ restart:
                 switch (teql_resolve(skb, skb_res, slave)) {
                 case 0:
                         if (__netif_tx_trylock(slave_txq)) {
+                                unsigned int length = qdisc_pkt_len(skb);
+
                                 if (!netif_tx_queue_stopped(slave_txq) &&
                                     !netif_tx_queue_frozen(slave_txq) &&
                                     slave_ops->ndo_start_xmit(skb, slave) == 0) {
@@ -310,8 +312,7 @@ restart:
                                         master->slaves = NEXT_SLAVE(q);
                                         netif_wake_queue(dev);
                                         master->stats.tx_packets++;
-                                        master->stats.tx_bytes +=
-                                                qdisc_pkt_len(skb);
+                                        master->stats.tx_bytes += length;
                                         return 0;
                                 }
                                 __netif_tx_unlock(slave_txq);
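
This teql change is a use-after-free fix in miniature: once ndo_start_xmit()
succeeds, the skb belongs to the driver and may already be freed, so the
length is captured first. The general pattern, as a hypothetical sketch
(illustrative names; link it against any transmit routine that consumes its
argument on success):

        struct pkt { unsigned int len; };
        struct txstats { unsigned long tx_packets, tx_bytes; };

        /* stand-in for ndo_start_xmit(): consumes (and may free) the
         * packet when it returns 0 */
        extern int xmit_consumes(struct pkt *p);

        void send_one(struct pkt *p, struct txstats *s)
        {
                unsigned int length = p->len; /* snapshot while we own p */

                if (xmit_consumes(p) == 0) {
                        s->tx_packets++;
                        s->tx_bytes += length;  /* p may be freed by now */
                }
        }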
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index af3198814c15..9d504234af4a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -345,6 +345,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
         lock_sock(sock->sk);
         sock->sk->sk_sndbuf = snd * 2;
         sock->sk->sk_rcvbuf = rcv * 2;
+        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
         release_sock(sock->sk);
 #endif
 }
@@ -796,6 +797,23 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
                 test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
                 test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 
+        if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
+                /* sndbuf needs to have room for one request
+                 * per thread, otherwise we can stall even when the
+                 * network isn't a bottleneck.
+                 *
+                 * We count all threads rather than threads in a
+                 * particular pool, which provides an upper bound
+                 * on the number of threads which will access the socket.
+                 *
+                 * rcvbuf just needs to be able to hold a few requests.
+                 * Normally they will be removed from the queue
+                 * as soon a a complete request arrives.
+                 */
+                svc_sock_setbufsize(svsk->sk_sock,
+                                    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+                                    3 * serv->sv_max_mesg);
+
         clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 
         /* Receive data. If we haven't got the record length yet, get
@@ -1043,6 +1061,15 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 
         tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
 
+        /* initialise setting must have enough space to
+         * receive and respond to one request.
+         * svc_tcp_recvfrom will re-adjust if necessary
+         */
+        svc_sock_setbufsize(svsk->sk_sock,
+                            3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
+                            3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
+
+        set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
         set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
         if (sk->sk_state != TCP_ESTABLISHED)
                 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1112,14 +1139,8 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
         /* Initialize the socket */
         if (sock->type == SOCK_DGRAM)
                 svc_udp_init(svsk, serv);
-        else {
-                /* initialise setting must have enough space to
-                 * receive and respond to one request.
-                 */
-                svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
-                                    4 * serv->sv_max_mesg);
+        else
                 svc_tcp_init(svsk, serv);
-        }
 
         dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                 svsk, svsk->sk_sk);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 629a28764da9..42a6f9f20285 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -265,7 +265,7 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
                         frmr->page_list->page_list[page_no] =
                                 ib_dma_map_single(xprt->sc_cm_id->device,
                                         page_address(rqstp->rq_arg.pages[page_no]),
-                                        PAGE_SIZE, DMA_TO_DEVICE);
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
                         if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                 frmr->page_list->page_list[page_no]))
                                 goto fatal_err;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 6c26a675435a..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
128 page_bytes -= sge_bytes; 128 page_bytes -= sge_bytes;
129 129
130 frmr->page_list->page_list[page_no] = 130 frmr->page_list->page_list[page_no] =
131 ib_dma_map_page(xprt->sc_cm_id->device, page, 0, 131 ib_dma_map_single(xprt->sc_cm_id->device,
132 page_address(page),
132 PAGE_SIZE, DMA_TO_DEVICE); 133 PAGE_SIZE, DMA_TO_DEVICE);
133 if (ib_dma_mapping_error(xprt->sc_cm_id->device, 134 if (ib_dma_mapping_error(xprt->sc_cm_id->device,
134 frmr->page_list->page_list[page_no])) 135 frmr->page_list->page_list[page_no]))
@@ -183,6 +184,7 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
183 184
184 fatal_err: 185 fatal_err:
185 printk("svcrdma: Error fast registering memory for xprt %p\n", xprt); 186 printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
187 vec->frmr = NULL;
186 svc_rdma_put_frmr(xprt, frmr); 188 svc_rdma_put_frmr(xprt, frmr);
187 return -EIO; 189 return -EIO;
188} 190}
@@ -516,6 +518,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
516 "svcrdma: could not post a receive buffer, err=%d." 518 "svcrdma: could not post a receive buffer, err=%d."
517 "Closing transport %p.\n", ret, rdma); 519 "Closing transport %p.\n", ret, rdma);
518 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 520 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
521 svc_rdma_put_frmr(rdma, vec->frmr);
519 svc_rdma_put_context(ctxt, 0); 522 svc_rdma_put_context(ctxt, 0);
520 return -ENOTCONN; 523 return -ENOTCONN;
521 } 524 }
@@ -530,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
@@ -606,6 +608,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	return 0;
 
  err:
+	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_frmr(rdma, vec->frmr);
 	svc_rdma_put_context(ctxt, 1);
 	return -EIO;
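The added svc_rdma_unmap_dma() call plugs a leak: on failure, DMA mappings created earlier in send_reply() were recycled with the context but never undone. The shape is the standard kernel unwind idiom, undoing completed steps in reverse order of setup; a self-contained sketch with hypothetical names:

    /* Hypothetical sketch of the unwind idiom the fix follows: each
     * error label undoes exactly the steps that already succeeded,
     * in reverse order.
     */
    struct thing;
    int map_buffers(struct thing *t);
    int post_send(struct thing *t);
    void unmap_buffers(struct thing *t);

    static int setup_and_post(struct thing *t)
    {
            int ret;

            ret = map_buffers(t);           /* step 1 */
            if (ret)
                    return ret;
            ret = post_send(t);             /* step 2 */
            if (ret)
                    goto err_unmap;
            return 0;

    err_unmap:
            unmap_buffers(t);               /* undo step 1 */
            return ret;
    }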
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3d810e7df3fb..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				       page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -520,8 +520,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	svc_xprt_get(&xprt->sc_xprt);
 	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
 	if (ret) {
-		svc_xprt_put(&xprt->sc_xprt);
+		svc_rdma_unmap_dma(ctxt);
 		svc_rdma_put_context(ctxt, 1);
+		svc_xprt_put(&xprt->sc_xprt);
 	}
 	return ret;
 
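Besides the new unmap, the ordering here appears deliberate: context teardown can still touch the transport and its device, so the transport reference taken just above is the last thing dropped. Illustrative reading of the patched error path (comments are mine, not from the patch):

    if (ret) {
            svc_rdma_unmap_dma(ctxt);       /* still needs the device   */
            svc_rdma_put_context(ctxt, 1);  /* may reference the xprt   */
            svc_xprt_put(&xprt->sc_xprt);   /* drop the reference last  */
    }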
@@ -1314,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1342,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3b21e0cc5e69..465aafc2007f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1495,7 +1495,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 	frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
 	frmr_wr.wr.fast_reg.access_flags = (writing ?
-				IB_ACCESS_REMOTE_WRITE : IB_ACCESS_REMOTE_READ);
+				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+				IB_ACCESS_REMOTE_READ);
 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
 	DECR_CQCOUNT(&r_xprt->rx_ep);
 
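The extra IB_ACCESS_LOCAL_WRITE matters because the InfiniBand spec requires that a memory region granting remote write (or remote atomic) access also grant local write access; registering with remote write alone is rejected by spec-conforming HCAs. Restated in isolation (names from the patch, variable is illustrative):

    /* Writing (RDMA-write target): remote write requires local write
     * per the IB spec.  Read-only targets need only remote read.
     */
    int access_flags = writing ?
            (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE) :
            IB_ACCESS_REMOTE_READ;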
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index 5d149c1b5f0d..9ad4d893a566 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
 	}
 	result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
 	if (result < 0) {
-		dev_err(dev, "no memory to add payload in attribute\n");
+		dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+			"attribute: %d\n", msg, size, result);
 		goto error_nla_put;
 	}
 	genlmsg_end(skb, genl_msg);
@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
 	struct sk_buff *skb;
 
 	skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
-	if (skb == NULL)
-		goto error_msg_new;
-	result = wimax_msg_send(wimax_dev, skb);
-error_msg_new:
+	if (IS_ERR(skb))
+		result = PTR_ERR(skb);
+	else
+		result = wimax_msg_send(wimax_dev, skb);
 	return result;
 }
 EXPORT_SYMBOL_GPL(wimax_msg);
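The caller fix follows from wimax_msg_alloc()'s contract: it reports failure as an ERR_PTR-encoded errno, not as NULL, so a NULL test can never fire and would let an error pointer reach wimax_msg_send(). The standard pattern, excerpted:

    #include <linux/err.h>

    /* ERR_PTR convention: failure is encoded in the pointer itself. */
    skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
    if (IS_ERR(skb))
            return PTR_ERR(skb);    /* recover the negative errno */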
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index a0ee76b52510..933e1422b09f 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -338,8 +338,21 @@ out:
  */
 void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
 {
+	/*
+	 * A driver cannot take the wimax_dev out of the
+	 * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If
+	 * the wimax_dev's state is still NULL, we ignore any request
+	 * to change its state because it means it hasn't been yet
+	 * registered.
+	 *
+	 * There is no need to complain about it, as routines that
+	 * call this might be shared from different code paths that
+	 * are called before or after wimax_dev_add() has done its
+	 * job.
+	 */
 	mutex_lock(&wimax_dev->mutex);
-	__wimax_state_change(wimax_dev, new_state);
+	if (wimax_dev->state > __WIMAX_ST_NULL)
+		__wimax_state_change(wimax_dev, new_state);
 	mutex_unlock(&wimax_dev->mutex);
 	return;
 }
@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get);
 void wimax_dev_init(struct wimax_dev *wimax_dev)
 {
 	INIT_LIST_HEAD(&wimax_dev->id_table_node);
-	__wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED);
+	__wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
 	mutex_init(&wimax_dev->mutex);
 	mutex_init(&wimax_dev->mutex_reset);
 }
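Together these two hunks define a stricter lifecycle: a wimax_dev now starts in the internal __WIMAX_ST_NULL state, and per the added comment only wimax_dev_add() moves it out, so state changes issued before registration are silently ignored. From a driver's point of view (illustrative, not from the patch):

    wimax_dev_init(wimax_dev);                      /* state = __WIMAX_ST_NULL */
    wimax_state_change(wimax_dev, WIMAX_ST_READY);  /* ignored: not registered */
    /* ... after a successful wimax_dev_add(wimax_dev, net_dev) ... */
    wimax_state_change(wimax_dev, WIMAX_ST_READY);  /* now takes effect */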
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 6c1993d99902..487cb627ddba 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -907,6 +907,7 @@ EXPORT_SYMBOL(freq_reg_info);
 int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth,
 		  const struct ieee80211_reg_rule **reg_rule)
 {
+	assert_cfg80211_lock();
 	return freq_reg_info_regd(wiphy, center_freq,
 				  bandwidth, reg_rule, NULL);
 }
@@ -1133,7 +1134,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
 	if (is_world_regdom(cfg80211_regdomain->alpha2) ||
 	    (wiphy->regd && is_world_regdom(wiphy->regd->alpha2)))
 		return true;
-	if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+	if (last_request &&
+	    last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
 	    wiphy->custom_regulatory)
 		return true;
 	return false;
@@ -1142,6 +1144,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy)
 /* Reap the advantages of previously found beacons */
 static void reg_process_beacons(struct wiphy *wiphy)
 {
+	/*
+	 * Means we are just firing up cfg80211, so no beacons would
+	 * have been processed yet.
+	 */
+	if (!last_request)
+		return;
 	if (!reg_is_world_roaming(wiphy))
 		return;
 	wiphy_update_beacon_reg(wiphy);
@@ -1176,6 +1184,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_channel *chan;
 
+	assert_cfg80211_lock();
+
 	sband = wiphy->bands[band];
 	BUG_ON(chan_idx >= sband->n_channels);
 	chan = &sband->channels[chan_idx];
@@ -1214,10 +1224,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
 				   const struct ieee80211_regdomain *regd)
 {
 	enum ieee80211_band band;
+
+	mutex_lock(&cfg80211_mutex);
 	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
 		if (wiphy->bands[band])
 			handle_band_custom(wiphy, band, regd);
 	}
+	mutex_unlock(&cfg80211_mutex);
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
 
@@ -1423,7 +1436,7 @@ new_request:
 	return call_crda(last_request->alpha2);
 }
 
-/* This currently only processes user and driver regulatory hints */
+/* This processes *all* regulatory hints */
 static void reg_process_hint(struct regulatory_request *reg_request)
 {
 	int r = 0;
@@ -1538,6 +1551,13 @@ static int regulatory_hint_core(const char *alpha2)
 
 	queue_regulatory_request(request);
 
+	/*
+	 * This ensures last_request is populated once modules
+	 * come swinging in and calling regulatory hints and
+	 * wiphy_apply_custom_regulatory().
+	 */
+	flush_scheduled_work();
+
 	return 0;
 }
 
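The flush works because regulatory requests are serviced from the shared kernel workqueue, and flush_scheduled_work() does not return until everything already queued there has run; by the time regulatory_hint_core() returns, the core hint has been processed and last_request is set. The pattern in isolation (hint_work/reg_work/queue_and_wait are hypothetical names):

    #include <linux/workqueue.h>

    /* Services a queued request; in reg.c this is where last_request
     * ends up being set.
     */
    static void hint_work(struct work_struct *work)
    {
            /* ... process the request ... */
    }
    static DECLARE_WORK(reg_work, hint_work);

    static void queue_and_wait(void)
    {
            schedule_work(&reg_work);   /* queue on the shared workqueue  */
            flush_scheduled_work();     /* returns after hint_work() ran  */
    }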
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2ae65b39b529..1f260c40b6ca 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -395,6 +395,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 			memcpy(ies, res->pub.information_elements, ielen);
 			found->ies_allocated = true;
 			found->pub.information_elements = ies;
+			found->pub.len_information_elements = ielen;
 		}
 	}
 }
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index cb6a5bb85d80..0e59f9ae9b81 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -786,6 +786,13 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
 			err = -EFAULT;
 			goto out;
 		}
+
+		if (cmd == SIOCSIWENCODEEXT) {
+			struct iw_encode_ext *ee = (void *) extra;
+
+			if (iwp->length < sizeof(*ee) + ee->key_len)
+				return -EFAULT;
+		}
 	}
 
 	err = handler(dev, info, (union iwreq_data *) iwp, extra);
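The new check closes a hole in SIOCSIWENCODEEXT handling: iwp->length and ee->key_len both come from userspace, and the key material is expected to follow the fixed struct iw_encode_ext header in the copied buffer, so a short declared length combined with a large key_len would let the handler read past the allocation. Isolated into a hypothetical wrapper for clarity:

    #include <linux/wireless.h>

    /* Hypothetical wrapper around the added check: the user-declared
     * buffer must cover the fixed header plus the key it claims to
     * carry, because ee->key_len itself is user-controlled.
     */
    static int validate_encode_ext(const struct iw_point *iwp,
                                   const struct iw_encode_ext *ee)
    {
            if (iwp->length < sizeof(*ee) + ee->key_len)
                    return -EFAULT;
            return 0;
    }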