path: root/net
author		Linus Torvalds <torvalds@linux-foundation.org>	2010-08-10 00:05:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-10 00:05:52 -0400
commit		f6cec0ae58c17522a7bc4e2f39dae19f199ab534 (patch)
tree		496cf6f53b0c75d9ae57bd0e411c5d2f6cea5cbb /net
parent		0fcf12d510b6d1b1b090a090c62009310eca4be4 (diff)
parent		c4e9b56e24422e71424b24eee27c2b134a191d7b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (59 commits)
  igbvf.txt: Add igbvf Documentation
  igb.txt: Add igb documentation
  e100/e1000*/igb*/ixgb*: Add missing read memory barrier
  ixgbe: fix build error with FCOE_CONFIG without DCB_CONFIG
  netxen: protect tx timeout recovery by rtnl lock
  isdn: gigaset: use after free
  isdn: gigaset: add missing unlock
  solos-pci: Fix race condition in tasklet RX handling
  pkt_sched: Fix sch_sfq vs tcf_bind_filter oops
  net: disable preemption before call smp_processor_id()
  tcp: no md5sig option size check bug
  iwlwifi: fix locking assertions
  iwlwifi: fix TX tracer
  isdn: fix information leak
  net: Fix napi_gro_frags vs netpoll path
  usbnet: remove noisy and hardly useful printk
  rtl8180: avoid potential NULL deref in rtl8180_beacon_work
  ath9k: Remove myself from the MAINTAINERS list
  libertas: scan before assocation if no BSSID was given
  libertas: fix association with some APs by using extended rates
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/atm/pppoatm.c	2
-rw-r--r--	net/bluetooth/hci_core.c	2
-rw-r--r--	net/bluetooth/hci_sock.c	8
-rw-r--r--	net/bluetooth/hci_sysfs.c	3
-rw-r--r--	net/bluetooth/l2cap.c	24
-rw-r--r--	net/bluetooth/rfcomm/tty.c	2
-rw-r--r--	net/core/dev.c	7
-rw-r--r--	net/ipv4/tcp_input.c	2
-rw-r--r--	net/irda/irnet/irnet_ppp.c	2
-rw-r--r--	net/l2tp/l2tp_ppp.c	5
-rw-r--r--	net/mac80211/main.c	2
-rw-r--r--	net/mac80211/scan.c	14
-rw-r--r--	net/rxrpc/ar-ack.c	3
-rw-r--r--	net/rxrpc/ar-call.c	6
-rw-r--r--	net/sched/act_nat.c	23
-rw-r--r--	net/sched/cls_flow.c	96
-rw-r--r--	net/sched/cls_rsvp.h	12
-rw-r--r--	net/sched/sch_sfq.c	36
18 files changed, 150 insertions, 99 deletions
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index e49bb6d948a..e9aced0ec56 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -260,7 +260,7 @@ static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
 	return -ENOTTY;
 }
 
-static /*const*/ struct ppp_channel_ops pppoatm_ops = {
+static const struct ppp_channel_ops pppoatm_ops = {
 	.start_xmit = pppoatm_send,
 	.ioctl = pppoatm_devppp_ioctl,
 };
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8303f1c9ef5..c52f091ee6d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -924,7 +924,7 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	hci_conn_hash_init(hdev);
 
-	INIT_LIST_HEAD(&hdev->blacklist.list);
+	INIT_LIST_HEAD(&hdev->blacklist);
 
 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 4f170a59593..83acd164d39 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -168,9 +168,8 @@ static int hci_sock_release(struct socket *sock)
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
 	struct list_head *p;
-	struct bdaddr_list *blacklist = &hdev->blacklist;
 
-	list_for_each(p, &blacklist->list) {
+	list_for_each(p, &hdev->blacklist) {
 		struct bdaddr_list *b;
 
 		b = list_entry(p, struct bdaddr_list, list);
@@ -202,7 +201,7 @@ static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
 
 	bacpy(&entry->bdaddr, &bdaddr);
 
-	list_add(&entry->list, &hdev->blacklist.list);
+	list_add(&entry->list, &hdev->blacklist);
 
 	return 0;
 }
@@ -210,9 +209,8 @@ static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
 int hci_blacklist_clear(struct hci_dev *hdev)
 {
 	struct list_head *p, *n;
-	struct bdaddr_list *blacklist = &hdev->blacklist;
 
-	list_for_each_safe(p, n, &blacklist->list) {
+	list_for_each_safe(p, n, &hdev->blacklist) {
 		struct bdaddr_list *b;
 
 		b = list_entry(p, struct bdaddr_list, list);
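The Bluetooth hunks in hci_core.c, hci_sock.c and hci_sysfs.c all follow from one change: the blacklist head in struct hci_dev becomes a bare struct list_head instead of a wrapper struct bdaddr_list, and the iterators keep working because Linux lists are intrusive, with list_entry() naming the entry type at lookup time. A minimal userspace sketch of that pattern, using simplified stand-ins for the kernel list helpers and Bluetooth structures:

#include <stddef.h>
#include <stdio.h>

/* Simplified intrusive list, modelled on include/linux/list.h. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Recover the containing structure from its embedded list_head. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Stand-ins for the Bluetooth structures (illustrative only). */
struct bdaddr_list {
	struct list_head list;
	unsigned char bdaddr[6];
};

struct hci_dev {
	struct list_head blacklist;	/* bare list head, no payload of its own */
};

int main(void)
{
	struct hci_dev hdev;
	struct bdaddr_list a = { .bdaddr = { 1, 2, 3, 4, 5, 6 } };
	struct list_head *p;

	INIT_LIST_HEAD(&hdev.blacklist);
	list_add(&a.list, &hdev.blacklist);

	list_for_each(p, &hdev.blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
		printf("entry starts with %d\n", b->bdaddr[0]);
	}
	return 0;
}

Because the head carries no payload, dropping the wrapper removes an unused bdaddr field without touching any of the list_entry() callers.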
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index ce44c47eeac..8fb967beee8 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -439,12 +439,11 @@ static const struct file_operations inquiry_cache_fops = {
 static int blacklist_show(struct seq_file *f, void *p)
 {
 	struct hci_dev *hdev = f->private;
-	struct bdaddr_list *blacklist = &hdev->blacklist;
 	struct list_head *l;
 
 	hci_dev_lock_bh(hdev);
 
-	list_for_each(l, &blacklist->list) {
+	list_for_each(l, &hdev->blacklist) {
 		struct bdaddr_list *b;
 		bdaddr_t bdaddr;
 
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 9ba1e8eee37..3e3cd9d4e52 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2527,6 +2527,10 @@ done:
 		if (pi->imtu != L2CAP_DEFAULT_MTU)
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
 
+		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
+			break;
+
 		rfc.mode = L2CAP_MODE_BASIC;
 		rfc.txwin_size = 0;
 		rfc.max_transmit = 0;
@@ -2534,6 +2538,8 @@ done:
 		rfc.monitor_timeout = 0;
 		rfc.max_pdu_size = 0;
 
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+							(unsigned long) &rfc);
 		break;
 
 	case L2CAP_MODE_ERTM:
@@ -2546,6 +2552,9 @@ done:
 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
 			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
 
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+							(unsigned long) &rfc);
+
 		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
@@ -2566,6 +2575,9 @@ done:
 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
 			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
 
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+							(unsigned long) &rfc);
+
 		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
@@ -2577,9 +2589,6 @@ done:
 		break;
 	}
 
-	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-						(unsigned long) &rfc);
-
 	/* FIXME: Need actual value of the flush timeout */
 	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
 	//	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
@@ -3339,6 +3348,15 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
 
 	del_timer(&conn->info_timer);
 
+	if (result != L2CAP_IR_SUCCESS) {
+		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+		conn->info_ident = 0;
+
+		l2cap_conn_start(conn);
+
+		return 0;
+	}
+
 	if (type == L2CAP_IT_FEAT_MASK) {
 		conn->feat_mask = get_unaligned_le32(rsp->data);
 
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 026205c18b7..befc3a52aa0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -1183,7 +1183,7 @@ int __init rfcomm_init_ttys(void)
 	return 0;
 }
 
-void __exit rfcomm_cleanup_ttys(void)
+void rfcomm_cleanup_ttys(void)
 {
 	tty_unregister_driver(rfcomm_tty_driver);
 	put_tty_driver(rfcomm_tty_driver);
diff --git a/net/core/dev.c b/net/core/dev.c
index e1c1cdcc2bb..1ae65439144 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2517,6 +2517,7 @@ int netif_rx(struct sk_buff *skb)
 		struct rps_dev_flow voidflow, *rflow = &voidflow;
 		int cpu;
 
+		preempt_disable();
 		rcu_read_lock();
 
 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@ -2526,6 +2527,7 @@ int netif_rx(struct sk_buff *skb)
 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 
 		rcu_read_unlock();
+		preempt_enable();
 	}
 #else
 	{
@@ -3072,7 +3074,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 	int mac_len;
 	enum gro_result ret;
 
-	if (!(skb->dev->features & NETIF_F_GRO))
+	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
 		goto normal;
 
 	if (skb_is_gso(skb) || skb_has_frags(skb))
@@ -3159,9 +3161,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct sk_buff *p;
 
-	if (netpoll_rx_on(skb))
-		return GRO_NORMAL;
-
 	for (p = napi->gro_list; p; p = p->next) {
 		NAPI_GRO_CB(p)->same_flow =
 			(p->dev == skb->dev) &&
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3c426cb318e..e663b78a2ef 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3930,7 +3930,7 @@ u8 *tcp_parse_md5sig_option(struct tcphdr *th)
 			if (opsize < 2 || opsize > length)
 				return NULL;
 			if (opcode == TCPOPT_MD5SIG)
-				return ptr;
+				return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
 		}
 		ptr += opsize - 2;
 		length -= opsize;
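The tcp_input.c change returns the MD5SIG option pointer only when the option's advertised length equals the fixed MD5SIG option size, instead of trusting whatever length the peer put on the wire. A standalone sketch of the same option walk with that check, assuming userspace copies of the TCPOPT_MD5SIG/TCPOLEN_MD5SIG constants:

#include <stdint.h>
#include <stdio.h>

#define TCPOPT_EOL	0
#define TCPOPT_NOP	1
#define TCPOPT_MD5SIG	19
#define TCPOLEN_MD5SIG	18	/* kind + len + 16-byte digest */

/* Walk the TCP option block; return a pointer just past the MD5SIG
 * kind/length bytes only if the advertised length is exactly right. */
static const uint8_t *parse_md5sig_option(const uint8_t *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return NULL;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			if (length < 2)
				return NULL;
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				return NULL;	/* truncated or bogus option */
			if (opcode == TCPOPT_MD5SIG)
				return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
		}
		ptr += opsize - 2;
		length -= opsize;
	}
	return NULL;
}

int main(void)
{
	/* An MD5SIG option that lies about its length (8 instead of 18). */
	uint8_t bad[] = { TCPOPT_MD5SIG, 8, 1, 2, 3, 4, 5, 6 };

	printf("bad option accepted: %s\n",
	       parse_md5sig_option(bad, (int)sizeof(bad)) ? "yes" : "no");
	return 0;
}

Without the opsize comparison, a short option would still hand back a pointer that the caller treats as the start of a full 16-byte digest.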
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 800bc53b7f6..dfe7b38dd4a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -20,7 +20,7 @@
 /* Please put other headers in irnet.h - Thanks */
 
 /* Generic PPP callbacks (to call us) */
-static struct ppp_channel_ops irnet_ppp_ops = {
+static const struct ppp_channel_ops irnet_ppp_ops = {
 	.start_xmit = ppp_irnet_send,
 	.ioctl = ppp_irnet_ioctl
 };
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 90d82b3f288..ff954b3e94b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -135,7 +135,10 @@ struct pppol2tp_session {
 
 static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
 
-static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
+static const struct ppp_channel_ops pppol2tp_chan_ops = {
+	.start_xmit = pppol2tp_xmit,
+};
+
 static const struct proto_ops pppol2tp_ops;
 
 /* Helpers to obtain tunnel/session contexts from sockets.
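The l2tp hunk replaces a positional initializer, which pairs values with whichever members happen to come first in the structure definition, with designated initializers. A small C illustration of why the named form is the more robust idiom for ops tables (generic names, not the kernel's ppp_channel_ops):

#include <stdio.h>

struct channel_ops {
	int (*start_xmit)(const char *buf, int len);
	int (*ioctl)(unsigned int cmd, unsigned long arg);
};

static int my_xmit(const char *buf, int len)
{
	(void)buf;
	return len;	/* pretend everything was sent */
}

/* Members are named explicitly; .ioctl is omitted and therefore NULL,
 * and the initializer stays correct if fields are added or reordered. */
static const struct channel_ops chan_ops = {
	.start_xmit = my_xmit,
};

int main(void)
{
	printf("xmit=%d, ioctl is %s\n",
	       chan_ops.start_xmit("hi", 2),
	       chan_ops.ioctl ? "set" : "NULL");
	return 0;
}

Marking the table const also lets the compiler place it in read-only data, which is what the pppoatm and irnet hunks above do as well.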
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 7cc4f913a43..798a91b100c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -685,10 +685,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	return 0;
 
+#ifdef CONFIG_INET
  fail_ifa:
 	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
 			       &local->network_latency_notifier);
 	rtnl_lock();
+#endif
  fail_pm_qos:
 	ieee80211_led_exit(local);
 	ieee80211_remove_interfaces(local);
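The mac80211 hunk wraps the fail_ifa unwind label in #ifdef CONFIG_INET because the only goto that targets it is itself compiled only when CONFIG_INET is set; without the guard, !CONFIG_INET builds are left with a defined-but-unused label. A compressed illustration of that unwind layout, using a made-up feature macro and stand-in setup/teardown helpers:

#define CONFIG_FEATURE 1	/* drop this define to build the other variant */

static int setup_base(void)    { return 0; }	/* stand-ins that may fail */
static int setup_feature(void) { return 0; }
static int setup_last(void)    { return -1; }	/* force the error path */
static void teardown_feature(void) { }
static void teardown_base(void)    { }

int register_hw(void)
{
	if (setup_base())
		return -1;

#ifdef CONFIG_FEATURE
	if (setup_feature())
		goto fail_feature;
#endif
	if (setup_last())
		goto fail_last;

	return 0;

 fail_last:
#ifdef CONFIG_FEATURE
	teardown_feature();
 fail_feature:	/* exists only when the goto that uses it exists */
#endif
	teardown_base();
	return -1;
}

int main(void)
{
	return register_hw() ? 1 : 0;
}

The unwind still runs in reverse setup order; the #ifdef only removes the label together with the single jump that needs it.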
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 41f20fb7e67..872d7b6ef6b 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -400,19 +400,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 	else
 		__set_bit(SCAN_SW_SCANNING, &local->scanning);
 
-	/*
-	 * Kicking off the scan need not be protected,
-	 * only the scan variable stuff, since now
-	 * local->scan_req is assigned and other callers
-	 * will abort their scan attempts.
-	 *
-	 * This avoids too many locking dependencies
-	 * so that the scan completed calls have more
-	 * locking freedom.
-	 */
-
 	ieee80211_recalc_idle(local);
-	mutex_unlock(&local->scan_mtx);
 
 	if (local->ops->hw_scan) {
 		WARN_ON(!ieee80211_prep_hw_scan(local));
@@ -420,8 +408,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 	} else
 		rc = ieee80211_start_sw_scan(local);
 
-	mutex_lock(&local->scan_mtx);
-
 	if (rc) {
 		kfree(local->hw_scan_req);
 		local->hw_scan_req = NULL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 2714da167fb..b6ffe4e1b84 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -245,6 +245,9 @@ static void rxrpc_resend_timer(struct rxrpc_call *call)
 	_enter("%d,%d,%d",
 	       call->acks_tail, call->acks_unacked, call->acks_head);
 
+	if (call->state >= RXRPC_CALL_COMPLETE)
+		return;
+
 	resend = 0;
 	resend_at = 0;
 
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index 909d092de9f..bf656c230ba 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -786,6 +786,7 @@ static void rxrpc_call_life_expired(unsigned long _call)
 
 /*
  * handle resend timer expiry
+ * - may not take call->state_lock as this can deadlock against del_timer_sync()
  */
 static void rxrpc_resend_time_expired(unsigned long _call)
 {
@@ -796,12 +797,9 @@ static void rxrpc_resend_time_expired(unsigned long _call)
 	if (call->state >= RXRPC_CALL_COMPLETE)
 		return;
 
-	read_lock_bh(&call->state_lock);
 	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
-	if (call->state < RXRPC_CALL_COMPLETE &&
-	    !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+	if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
 		rxrpc_queue_call(call);
-	read_unlock_bh(&call->state_lock);
 }
 
 /*
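With the comment added above, the resend-timer handler avoids state_lock entirely (taking it there can deadlock against del_timer_sync()); the atomic test_and_set_bit() is enough to guarantee the call is queued at most once per event. A userspace sketch of that lock-free idiom using C11 atomics, with illustrative names rather than the rxrpc API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool resend_pending;	/* plays the role of the event bit */
static int queued;

static void queue_call(void)
{
	queued++;	/* in the kernel this would schedule the call's work */
}

/* Timer-expiry path: no lock is taken; the atomic exchange alone decides
 * which caller gets to queue the work. */
static void resend_time_expired(void)
{
	if (!atomic_exchange(&resend_pending, true))
		queue_call();
}

int main(void)
{
	resend_time_expired();
	resend_time_expired();	/* second expiry sees the flag already set */
	printf("queued %d time(s)\n", queued);	/* prints 1 */
	return 0;
}

The exchange returns the previous value, so only the first expiry queues the call.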
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d0386a413e8..509a2d53a99 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -114,6 +114,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	int egress;
 	int action;
 	int ihl;
+	int noff;
 
 	spin_lock(&p->tcf_lock);
 
@@ -132,7 +133,8 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	if (unlikely(action == TC_ACT_SHOT))
 		goto drop;
 
-	if (!pskb_may_pull(skb, sizeof(*iph)))
+	noff = skb_network_offset(skb);
+	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
 		goto drop;
 
 	iph = ip_hdr(skb);
@@ -144,7 +146,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 
 	if (!((old_addr ^ addr) & mask)) {
 		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb, sizeof(*iph)) &&
+		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 			goto drop;
 
@@ -172,9 +174,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	{
 		struct tcphdr *tcph;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) ||
+		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
 		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*tcph)) &&
+		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
 		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			goto drop;
 
@@ -186,9 +188,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	{
 		struct udphdr *udph;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*udph)) ||
+		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
 		    (skb_cloned(skb) &&
-		     !skb_clone_writable(skb, ihl + sizeof(*udph)) &&
+		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
 		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			goto drop;
 
@@ -205,7 +207,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	{
 		struct icmphdr *icmph;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,7 +217,8 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		    (icmph->type != ICMP_PARAMETERPROB))
 			break;
 
-		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
+				   noff))
 			goto drop;
 
 		icmph = (void *)(skb_network_header(skb) + ihl);
@@ -229,8 +232,8 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 			break;
 
 		if (skb_cloned(skb) &&
-		    !skb_clone_writable(skb,
-					ihl + sizeof(*icmph) + sizeof(*iph)) &&
+		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
+					     sizeof(*iph) + noff) &&
 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 			goto drop;
 
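Every act_nat hunk adds noff, the network-header offset, to the length passed to pskb_may_pull() and skb_clone_writable(), so the check covers the headers as measured from the start of the buffer rather than from the network header alone. A plain-buffer sketch of that arithmetic, with a hypothetical may_pull() helper rather than the skb API:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ipv4_hdr {	/* minimal 20-byte stand-in for struct iphdr */
	uint8_t  ver_ihl;
	uint8_t  tos;
	uint16_t tot_len;
	uint8_t  rest[16];
};

/* May we read 'need' bytes starting at 'offset' in a buffer of 'len' bytes? */
static bool may_pull(size_t len, size_t offset, size_t need)
{
	return offset <= len && need <= len - offset;
}

int main(void)
{
	uint8_t frame[24] = { 0 };
	size_t frame_len = sizeof(frame);
	size_t noff = 14;	/* e.g. an Ethernet header before the IP header */

	/* Unadjusted: ignores where the IP header actually starts. */
	bool loose = may_pull(frame_len, 0, sizeof(struct ipv4_hdr));
	/* Adjusted: sizeof(*iph) + noff, as in the patched tcf_nat(). */
	bool strict = may_pull(frame_len, noff, sizeof(struct ipv4_hdr));

	printf("loose=%d strict=%d\n", loose, strict);
	return 0;
}

With a 24-byte buffer and a 14-byte link-layer header in front of the IP header, the unadjusted check passes while the offset-aware one correctly refuses.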
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index f73542d2cdd..e17096e3913 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -65,37 +65,47 @@ static inline u32 addr_fold(void *addr)
 	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
 }
 
-static u32 flow_get_src(const struct sk_buff *skb)
+static u32 flow_get_src(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		return ntohl(ip_hdr(skb)->saddr);
+		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
+			return ntohl(ip_hdr(skb)->saddr);
+		break;
 	case htons(ETH_P_IPV6):
-		return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
-	default:
-		return addr_fold(skb->sk);
+		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+			return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
+		break;
 	}
+
+	return addr_fold(skb->sk);
 }
 
-static u32 flow_get_dst(const struct sk_buff *skb)
+static u32 flow_get_dst(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		return ntohl(ip_hdr(skb)->daddr);
+		if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
+			return ntohl(ip_hdr(skb)->daddr);
+		break;
 	case htons(ETH_P_IPV6):
-		return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
-	default:
-		return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
+		if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+			return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
+		break;
 	}
+
+	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
-static u32 flow_get_proto(const struct sk_buff *skb)
+static u32 flow_get_proto(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		return ip_hdr(skb)->protocol;
+		return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
+		       ip_hdr(skb)->protocol : 0;
 	case htons(ETH_P_IPV6):
-		return ipv6_hdr(skb)->nexthdr;
+		return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
+		       ipv6_hdr(skb)->nexthdr : 0;
 	default:
 		return 0;
 	}
@@ -116,58 +126,64 @@ static int has_ports(u8 protocol)
 	}
 }
 
-static u32 flow_get_proto_src(const struct sk_buff *skb)
+static u32 flow_get_proto_src(struct sk_buff *skb)
 {
-	u32 res = 0;
-
 	switch (skb->protocol) {
 	case htons(ETH_P_IP): {
-		struct iphdr *iph = ip_hdr(skb);
+		struct iphdr *iph;
 
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
+			break;
+		iph = ip_hdr(skb);
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    has_ports(iph->protocol))
-			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
+		    has_ports(iph->protocol) &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 2))
+			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
 		break;
 	}
 	case htons(ETH_P_IPV6): {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
+		struct ipv6hdr *iph;
 
+		if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
+			break;
+		iph = ipv6_hdr(skb);
 		if (has_ports(iph->nexthdr))
-			res = ntohs(*(__be16 *)&iph[1]);
+			return ntohs(*(__be16 *)&iph[1]);
 		break;
 	}
-	default:
-		res = addr_fold(skb->sk);
 	}
 
-	return res;
+	return addr_fold(skb->sk);
 }
 
-static u32 flow_get_proto_dst(const struct sk_buff *skb)
+static u32 flow_get_proto_dst(struct sk_buff *skb)
 {
-	u32 res = 0;
-
 	switch (skb->protocol) {
 	case htons(ETH_P_IP): {
-		struct iphdr *iph = ip_hdr(skb);
+		struct iphdr *iph;
 
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
+			break;
+		iph = ip_hdr(skb);
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
-		    has_ports(iph->protocol))
-			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
+		    has_ports(iph->protocol) &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
+			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
 		break;
 	}
 	case htons(ETH_P_IPV6): {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
+		struct ipv6hdr *iph;
 
+		if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
+			break;
+		iph = ipv6_hdr(skb);
 		if (has_ports(iph->nexthdr))
-			res = ntohs(*(__be16 *)((void *)&iph[1] + 2));
+			return ntohs(*(__be16 *)((void *)&iph[1] + 2));
 		break;
 	}
-	default:
-		res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 	}
 
-	return res;
+	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -211,7 +227,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 })
 #endif
 
-static u32 flow_get_nfct_src(const struct sk_buff *skb)
+static u32 flow_get_nfct_src(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
@@ -223,7 +239,7 @@ fallback:
 	return flow_get_src(skb);
 }
 
-static u32 flow_get_nfct_dst(const struct sk_buff *skb)
+static u32 flow_get_nfct_dst(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
@@ -235,14 +251,14 @@ fallback:
 	return flow_get_dst(skb);
 }
 
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb)
+static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
 {
 	return ntohs(CTTUPLE(skb, src.u.all));
 fallback:
 	return flow_get_proto_src(skb);
 }
 
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb)
+static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
 {
 	return ntohs(CTTUPLE(skb, dst.u.all));
 fallback:
@@ -281,7 +297,7 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
 	return tag & VLAN_VID_MASK;
 }
 
-static u32 flow_key_get(const struct sk_buff *skb, int key)
+static u32 flow_key_get(struct sk_buff *skb, int key)
 {
 	switch (key) {
 	case FLOW_KEY_SRC:
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index dd9414e4420..425a1790b04 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -143,9 +143,17 @@ static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	u8 tunnelid = 0;
 	u8 *xprt;
 #if RSVP_DST_LEN == 4
-	struct ipv6hdr *nhptr = ipv6_hdr(skb);
+	struct ipv6hdr *nhptr;
+
+	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
+		return -1;
+	nhptr = ipv6_hdr(skb);
 #else
-	struct iphdr *nhptr = ip_hdr(skb);
+	struct iphdr *nhptr;
+
+	if (!pskb_network_may_pull(skb, sizeof(*nhptr)))
+		return -1;
+	nhptr = ip_hdr(skb);
 #endif
 
 restart:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c65762823f5..534f33231c1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -122,7 +122,11 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 	{
-		const struct iphdr *iph = ip_hdr(skb);
+		const struct iphdr *iph;
+
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
+			goto err;
+		iph = ip_hdr(skb);
 		h = (__force u32)iph->daddr;
 		h2 = (__force u32)iph->saddr ^ iph->protocol;
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -131,25 +135,32 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		     iph->protocol == IPPROTO_UDPLITE ||
 		     iph->protocol == IPPROTO_SCTP ||
 		     iph->protocol == IPPROTO_DCCP ||
-		     iph->protocol == IPPROTO_ESP))
+		     iph->protocol == IPPROTO_ESP) &&
+		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
 			h2 ^= *(((u32*)iph) + iph->ihl);
 		break;
 	}
 	case htons(ETH_P_IPV6):
 	{
-		struct ipv6hdr *iph = ipv6_hdr(skb);
+		struct ipv6hdr *iph;
+
+		if (!pskb_network_may_pull(skb, sizeof(*iph)))
+			goto err;
+		iph = ipv6_hdr(skb);
 		h = (__force u32)iph->daddr.s6_addr32[3];
 		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
-		if (iph->nexthdr == IPPROTO_TCP ||
+		if ((iph->nexthdr == IPPROTO_TCP ||
 		    iph->nexthdr == IPPROTO_UDP ||
 		    iph->nexthdr == IPPROTO_UDPLITE ||
 		    iph->nexthdr == IPPROTO_SCTP ||
 		    iph->nexthdr == IPPROTO_DCCP ||
-		    iph->nexthdr == IPPROTO_ESP)
+		    iph->nexthdr == IPPROTO_ESP) &&
+		    pskb_network_may_pull(skb, sizeof(*iph) + 4))
 			h2 ^= *(u32*)&iph[1];
 		break;
 	}
 	default:
+err:
 		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
 		h2 = (unsigned long)skb->sk;
 	}
@@ -502,6 +513,12 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
 	return 0;
 }
 
+static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
+			      u32 classid)
+{
+	return 0;
+}
+
 static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
@@ -556,6 +573,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 static const struct Qdisc_class_ops sfq_class_ops = {
 	.get = sfq_get,
 	.tcf_chain = sfq_find_tcf,
+	.bind_tcf = sfq_bind,
 	.dump = sfq_dump_class,
 	.dump_stats = sfq_dump_class_stats,
 	.walk = sfq_walk,
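The last two sch_sfq hunks correspond to the "pkt_sched: Fix sch_sfq vs tcf_bind_filter oops" entry in the merge summary: sfq exported a tcf_chain hook but no bind_tcf, so binding a classifier to it could end up calling through a NULL function pointer, and the fix supplies a do-nothing sfq_bind(). A generic illustration of the two ways to make such an optional callback safe, using a made-up ops structure rather than Qdisc_class_ops:

#include <stdio.h>

struct class_ops {
	unsigned long (*get)(unsigned int classid);
	unsigned long (*bind)(unsigned long parent, unsigned int classid);
};

static unsigned long my_get(unsigned int classid)
{
	return classid;	/* pretend we looked the class up */
}

/* Option 1: provide a no-op so callers may invoke it unconditionally. */
static unsigned long my_bind_noop(unsigned long parent, unsigned int classid)
{
	(void)parent;
	(void)classid;
	return 0;
}

static const struct class_ops ops_with_stub = {
	.get  = my_get,
	.bind = my_bind_noop,
};

static const struct class_ops ops_without_bind = {
	.get = my_get,	/* .bind stays NULL */
};

static unsigned long bind_filter(const struct class_ops *ops,
				 unsigned long parent, unsigned int classid)
{
	/* Option 2: every caller checks for NULL first. The sfq fix takes
	 * option 1, which keeps the calling code unconditional. */
	return ops->bind ? ops->bind(parent, classid) : 0;
}

int main(void)
{
	printf("%lu %lu\n",
	       bind_filter(&ops_with_stub, 1, 42),
	       bind_filter(&ops_without_bind, 1, 42));
	return 0;
}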