 include/linux/if_vlan.h |  5
 net/core/skbuff.c       | 91
 net/ipv4/ip_input.c     |  3
 net/sched/sch_htb.c     |  4
 4 files changed, 73 insertions(+), 30 deletions(-)
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index eef0876d8307..383627ad328f 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -23,8 +23,8 @@ struct vlan_collection;
 struct vlan_dev_info;
 struct hlist_node;
 
-#include <linux/proc_fs.h> /* for proc_dir_entry */
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 
 #define VLAN_HLEN       4               /* The additional bytes (on top of the Ethernet header)
                                          * that VLAN requires.
@@ -185,7 +185,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
                  * This allows the VLAN to have a different MAC than the underlying
                  * device, and still route correctly.
                  */
-                if (!memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr, ETH_ALEN))
+                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+                                        skb->dev->dev_addr))
                         skb->pkt_type = PACKET_HOST;
                 break;
         };
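
The hunk above replaces an open-coded memcmp() of the 6-byte destination MAC with compare_ether_addr(), which is also why <linux/proc_fs.h> gives way to <linux/etherdevice.h> in the include list. As a rough user-space sketch of the test being performed (eth_addr_differs() and the local ETH_ALEN below are illustrative stand-ins, not the kernel helper itself):

/* Sketch only: compare_ether_addr() yields 0 when two Ethernet
 * addresses are equal, so "!compare_ether_addr(a, b)" still reads as
 * "a and b match", exactly like the old "!memcmp(a, b, ETH_ALEN)". */
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static int eth_addr_differs(const uint8_t *a, const uint8_t *b)
{
        return memcmp(a, b, ETH_ALEN) != 0;
}

A caller keeps the same shape as the patched code: if (!eth_addr_differs(dest, dev_addr)) the frame is treated as addressed to this device.
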
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 44f6a181a754..476aa3978504 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,11 +257,11 @@ nodata:
 }
 
 
-static void skb_drop_fraglist(struct sk_buff *skb)
+static void skb_drop_list(struct sk_buff **listp)
 {
-        struct sk_buff *list = skb_shinfo(skb)->frag_list;
+        struct sk_buff *list = *listp;
 
-        skb_shinfo(skb)->frag_list = NULL;
+        *listp = NULL;
 
         do {
                 struct sk_buff *this = list;
@@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb)
         } while (list);
 }
 
+static inline void skb_drop_fraglist(struct sk_buff *skb)
+{
+        skb_drop_list(&skb_shinfo(skb)->frag_list);
+}
+
 static void skb_clone_fraglist(struct sk_buff *skb)
 {
         struct sk_buff *list;
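
The two hunks above generalize the fraglist free: skb_drop_list() now frees any sk_buff chain reachable through a pointer to its head, and skb_drop_fraglist() shrinks to a one-line wrapper, which lets the ___pskb_trim() rewrite further down drop just the tail of a frag_list via &frag->next. A minimal user-space sketch of the same pattern, with struct node, struct container and malloc()/free() standing in for sk_buff, the shared info block and kfree_skb():

#include <stdlib.h>

struct node {
        struct node *next;
};

struct container {
        struct node *chain;
};

/* Detach the whole chain first, then free node by node; callers never
 * observe a half-freed list through *listp. Nodes are assumed to come
 * from malloc(). */
static void drop_list(struct node **listp)
{
        struct node *list = *listp;

        *listp = NULL;
        while (list) {
                struct node *this = list;

                list = list->next;
                free(this);
        }
}

/* Wrapper in the same spirit as the new skb_drop_fraglist(). */
static void drop_container_chain(struct container *c)
{
        drop_list(&c->chain);
}
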
@@ -830,41 +835,75 @@ free_skb:
 
 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 {
+        struct sk_buff **fragp;
+        struct sk_buff *frag;
         int offset = skb_headlen(skb);
         int nfrags = skb_shinfo(skb)->nr_frags;
         int i;
+        int err;
+
+        if (skb_cloned(skb) &&
+            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
+                return err;
 
         for (i = 0; i < nfrags; i++) {
                 int end = offset + skb_shinfo(skb)->frags[i].size;
-                if (end > len) {
-                        if (skb_cloned(skb)) {
-                                if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-                                        return -ENOMEM;
-                        }
-                        if (len <= offset) {
-                                put_page(skb_shinfo(skb)->frags[i].page);
-                                skb_shinfo(skb)->nr_frags--;
-                        } else {
-                                skb_shinfo(skb)->frags[i].size = len - offset;
-                        }
+
+                if (end < len) {
+                        offset = end;
+                        continue;
                 }
-                offset = end;
+
+                if (len > offset)
+                        skb_shinfo(skb)->frags[i++].size = len - offset;
+
+                skb_shinfo(skb)->nr_frags = i;
+
+                for (; i < nfrags; i++)
+                        put_page(skb_shinfo(skb)->frags[i].page);
+
+                if (skb_shinfo(skb)->frag_list)
+                        skb_drop_fraglist(skb);
+                break;
         }
 
-        if (offset < len) {
+        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
+             fragp = &frag->next) {
+                int end = offset + frag->len;
+
+                if (skb_shared(frag)) {
+                        struct sk_buff *nfrag;
+
+                        nfrag = skb_clone(frag, GFP_ATOMIC);
+                        if (unlikely(!nfrag))
+                                return -ENOMEM;
+
+                        nfrag->next = frag->next;
+                        frag = nfrag;
+                        *fragp = frag;
+                }
+
+                if (end < len) {
+                        offset = end;
+                        continue;
+                }
+
+                if (end > len &&
+                    unlikely((err = pskb_trim(frag, len - offset))))
+                        return err;
+
+                if (frag->next)
+                        skb_drop_list(&frag->next);
+                break;
+        }
+
+        if (len > skb_headlen(skb)) {
                 skb->data_len -= skb->len - len;
                 skb->len = len;
         } else {
-                if (len <= skb_headlen(skb)) {
-                        skb->len = len;
-                        skb->data_len = 0;
-                        skb->tail = skb->data + len;
-                        if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
-                                skb_drop_fraglist(skb);
-                } else {
-                        skb->data_len -= skb->len - len;
-                        skb->len = len;
-                }
+                skb->len = len;
+                skb->data_len = 0;
+                skb->tail = skb->data + len;
         }
 
         return 0;
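
Taken together, the rewritten ___pskb_trim() above now unclones the skb once up front instead of inside the fragment loop, trims the paged fragments in a single pass (fragments entirely below len are kept, the fragment that straddles len is shrunk, everything after it plus the old frag_list is released), and then walks the frag_list the same way, cloning any shared buffer before trimming it and dropping the remaining tail with skb_drop_list(). A user-space sketch of the paged-fragment pass, where frag_size[] stands in for skb_shinfo(skb)->frags[].size and releasing the dropped fragments is reduced to the returned count:

/* Sketch only: returns how many fragments survive a trim to 'len'
 * bytes, given 'offset' bytes already consumed by the linear head.
 * Shrinks in place the one fragment that straddles 'len'. */
static int trim_frag_count(unsigned int *frag_size, int nfrags,
                           unsigned int offset, unsigned int len)
{
        int i;

        for (i = 0; i < nfrags; i++) {
                unsigned int end = offset + frag_size[i];

                if (end < len) {                /* fragment wholly kept */
                        offset = end;
                        continue;
                }
                if (len > offset)               /* keep only the bytes up to len */
                        frag_size[i++] = len - offset;
                break;                          /* fragments i..nfrags-1 would be released */
        }
        return i;
}

A trim that lands exactly on a fragment boundary keeps that fragment whole and never leaves a zero-length fragment behind.
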
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1a7dba2fa8a..184c78ca79e6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
                 goto drop;
         }
 
+        /* Remove any debris in the socket control block */
+        memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
+
         return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
                        ip_rcv_finish);
 
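
The added memset() clears the IP options area of the skb control block before the packet reaches the PRE_ROUTING hook, so state left behind by a lower layer cannot later be misread as parsed IP options. A self-contained sketch of that pattern, with struct cb_area and struct opt_area as illustrative stand-ins for the skb control block and struct ip_options:

#include <string.h>

struct opt_area { int flags; unsigned char data[36]; }; /* stand-in for struct ip_options */
struct cb_area  { struct opt_area opt; char rest[8]; }; /* stand-in for the cb area behind IPCB() */

/* Mirror of the added line: scrub only the part of the shared scratch
 * area that this layer is about to rely on. */
static void scrub_ip_options(struct cb_area *cb)
{
        memset(&cb->opt, 0, sizeof(struct opt_area));
}
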
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 34afe41fa2f3..cd0a973b1128 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -196,7 +196,7 @@ struct htb_class
         struct qdisc_rate_table *rate;  /* rate table of the class itself */
         struct qdisc_rate_table *ceil;  /* ceiling rate (limits borrows too) */
         long buffer,cbuffer;            /* token bucket depth/rate */
-        long mbuffer;                   /* max wait time */
+        psched_tdiff_t mbuffer;         /* max wait time */
         long tokens,ctokens;            /* current number of tokens */
         psched_time_t t_c;              /* checkpoint time */
 };
@@ -1601,7 +1601,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 /* set class to be in HTB_CAN_SEND state */
                 cl->tokens = hopt->buffer;
                 cl->ctokens = hopt->cbuffer;
-                cl->mbuffer = 60000000; /* 1min */
+                cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
                 PSCHED_GET_TIME(cl->t_c);
                 cl->cmode = HTB_CAN_SEND;
 
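
The sch_htb.c hunks switch mbuffer to the packet scheduler's time-difference type and derive the one-minute cap from HZ rather than hard-coding microseconds. Assuming the microsecond-resolution scheduler clock (one jiffy = 1000000/HZ microseconds), the new expression still evaluates to the old literal; jiffies_to_us() below is a stand-in for PSCHED_JIFFIE2US() under that clock-source assumption:

#include <stdio.h>

#define HZ 1000                 /* illustrative; the identity below holds for
                                 * any HZ that divides 1000000 evenly */

static unsigned long jiffies_to_us(unsigned long jiffies)
{
        return jiffies * (1000000UL / HZ);
}

int main(void)
{
        printf("%lu\n", jiffies_to_us(HZ * 60));        /* prints 60000000 */
        return 0;
}
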